path: root/sys/kern/kern_rwlock.c
author    Ted Unangst <tedu@cvs.openbsd.org>    2006-06-02 05:02:35 +0000
committer Ted Unangst <tedu@cvs.openbsd.org>    2006-06-02 05:02:35 +0000
commit    3dac4b45f4f0d96e6362a2125ade55b0b79dc4fa (patch)
tree      588c2be1981b59c296a8f1409682bfbb234b48b6 /sys/kern/kern_rwlock.c
parent    6561d9ff4b678831c24e591b232833eead9dab68 (diff)
remove upgrade/downgrade; they are useless.
rework the main entry points to just use rw_test_and_set. rework the exit paths to be more like the enter paths. add sleepfail so more lockmgr locks can be replaced. some from art, ok sturm
Diffstat (limited to 'sys/kern/kern_rwlock.c')
-rw-r--r--  sys/kern/kern_rwlock.c  | 106
1 file changed, 33 insertions(+), 73 deletions(-)
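
The fast paths in this diff replace the plain read-modify-write of rwl_owner with rw_test_and_set(). Its declaration is not part of this diff, but from the call sites the assumed semantics are an atomic compare-and-swap on the owner word that returns 0 when the swap happened and nonzero when rwl_owner no longer held the expected value. The following is only an illustrative sketch of those semantics; the parameter names and volatile qualifier are assumptions, and the real primitive must make the compare and the store one atomic step (for example with a CPU compare-and-swap instruction), which this stand-in does not.

int
rw_test_and_set(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	/* Illustrative only: the compare and store must be a single atomic operation. */
	if (*p != old)
		return (1);	/* raced with another update; caller retries or takes the slow path */
	*p = new;
	return (0);		/* swap succeeded */
}

With such a primitive, rw_enter_read() and rw_enter_write() only drop into the slow path rw_enter() when the lock is held against them or the compare-and-swap loses a race, and the reworked exit paths likewise only call rw_exit() when there are waiters to wake or the compare-and-swap fails.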
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 4c750a2f2f6..ec6a02fe443 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_rwlock.c,v 1.7 2006/05/07 20:12:41 tedu Exp $ */
+/* $OpenBSD: kern_rwlock.c,v 1.8 2006/06/02 05:02:34 tedu Exp $ */
/*
* Copyright (c) 2002, 2003 Artur Grabowski <art@openbsd.org>
@@ -45,10 +45,6 @@
* Sets RWLOCK_WAIT|RWLOCK_WRWANT while waiting.
* RW_READ RWLOCK_WRLOCK|RWLOCK_WRWANT may not be set. We increment
* with RWLOCK_READ_INCR. RWLOCK_WAIT while waiting.
- * RW_UPGRADE There must be exactly one holder of the read lock.
- * We increment with what's needed for RW_WRITE - RW_READ.
- * RWLOCK_WAIT|RWLOCK_WRWANT while waiting.
- * RW_DOWNGRADE Always doable. Increment with -RW_WRITE + RW_READ.
*/
static const struct rwlock_op {
unsigned long inc;
@@ -71,20 +67,6 @@ static const struct rwlock_op {
0,
PLOCK
},
- { /* RW_UPGRADE */
- RWLOCK_WRLOCK-RWLOCK_READ_INCR,
- ~(RWLOCK_READ_INCR | RWLOCK_WAIT | RWLOCK_WRWANT),
- RWLOCK_WAIT|RWLOCK_WRLOCK,
- 1,
- PLOCK - 4
- },
- { /* RW_DOWNGRADE */
- -RWLOCK_WRLOCK + RWLOCK_READ_INCR,
- 0,
- 0,
- -1,
- 0
- }
};
#ifndef __HAVE_MD_RWLOCK
@@ -94,10 +76,11 @@ static const struct rwlock_op {
void
rw_enter_read(struct rwlock *rwl)
{
- if (__predict_false(rwl->rwl_owner & RWLOCK_WRLOCK))
+ unsigned long owner = rwl->rwl_owner;
+
+ if (__predict_false((owner & RWLOCK_WRLOCK) ||
+ rw_test_and_set(&rwl->rwl_owner, owner, owner + RWLOCK_READ_INCR)))
rw_enter(rwl, RW_READ);
- else
- rwl->rwl_owner += RWLOCK_READ_INCR;
}
void
@@ -105,26 +88,19 @@ rw_enter_write(struct rwlock *rwl)
{
struct proc *p = curproc;
- if (__predict_false(rwl->rwl_owner != 0))
+ if (__predict_false(rw_test_and_set(&rwl->rwl_owner, 0,
+ RW_PROC(p) | RWLOCK_WRLOCK)))
rw_enter(rwl, RW_WRITE);
- else
- rwl->rwl_owner = RW_PROC(p) | RWLOCK_WRLOCK;
}
void
rw_exit_read(struct rwlock *rwl)
{
unsigned long owner = rwl->rwl_owner;
- unsigned long decr = (owner & (RWLOCK_WAIT|RWLOCK_WRWANT)) |
- RWLOCK_READ_INCR;
-
- rwl->rwl_owner -= decr;
- /*
- * Potential MP race here. If the owner had WRWANT set we cleared
- * it and a reader can sneak in before a writer. Do we care?
- */
- if (__predict_false(owner & RWLOCK_WAIT))
- rw_exit_waiters(rwl, owner);
+
+ if (__predict_false((owner & RWLOCK_WAIT) ||
+ rw_test_and_set(&rwl->rwl_owner, owner, owner - RWLOCK_READ_INCR)))
+ rw_exit(rwl);
}
void
@@ -132,13 +108,9 @@ rw_exit_write(struct rwlock *rwl)
{
unsigned long owner = rwl->rwl_owner;
- rwl->rwl_owner = 0;
- /*
- * Potential MP race here. If the owner had WRWANT set we cleared
- * it and a reader can sneak in before a writer. Do we care?
- */
- if (__predict_false(owner & RWLOCK_WAIT))
- rw_exit_waiters(rwl, owner);
+ if (__predict_false((owner & RWLOCK_WAIT) ||
+ rw_test_and_set(&rwl->rwl_owner, owner, 0)))
+ rw_exit(rwl);
}
int
@@ -166,35 +138,13 @@ rw_enter_diag(struct rwlock *rwl, int flags)
if (RW_PROC(curproc) == RW_PROC(rwl->rwl_owner))
panic("rw_enter: locking against myself");
break;
- case RW_UPGRADE:
- /*
- * Since we're holding the read lock, it can't possibly
- * be write locked.
- */
- if (rwl->rwl_owner & RWLOCK_WRLOCK)
- panic("rw_enter: upgraded lock write locked");
- break;
- case RW_DOWNGRADE:
- /*
- * If we're downgrading, we must hold the write lock.
- */
- if (RW_PROC(curproc) != RW_PROC(rwl->rwl_owner))
- panic("rw_enter: not holder");
default:
panic("rw_enter: unknown op 0x%x", flags);
}
}
-static void
-rw_exit_diag(struct rwlock *rwl, int owner)
-{
- if ((owner & RWLOCK_WAIT) == 0)
- panic("rw_exit: no waiter");
-}
-
#else
#define rw_enter_diag(r, f)
-#define rw_exit_diag(r, o)
#endif
void
@@ -216,11 +166,7 @@ rw_enter(struct rwlock *rwl, int flags)
op = &rw_ops[flags & RW_OPMASK];
- inc = op->inc;
- if (op->proc_mult == -1)
- inc -= RW_PROC(curproc);
- else if (op->proc_mult == 1)
- inc += RW_PROC(curproc);
+ inc = op->inc + RW_PROC(curproc) * op->proc_mult;
prio = op->wait_prio;
if (flags & RW_INTR)
prio |= PCATCH;
@@ -231,6 +177,8 @@ retry:
rw_enter_diag(rwl, flags);
+ if (flags & RW_NOSLEEP)
+ return (EBUSY);
if ((error = tsleep(rwl, prio, rwl->rwl_name, 0)) != 0)
return (error);
if (flags & RW_SLEEPFAIL)
@@ -244,9 +192,21 @@ retry:
}
void
-rw_exit_waiters(struct rwlock *rwl, unsigned long owner)
+rw_exit(struct rwlock *rwl)
{
- rw_exit_diag(rwl, owner);
- /* We wake up all waiters because we can't know how many they are. */
- wakeup(rwl);
+ unsigned long owner = rwl->rwl_owner;
+ int wrlock = owner & RWLOCK_WRLOCK;
+ unsigned long set;
+
+ do {
+ owner = rwl->rwl_owner;
+ if (wrlock)
+ set = 0;
+ else
+ set = (owner - RWLOCK_READ_INCR) &
+ ~(RWLOCK_WAIT|RWLOCK_WRWANT);
+ } while (rw_test_and_set(&rwl->rwl_owner, owner, set));
+
+ if (owner & RWLOCK_WAIT)
+ wakeup(rwl);
}
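
The RW_NOSLEEP and RW_SLEEPFAIL handling in rw_enter() is what lets more lockmgr users be converted: with RW_NOSLEEP the function fails immediately with EBUSY instead of sleeping, and with RW_SLEEPFAIL the thread still sleeps on contention but rw_enter() then returns an error instead of retrying, so the caller can revalidate whatever the lock protects (the same idea as lockmgr's LK_SLEEPFAIL). A hypothetical caller is sketched below; the error value returned in the RW_SLEEPFAIL case is not visible in this diff, and object_still_valid() is a made-up placeholder for the caller's revalidation.

/* Assumes <sys/rwlock.h> for struct rwlock, rw_enter(), RW_WRITE and RW_SLEEPFAIL. */
int	object_still_valid(void);	/* placeholder: revalidate after sleeping */

int
example_modify(struct rwlock *rwl)
{
	int error;

again:
	error = rw_enter(rwl, RW_WRITE | RW_SLEEPFAIL);
	if (error != 0) {
		/* We slept; the protected object may have changed under us. */
		if (!object_still_valid())
			return (error);
		goto again;
	}

	/* ... modify the object while holding the write lock ... */

	rw_exit_write(rwl);
	return (0);
}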