summary | refs | log | tree | commit | diff
path: root/sys/kern
diff options
context:
space:
mode:
author    Ted Unangst <tedu@cvs.openbsd.org>  2013-05-01 17:13:06 +0000
committer Ted Unangst <tedu@cvs.openbsd.org>  2013-05-01 17:13:06 +0000
commit  c0430be1727a23297df2ed35783c626aad9b154d (patch)
tree    0d1696ce1a1fdff351092876056d00a2c42ca2ce /sys/kern
parent  99e5da00b2dc04a474ce7552941e6a95a97fe325 (diff)
exorcise lockmgr. the api remains, but is now backed by recursive rwlocks.
originally by thib. ok deraadt jsing and anyone who tested
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_lock.c    312
-rw-r--r--  sys/kern/kern_rwlock.c   96
2 files changed, 106 insertions(+), 302 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index d7a928bdc8f..9593ac5b276 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_lock.c,v 1.39 2013/03/28 16:55:25 deraadt Exp $ */
+/* $OpenBSD: kern_lock.c,v 1.40 2013/05/01 17:13:05 tedu Exp $ */
/*
* Copyright (c) 1995
@@ -41,39 +41,10 @@
#include <sys/systm.h>
#include <sys/sched.h>
-
-/*
- * Locking primitives implementation.
- * Locks provide shared/exclusive synchronization.
- */
-
-/*
- * Acquire a resource. We sleep on the address of the lk_sharecount
- * member normally; if waiting for it to drain we sleep on the address
- * of the lk_waitcount member instead.
- */
-#define ACQUIRE(lkp, error, extflags, drain, wanted) \
-do { \
- for (error = 0; wanted; ) { \
- if ((drain)) \
- (lkp)->lk_flags |= LK_WAITDRAIN; \
- else \
- (lkp)->lk_waitcount++; \
- error = tsleep((drain) ? \
- &(lkp)->lk_waitcount : &(lkp)->lk_sharecount, \
- (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo); \
- if ((drain) == 0) \
- (lkp)->lk_waitcount--; \
- if (error) \
- break; \
- } \
-} while (0)
-
-#define SETHOLDER(lkp, pid, cpu_id) \
- (lkp)->lk_lockholder = (pid)
-
-#define WEHOLDIT(lkp, pid, cpu_id) \
- ((lkp)->lk_lockholder == (pid))
+#ifdef MP_LOCKDEBUG
+/* CPU-dependent timing, needs this to be settable from ddb. */
+int __mp_lock_spinout = 200000000;
+#endif
/*
* Initialize a lock; required before use.
@@ -81,269 +52,54 @@ do { \
void
lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
{
+ KASSERT(flags == 0);
bzero(lkp, sizeof(struct lock));
- lkp->lk_flags = flags & LK_EXTFLG_MASK;
- lkp->lk_lockholder = LK_NOPROC;
- lkp->lk_prio = prio;
- lkp->lk_timo = timo;
- lkp->lk_wmesg = wmesg; /* just a name for spin locks */
+ rrw_init(&lkp->lk_lck, wmesg);
}
-/*
- * Determine the status of a lock.
- */
int
lockstatus(struct lock *lkp)
{
- int lock_type = 0;
-
- if (lkp->lk_exclusivecount != 0)
- lock_type = LK_EXCLUSIVE;
- else if (lkp->lk_sharecount != 0)
- lock_type = LK_SHARED;
- return (lock_type);
+ return (rrw_status(&lkp->lk_lck));
}
-/*
- * Set, change, or release a lock.
- *
- * Shared requests increment the shared count. Exclusive requests set the
- * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
- * accepted shared locks and shared-to-exclusive upgrades to go away.
- */
int
-lockmgr(__volatile struct lock *lkp, u_int flags, void *notused)
+lockmgr(struct lock *lkp, u_int flags, void *notused)
{
- int error;
- pid_t pid;
- int extflags;
- cpuid_t cpu_id;
- struct proc *p = curproc;
-
- error = 0;
- extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
-
-#ifdef DIAGNOSTIC
- if (p == NULL)
- panic("lockmgr: process context required");
-#endif
- /* Process context required. */
- pid = p->p_pid;
- cpu_id = cpu_number();
-
- /*
- * Once a lock has drained, the LK_DRAINING flag is set and an
- * exclusive lock is returned. The only valid operation thereafter
- * is a single release of that exclusive lock. This final release
- * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
- * further requests of any sort will result in a panic. The bits
- * selected for these two flags are chosen so that they will be set
- * in memory that is freed (freed memory is filled with 0xdeadbeef).
- */
- if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
-#ifdef DIAGNOSTIC
- if (lkp->lk_flags & LK_DRAINED)
- panic("lockmgr: using decommissioned lock");
- if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
- WEHOLDIT(lkp, pid, cpu_id) == 0)
- panic("lockmgr: non-release on draining lock: %d",
- flags & LK_TYPE_MASK);
-#endif /* DIAGNOSTIC */
- lkp->lk_flags &= ~LK_DRAINING;
- lkp->lk_flags |= LK_DRAINED;
+ int rwflags = 0;
+
+ KASSERT(!((flags & (LK_SHARED|LK_EXCLUSIVE)) ==
+ (LK_SHARED|LK_EXCLUSIVE)));
+ KASSERT(!((flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
+ (LK_CANRECURSE|LK_RECURSEFAIL)));
+ KASSERT((flags & LK_RELEASE) ||
+ (flags & (LK_SHARED|LK_EXCLUSIVE|LK_DRAIN)));
+
+ if (flags & LK_RELEASE) {
+ rrw_exit(&lkp->lk_lck);
+ return (0);
}
+ if (flags & LK_SHARED)
+ rwflags |= RW_READ;
+ if (flags & (LK_EXCLUSIVE|LK_DRAIN))
+ rwflags |= RW_WRITE;
+ if (flags & LK_RECURSEFAIL)
+ rwflags |= RW_RECURSEFAIL;
+ if (flags & LK_NOWAIT)
+ rwflags |= RW_NOSLEEP;
- /*
- * Check if the caller is asking us to be schizophrenic.
- */
- if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
- (LK_CANRECURSE|LK_RECURSEFAIL))
- panic("lockmgr: make up your mind");
-
- switch (flags & LK_TYPE_MASK) {
+ return (rrw_enter(&lkp->lk_lck, rwflags));
- case LK_SHARED:
- if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
- /*
- * If just polling, check to see if we will block.
- */
- if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL))) {
- error = EBUSY;
- break;
- }
- /*
- * Wait for exclusive locks and upgrades to clear.
- */
- ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL));
- if (error)
- break;
- lkp->lk_sharecount++;
- break;
- }
- /*
- * We hold an exclusive lock, so downgrade it to shared.
- * An alternative would be to fail with EDEADLK.
- */
- lkp->lk_sharecount++;
-
- if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
- lkp->lk_exclusivecount == 0)
- panic("lockmgr: not holding exclusive lock");
- lkp->lk_sharecount += lkp->lk_exclusivecount;
- lkp->lk_exclusivecount = 0;
- lkp->lk_flags &= ~LK_HAVE_EXCL;
- SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
- if (lkp->lk_waitcount)
- wakeup(&lkp->lk_sharecount);
- break;
-
- case LK_EXCLUSIVE:
- if (WEHOLDIT(lkp, pid, cpu_id)) {
- /*
- * Recursive lock.
- */
- if ((extflags & LK_CANRECURSE) == 0) {
- if (extflags & LK_RECURSEFAIL) {
- error = EDEADLK;
- break;
- } else
- panic("lockmgr: locking against myself");
- }
- lkp->lk_exclusivecount++;
- break;
- }
- /*
- * If we are just polling, check to see if we will sleep.
- */
- if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
- lkp->lk_sharecount != 0)) {
- error = EBUSY;
- break;
- }
- /*
- * Try to acquire the want_exclusive flag.
- */
- ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL));
- if (error)
- break;
- lkp->lk_flags |= LK_WANT_EXCL;
- /*
- * Wait for shared locks and upgrades to finish.
- */
- ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
- lkp->lk_flags &= ~LK_WANT_EXCL;
- if (error)
- break;
- lkp->lk_flags |= LK_HAVE_EXCL;
- SETHOLDER(lkp, pid, cpu_id);
- if (lkp->lk_exclusivecount != 0)
- panic("lockmgr: non-zero exclusive count");
- lkp->lk_exclusivecount = 1;
- break;
-
- case LK_RELEASE:
- if (lkp->lk_exclusivecount != 0) {
- if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
- panic("lockmgr: pid %d, not exclusive lock "
- "holder %d unlocking",
- pid, lkp->lk_lockholder);
- }
- lkp->lk_exclusivecount--;
- if (lkp->lk_exclusivecount == 0) {
- lkp->lk_flags &= ~LK_HAVE_EXCL;
- SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
- }
- } else if (lkp->lk_sharecount != 0) {
- lkp->lk_sharecount--;
- }
-#ifdef DIAGNOSTIC
- else
- panic("lockmgr: release of unlocked lock!");
-#endif
- if (lkp->lk_waitcount)
- wakeup(&lkp->lk_sharecount);
- break;
-
- case LK_DRAIN:
- /*
- * Check that we do not already hold the lock, as it can
- * never drain if we do. Unfortunately, we have no way to
- * check for holding a shared lock, but at least we can
- * check for an exclusive one.
- */
- if (WEHOLDIT(lkp, pid, cpu_id))
- panic("lockmgr: draining against myself");
- /*
- * If we are just polling, check to see if we will sleep.
- */
- if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
- lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
- error = EBUSY;
- break;
- }
- ACQUIRE(lkp, error, extflags, 1,
- ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
- lkp->lk_sharecount != 0 ||
- lkp->lk_waitcount != 0));
- if (error)
- break;
- lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
- SETHOLDER(lkp, pid, cpu_id);
- lkp->lk_exclusivecount = 1;
- break;
-
- default:
- panic("lockmgr: unknown locktype request %d",
- flags & LK_TYPE_MASK);
- /* NOTREACHED */
- }
- if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
- ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
- lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
- lkp->lk_flags &= ~LK_WAITDRAIN;
- wakeup(&lkp->lk_waitcount);
- }
- return (error);
}
-#ifdef DIAGNOSTIC
-/*
- * Print out information about state of a lock. Used by VOP_PRINT
- * routines to display status about contained locks.
- */
-void
-lockmgr_printinfo(__volatile struct lock *lkp)
-{
-
- if (lkp->lk_sharecount)
- printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
- lkp->lk_sharecount);
- else if (lkp->lk_flags & LK_HAVE_EXCL) {
- printf(" lock type %s: EXCL (count %d) by ",
- lkp->lk_wmesg, lkp->lk_exclusivecount);
- printf("pid %d", lkp->lk_lockholder);
- } else
- printf(" not locked");
- if (lkp->lk_waitcount > 0)
- printf(" with %d pending", lkp->lk_waitcount);
-}
-#endif /* DIAGNOSTIC */
-
#if defined(MULTIPROCESSOR)
/*
* Functions for manipulating the kernel_lock. We put them here
* so that they show up in profiles.
*/
-struct __mp_lock kernel_lock;
+struct __mp_lock kernel_lock;
void
_kernel_lock_init(void)
@@ -368,10 +124,4 @@ _kernel_unlock(void)
{
__mp_unlock(&kernel_lock);
}
-
-#ifdef MP_LOCKDEBUG
-/* CPU-dependent timing, needs this to be settable from ddb. */
-int __mp_lock_spinout = 200000000;
-#endif
-
#endif /* MULTIPROCESSOR */
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 7efb00e1d2c..d417012bc1f 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -1,28 +1,20 @@
-/* $OpenBSD: kern_rwlock.c,v 1.17 2011/07/05 03:58:22 weingart Exp $ */
+/* $OpenBSD: kern_rwlock.c,v 1.18 2013/05/01 17:13:05 tedu Exp $ */
/*
* Copyright (c) 2002, 2003 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
+ * Copyright (c) 2011 Thordur Bjornsson <thib@secnorth.net>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
*
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/param.h>
@@ -69,6 +61,9 @@ static const struct rwlock_op {
0,
PLOCK
},
+ { /* Sparse Entry. */
+ 0,
+ },
{ /* RW_DOWNGRADE */
RWLOCK_READ_INCR - RWLOCK_WRLOCK,
0,
@@ -191,7 +186,7 @@ rw_enter(struct rwlock *rwl, int flags)
unsigned long inc, o;
int error;
- op = &rw_ops[flags & RW_OPMASK];
+ op = &rw_ops[(flags & RW_OPMASK) - 1];
inc = op->inc + RW_PROC(curproc) * op->proc_mult;
retry:
@@ -258,6 +253,13 @@ rw_exit(struct rwlock *rwl)
wakeup(rwl);
}
+int
+rw_status(struct rwlock *rwl)
+{
+
+ return (rwl->rwl_owner != 0L);
+}
+
#ifdef DIAGNOSTIC
void
rw_assert_wrlock(struct rwlock *rwl)
@@ -283,3 +285,55 @@ rw_assert_unlocked(struct rwlock *rwl)
panic("%s: lock held", rwl->rwl_name);
}
#endif
+
+/* recursive rwlocks; */
+void
+rrw_init(struct rrwlock *rrwl, char *name)
+{
+ bzero(rrwl, sizeof(struct rrwlock));
+ rw_init(&rrwl->rrwl_lock, name);
+}
+
+int
+rrw_enter(struct rrwlock *rrwl, int flags)
+{
+ int rv;
+
+ if (RWLOCK_OWNER(&rrwl->rrwl_lock) ==
+ (struct proc *)RW_PROC(curproc)) {
+ if (flags & RW_RECURSEFAIL)
+ return (EDEADLK);
+ else {
+ rrwl->rrwl_wcnt++;
+ return (0);
+ }
+ }
+
+ rv = rw_enter(&rrwl->rrwl_lock, flags);
+ if (rv == 0)
+ rrwl->rrwl_wcnt = 1;
+
+ return (rv);
+}
+
+void
+rrw_exit(struct rrwlock *rrwl)
+{
+
+ if (RWLOCK_OWNER(&rrwl->rrwl_lock) ==
+ (struct proc *)RW_PROC(curproc)) {
+ KASSERT(rrwl->rrwl_wcnt > 0);
+ rrwl->rrwl_wcnt--;
+ if (rrwl->rrwl_wcnt != 0)
+ return;
+ }
+
+ rw_exit(&rrwl->rrwl_lock);
+}
+
+int
+rrw_status(struct rrwlock *rrwl)
+{
+
+ return (rw_status(&rrwl->rrwl_lock));
+}