-rw-r--r--   sys/arch/alpha/alpha/pmap.c   |  29
-rw-r--r--   sys/arch/amd64/amd64/pmap.c   |  38
-rw-r--r--   sys/arch/arm/arm/pmap.c       |  24
-rw-r--r--   sys/arch/i386/i386/machdep.c  |  18
-rw-r--r--   sys/arch/i386/i386/pmap.c     |  40
-rw-r--r--   sys/arch/i386/i386/pmapae.c   |  35
-rw-r--r--   sys/kern/kern_lock.c          | 431
-rw-r--r--   sys/sys/lock.h                |  93
-rw-r--r--   sys/sys/sched.h               |  24
9 files changed, 67 insertions, 665 deletions
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index f39ff355ac1..dc9ac1e59c1 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.48 2006/11/29 12:24:15 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.49 2007/02/03 16:48:21 miod Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
@@ -337,20 +337,6 @@ u_long pmap_asn_generation[ALPHA_MAXPROCS]; /* current ASN generation */
* This pmap module uses two types of locks: `normal' (sleep)
* locks and `simple' (spin) locks. They are used as follows:
*
- * READ/WRITE SPIN LOCKS
- * ---------------------
- *
- * * pmap_main_lock - This lock is used to prevent deadlock and/or
- * provide mutex access to the pmap module. Most operations lock
- * the pmap first, then PV lists as needed. However, some operations,
- * such as pmap_page_protect(), lock the PV lists before locking
- * the pmaps. To prevent deadlock, we require a mutex lock on the
- * pmap module if locking in the PV->pmap direction. This is
- * implemented by acquiring a (shared) read lock on pmap_main_lock
- * if locking pmap->PV and a (exclusive) write lock if locking in
- * the PV->pmap direction. Since only one thread can hold a write
- * lock at a time, this provides the mutex.
- *
* SIMPLE LOCKS
* ------------
*
@@ -380,25 +366,13 @@ u_long pmap_asn_generation[ALPHA_MAXPROCS]; /* current ASN generation */
* with the pmap already locked by the caller (which will be
* an interface function).
*/
-struct lock pmap_main_lock;
struct simplelock pmap_all_pmaps_slock;
struct simplelock pmap_growkernel_slock;
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-#define PMAP_MAP_TO_HEAD_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-#define PMAP_HEAD_TO_MAP_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-#else
#define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
-#endif /* MULTIPROCESSOR || LOCKDEBUG */
#if defined(MULTIPROCESSOR)
/*
@@ -967,7 +941,6 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
/*
* Initialize the locks.
*/
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
simple_lock_init(&pmap_all_pmaps_slock);
/*
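
The comment block removed above documented the old pmap_main_lock protocol: pmap->PV lockers took a shared (read) lock, while PV->pmap lockers such as pmap_page_protect() took an exclusive (write) lock, so the two opposite lock orders could never be in flight at the same time. The following is a minimal userland sketch of that deadlock-avoidance idea using POSIX rwlocks; the lock and function names are hypothetical and not part of the patch.

#include <pthread.h>

/* Hypothetical stand-ins for pmap_main_lock, a pmap lock and a PV-list lock. */
static pthread_rwlock_t main_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pvlist_lock = PTHREAD_MUTEX_INITIALIZER;

/* pmap -> PV direction: many threads may do this concurrently. */
void
map_to_head(void)
{
	pthread_rwlock_rdlock(&main_lock);	/* PMAP_MAP_TO_HEAD_LOCK() */
	pthread_mutex_lock(&pmap_lock);
	pthread_mutex_lock(&pvlist_lock);
	/* ... modify the PV list for a mapping ... */
	pthread_mutex_unlock(&pvlist_lock);
	pthread_mutex_unlock(&pmap_lock);
	pthread_rwlock_unlock(&main_lock);	/* PMAP_MAP_TO_HEAD_UNLOCK() */
}

/*
 * PV -> pmap direction (e.g. pmap_page_protect()): exclusive, so no
 * map_to_head() caller can hold locks in the opposite order at the
 * same time, which is what prevented the deadlock.
 */
void
head_to_map(void)
{
	pthread_rwlock_wrlock(&main_lock);	/* PMAP_HEAD_TO_MAP_LOCK() */
	pthread_mutex_lock(&pvlist_lock);
	pthread_mutex_lock(&pmap_lock);
	/* ... walk PV entries back to their pmaps ... */
	pthread_mutex_unlock(&pmap_lock);
	pthread_mutex_unlock(&pvlist_lock);
	pthread_rwlock_unlock(&main_lock);	/* PMAP_HEAD_TO_MAP_UNLOCK() */
}
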
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index dd8eb7082b6..016e52fc058 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.16 2007/01/15 23:19:05 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.17 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
@@ -215,21 +215,6 @@
*
* we have the following locks that we must contend with:
*
- * "normal" locks:
- *
- * - pmap_main_lock
- * this lock is used to prevent deadlock and/or provide mutex
- * access to the pmap system. most operations lock the pmap
- * structure first, then they lock the pv_lists (if needed).
- * however, some operations such as pmap_page_protect lock
- * the pv_lists and then lock pmaps. in order to prevent a
- * cycle, we require a mutex lock when locking the pv_lists
- * first. thus, the "pmap = >pv_list" lockers must gain a
- * read-lock on pmap_main_lock before locking the pmap. and
- * the "pv_list => pmap" lockers must gain a write-lock on
- * pmap_main_lock before locking. since only one thread
- * can write-lock a lock at a time, this provides mutex.
- *
* "simple" locks:
*
* - pmap lock (per pmap, part of uvm_object)
@@ -272,28 +257,12 @@ paddr_t DMPDpa;
struct simplelock pmaps_lock;
-#if (defined(MULTIPROCESSOR) || defined(LOCKDEBUG)) && 0
-struct lock pmap_main_lock;
-#define PMAP_MAP_TO_HEAD_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-
-#define PMAP_HEAD_TO_MAP_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-
-#else
-
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define COUNT(x) /* nothing */
/*
@@ -850,9 +819,6 @@ pmap_bootstrap(vaddr_t kva_start, paddr_t max_pa)
* init the static-global locks and global lists.
*/
-#if (defined(MULTIPROCESSOR) || defined(LOCKDEBUG)) && 0
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
-#endif
simple_lock_init(&pmaps_lock);
LIST_INIT(&pmaps);
@@ -1022,7 +988,6 @@ pmap_init(void)
/*
* pmap_enter_pv: enter a mapping onto a pv_head lst
*
- * => caller should hold the proper lock on pmap_main_lock
* => caller should have pmap locked
* => we will gain the lock on the pv_head and allocate the new pv_entry
* => caller should adjust ptp's wire_count before calling
@@ -1047,7 +1012,6 @@ pmap_enter_pv(struct pv_head *pvh, struct pv_entry *pve, struct pmap *pmap,
/*
* pmap_remove_pv: try to remove a mapping from a pv_list
*
- * => caller should hold proper lock on pmap_main_lock
* => pmap should be locked
* => caller should hold lock on pv_head [so that attrs can be adjusted]
* => caller should adjust ptp's wire_count and free PTP if needed
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
index 42aaa0a0dc7..6f234a8f70b 100644
--- a/sys/arch/arm/arm/pmap.c
+++ b/sys/arch/arm/arm/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.8 2006/05/26 17:11:40 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.9 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -311,23 +311,10 @@ boolean_t pmap_initialized;
* Misc. locking data structures
*/
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-static struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-#define PMAP_HEAD_TO_MAP_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-#else
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
#define pmap_acquire_pmap_lock(pm) \
do { \
@@ -695,7 +682,6 @@ do { \
/*
* pmap_enter_pv: enter a mapping onto a vm_page lst
*
- * => caller should hold the proper lock on pmap_main_lock
* => caller should have pmap locked
* => we will gain the lock on the vm_page and allocate the new pv_entry
* => caller should adjust ptp's wire_count before calling
@@ -755,7 +741,6 @@ pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
/*
* pmap_remove_pv: try to remove a mapping from a pv_list
*
- * => caller should hold proper lock on pmap_main_lock
* => pmap should be locked
* => caller should hold lock on vm_page [so that attrs can be adjusted]
* => caller should adjust ptp's wire_count and free PTP if needed
@@ -3966,13 +3951,6 @@ pmap_bootstrap(pd_entry_t *kernel_l1pt, vaddr_t vstart, vaddr_t vend)
&pmap_kernel_l2dtable_kva, NULL);
/*
- * init the static-global locks and global pmap list.
- */
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
-#endif
-
- /*
* We can now initialise the first L1's metadata.
*/
SLIST_INIT(&l1_list);
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 05a6deda792..0f4d4ba1876 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.373 2006/12/23 22:46:13 deraadt Exp $ */
+/* $OpenBSD: machdep.c,v 1.374 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -4384,42 +4384,26 @@ void
i386_intlock(int ipl)
{
if (ipl < IPL_SCHED)
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_EXCLUSIVE|LK_CANRECURSE, 0);
-#else
__mp_lock(&kernel_lock);
-#endif
}
void
i386_intunlock(int ipl)
{
if (ipl < IPL_SCHED)
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_RELEASE, 0);
-#else
__mp_unlock(&kernel_lock);
-#endif
}
void
i386_softintlock(void)
{
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_EXCLUSIVE|LK_CANRECURSE, 0);
-#else
__mp_lock(&kernel_lock);
-#endif
}
void
i386_softintunlock(void)
{
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_RELEASE, 0);
-#else
__mp_unlock(&kernel_lock);
-#endif
}
#endif
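
The #ifdef notdef alternative deleted here passed LK_CANRECURSE to spinlockmgr() because interrupt entry may take the kernel lock while the interrupted code on the same CPU already holds it. The __mp_lock that remains is a counting, per-CPU-recursive lock, so no equivalent flag is needed. A small sketch of that nesting follows; the caller is hypothetical, and only i386_intlock()/i386_intunlock() come from the hunk above.

/*
 * Hypothetical sketch: nesting the kernel lock from interrupt context.
 * If the interrupted code on this CPU already did __mp_lock(&kernel_lock),
 * taking it again here only bumps the per-CPU recursion count.
 */
void
example_intr_frame(int ipl)
{
	i386_intlock(ipl);	/* __mp_lock(&kernel_lock) if ipl < IPL_SCHED */
	/* ... run the handler under the big lock ... */
	i386_intunlock(ipl);	/* matching __mp_unlock(&kernel_lock) */
}
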
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index dda070dbfe0..0871e629546 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.95 2006/09/19 11:06:33 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.96 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -347,21 +347,6 @@
*
* we have the following locks that we must contend with:
*
- * "normal" locks:
- *
- * - pmap_main_lock
- * this lock is used to prevent deadlock and/or provide mutex
- * access to the pmap system. most operations lock the pmap
- * structure first, then they lock the pv_lists (if needed).
- * however, some operations such as pmap_page_protect lock
- * the pv_lists and then lock pmaps. in order to prevent a
- * cycle, we require a mutex lock when locking the pv_lists
- * first. thus, the "pmap = >pv_list" lockers must gain a
- * read-lock on pmap_main_lock before locking the pmap. and
- * the "pv_list => pmap" lockers must gain a write-lock on
- * pmap_main_lock before locking. since only one thread
- * can write-lock a lock at a time, this provides mutex.
- *
* "simple" locks:
*
* - pmap lock (per pmap, part of uvm_object)
@@ -393,30 +378,12 @@
struct simplelock pvalloc_lock;
struct simplelock pmaps_lock;
-#if defined(MULTIPROCESSOR) && 0
-
-struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_SHARED, (void *) 0)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#define PMAP_HEAD_TO_MAP_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, (void *) 0)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#else
-
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define PG_FRAME 0xfffff000 /* page frame mask */
#define PG_LGFRAME 0xffc00000 /* large (4M) page frame mask */
@@ -1286,9 +1253,6 @@ pmap_bootstrap(vaddr_t kva_start)
* init the static-global locks and global lists.
*/
-#if defined(MULTIPROCESSOR) && 0
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
-#endif
simple_lock_init(&pvalloc_lock);
simple_lock_init(&pmaps_lock);
LIST_INIT(&pmaps);
@@ -1873,7 +1837,6 @@ pmap_free_pvpage(void)
/*
* pmap_enter_pv: enter a mapping onto a pv_head lst
*
- * => caller should hold the proper lock on pmap_main_lock
* => caller should have pmap locked
* => we will gain the lock on the pv_head and allocate the new pv_entry
* => caller should adjust ptp's wire_count before calling
@@ -1898,7 +1861,6 @@ pmap_enter_pv(struct pv_head *pvh, struct pv_entry *pve, struct pmap *pmap,
/*
* pmap_remove_pv: try to remove a mapping from a pv_list
*
- * => caller should hold proper lock on pmap_main_lock
* => pmap should be locked
* => caller should hold lock on pv_head [so that attrs can be adjusted]
* => caller should adjust ptp's wire_count and free PTP if needed
diff --git a/sys/arch/i386/i386/pmapae.c b/sys/arch/i386/i386/pmapae.c
index 5e0b97f241c..cef3cf74842 100644
--- a/sys/arch/i386/i386/pmapae.c
+++ b/sys/arch/i386/i386/pmapae.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmapae.c,v 1.6 2006/11/29 22:40:13 miod Exp $ */
+/* $OpenBSD: pmapae.c,v 1.7 2007/02/03 16:48:23 miod Exp $ */
/*
* Copyright (c) 2006 Michael Shalayeff
@@ -397,21 +397,6 @@
*
* we have the following locks that we must contend with:
*
- * "normal" locks:
- *
- * - pmap_main_lock
- * this lock is used to prevent deadlock and/or provide mutex
- * access to the pmap system. most operations lock the pmap
- * structure first, then they lock the pv_lists (if needed).
- * however, some operations such as pmap_page_protect lock
- * the pv_lists and then lock pmaps. in order to prevent a
- * cycle, we require a mutex lock when locking the pv_lists
- * first. thus, the "pmap = >pv_list" lockers must gain a
- * read-lock on pmap_main_lock before locking the pmap. and
- * the "pv_list => pmap" lockers must gain a write-lock on
- * pmap_main_lock before locking. since only one thread
- * can write-lock a lock at a time, this provides mutex.
- *
* "simple" locks:
*
* - pmap lock (per pmap, part of uvm_object)
@@ -440,30 +425,12 @@
* locking data structures
*/
-#if defined(MULTIPROCESSOR) && 0
-
-extern struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_SHARED, (void *) 0)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#define PMAP_HEAD_TO_MAP_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, (void *) 0)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#else
-
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define PG_FRAME 0xffffff000ULL /* page frame mask */
#define PG_LGFRAME 0xfffe00000ULL /* large (2M) page frame mask */
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 94413ebd95f..f52906b2d01 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_lock.c,v 1.22 2006/01/03 15:34:21 jmc Exp $ */
+/* $OpenBSD: kern_lock.c,v 1.23 2007/02/03 16:48:23 miod Exp $ */
/*
* Copyright (c) 1995
@@ -62,42 +62,20 @@ void playback_stacktrace(int *, int);
*/
#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
-#if defined(MULTIPROCESSOR) /* { */
-#define COUNT_CPU(cpu_id, x) \
- curcpu()->ci_spin_locks += (x)
-#else
-u_long spin_locks;
-#define COUNT_CPU(cpu_id, x) spin_locks += (x)
-#endif /* MULTIPROCESSOR */ /* } */
-
#define COUNT(lkp, p, cpu_id, x) \
-do { \
- if ((lkp)->lk_flags & LK_SPIN) \
- COUNT_CPU((cpu_id), (x)); \
- else \
- (p)->p_locks += (x); \
-} while (/*CONSTCOND*/0)
+ (p)->p_locks += (x)
#else
#define COUNT(lkp, p, cpu_id, x)
-#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
-#ifndef SPINLOCK_SPIN_HOOK /* from <machine/lock.h> */
-#define SPINLOCK_SPIN_HOOK /* nothing */
-#endif
-
-#define INTERLOCK_ACQUIRE(lkp, flags, s) \
+#define INTERLOCK_ACQUIRE(lkp, flags) \
do { \
- if ((flags) & LK_SPIN) \
- s = spllock(); \
simple_lock(&(lkp)->lk_interlock); \
} while (/*CONSTCOND*/ 0)
-#define INTERLOCK_RELEASE(lkp, flags, s) \
+#define INTERLOCK_RELEASE(lkp, flags) \
do { \
simple_unlock(&(lkp)->lk_interlock); \
- if ((flags) & LK_SPIN) \
- splx(s); \
} while (/*CONSTCOND*/ 0)
#ifdef DDB /* { */
@@ -115,109 +93,41 @@ int simple_lock_debugger = 0;
#define SLOCK_TRACE() /* nothing */
#endif /* } */
-#if defined(LOCKDEBUG)
-#if defined(DDB)
-#define SPINLOCK_SPINCHECK_DEBUGGER Debugger()
-#else
-#define SPINLOCK_SPINCHECK_DEBUGGER /* nothing */
-#endif
-
-#define SPINLOCK_SPINCHECK_DECL \
- /* 32-bits of count -- wrap constitutes a "spinout" */ \
- uint32_t __spinc = 0
-
-#define SPINLOCK_SPINCHECK \
-do { \
- if (++__spinc == 0) { \
- lock_printf("LK_SPIN spinout, excl %d, share %d\n", \
- lkp->lk_exclusivecount, lkp->lk_sharecount); \
- if (lkp->lk_exclusivecount) \
- lock_printf("held by CPU %lu\n", \
- (u_long) lkp->lk_cpu); \
- if (lkp->lk_lock_file) \
- lock_printf("last locked at %s:%d\n", \
- lkp->lk_lock_file, lkp->lk_lock_line); \
- if (lkp->lk_unlock_file) \
- lock_printf("last unlocked at %s:%d\n", \
- lkp->lk_unlock_file, lkp->lk_unlock_line); \
- SLOCK_TRACE(); \
- SPINLOCK_SPINCHECK_DEBUGGER; \
- } \
-} while (/*CONSTCOND*/ 0)
-#else
-#define SPINLOCK_SPINCHECK_DECL /* nothing */
-#define SPINLOCK_SPINCHECK /* nothing */
-#endif /* LOCKDEBUG && DDB */
-
/*
* Acquire a resource.
*/
#define ACQUIRE(lkp, error, extflags, drain, wanted) \
- if ((extflags) & LK_SPIN) { \
- int interlocked; \
- SPINLOCK_SPINCHECK_DECL; \
- \
- if ((drain) == 0) \
+do { \
+ for (error = 0; wanted; ) { \
+ if ((drain)) \
+ (lkp)->lk_flags |= LK_WAITDRAIN; \
+ else \
(lkp)->lk_waitcount++; \
- for (interlocked = 1;;) { \
- SPINLOCK_SPINCHECK; \
- if (wanted) { \
- if (interlocked) { \
- INTERLOCK_RELEASE((lkp), \
- LK_SPIN, s); \
- interlocked = 0; \
- } \
- SPINLOCK_SPIN_HOOK; \
- } else if (interlocked) { \
- break; \
- } else { \
- INTERLOCK_ACQUIRE((lkp), LK_SPIN, s); \
- interlocked = 1; \
- } \
- } \
+ /* XXX Cast away volatile. */ \
+ error = ltsleep((drain) ? \
+ (void *)&(lkp)->lk_flags : (void *)(lkp), \
+ (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo, \
+ &(lkp)->lk_interlock); \
if ((drain) == 0) \
(lkp)->lk_waitcount--; \
- KASSERT((wanted) == 0); \
- error = 0; /* sanity */ \
- } else { \
- for (error = 0; wanted; ) { \
- if ((drain)) \
- (lkp)->lk_flags |= LK_WAITDRAIN; \
- else \
- (lkp)->lk_waitcount++; \
- /* XXX Cast away volatile. */ \
- error = ltsleep((drain) ? \
- (void *)&(lkp)->lk_flags : \
- (void *)(lkp), (lkp)->lk_prio, \
- (lkp)->lk_wmesg, (lkp)->lk_timo, \
- &(lkp)->lk_interlock); \
- if ((drain) == 0) \
- (lkp)->lk_waitcount--; \
- if (error) \
- break; \
- if ((extflags) & LK_SLEEPFAIL) { \
- error = ENOLCK; \
- break; \
- } \
+ if (error) \
+ break; \
+ if ((extflags) & LK_SLEEPFAIL) { \
+ error = ENOLCK; \
+ break; \
} \
- }
+ } \
+} while (0)
#define SETHOLDER(lkp, pid, cpu_id) \
-do { \
- if ((lkp)->lk_flags & LK_SPIN) \
- (lkp)->lk_cpu = cpu_id; \
- else \
- (lkp)->lk_lockholder = pid; \
-} while (/*CONSTCOND*/0)
+ (lkp)->lk_lockholder = (pid)
#define WEHOLDIT(lkp, pid, cpu_id) \
- (((lkp)->lk_flags & LK_SPIN) != 0 ? \
- ((lkp)->lk_cpu == (cpu_id)) : \
- ((lkp)->lk_lockholder == (pid)))
+ (lkp)->lk_lockholder == (pid)
#define WAKEUP_WAITER(lkp) \
do { \
- if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) { \
+ if ((lkp)->lk_waitcount) { \
/* XXX Cast away volatile. */ \
wakeup((void *)(lkp)); \
} \
@@ -240,37 +150,15 @@ struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
TAILQ_HEAD(, lock) spinlock_list =
TAILQ_HEAD_INITIALIZER(spinlock_list);
+#endif /* LOCKDEBUG */ /* } */
#define HAVEIT(lkp) \
do { \
- if ((lkp)->lk_flags & LK_SPIN) { \
- int s = spllock(); \
- SPINLOCK_LIST_LOCK(); \
- /* XXX Cast away volatile. */ \
- TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
- lk_list); \
- SPINLOCK_LIST_UNLOCK(); \
- splx(s); \
- } \
} while (/*CONSTCOND*/0)
#define DONTHAVEIT(lkp) \
do { \
- if ((lkp)->lk_flags & LK_SPIN) { \
- int s = spllock(); \
- SPINLOCK_LIST_LOCK(); \
- /* XXX Cast away volatile. */ \
- TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp), \
- lk_list); \
- SPINLOCK_LIST_UNLOCK(); \
- splx(s); \
- } \
} while (/*CONSTCOND*/0)
-#else
-#define HAVEIT(lkp) /* nothing */
-
-#define DONTHAVEIT(lkp) /* nothing */
-#endif /* LOCKDEBUG */ /* } */
#if defined(LOCKDEBUG)
/*
@@ -304,13 +192,9 @@ lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
bzero(lkp, sizeof(struct lock));
simple_lock_init(&lkp->lk_interlock);
lkp->lk_flags = flags & LK_EXTFLG_MASK;
- if (flags & LK_SPIN)
- lkp->lk_cpu = LK_NOCPU;
- else {
- lkp->lk_lockholder = LK_NOPROC;
- lkp->lk_prio = prio;
- lkp->lk_timo = timo;
- }
+ lkp->lk_lockholder = LK_NOPROC;
+ lkp->lk_prio = prio;
+ lkp->lk_timo = timo;
lkp->lk_wmesg = wmesg; /* just a name for spin locks */
#if defined(LOCKDEBUG)
lkp->lk_lock_file = NULL;
@@ -324,14 +208,14 @@ lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
int
lockstatus(struct lock *lkp)
{
- int s = 0, lock_type = 0;
+ int lock_type = 0;
- INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
+ INTERLOCK_ACQUIRE(lkp, lkp->lk_flags);
if (lkp->lk_exclusivecount != 0)
lock_type = LK_EXCLUSIVE;
else if (lkp->lk_sharecount != 0)
lock_type = LK_SHARED;
- INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
+ INTERLOCK_RELEASE(lkp, lkp->lk_flags);
return (lock_type);
}
@@ -349,35 +233,21 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
pid_t pid;
int extflags;
cpuid_t cpu_id;
- int s = 0;
struct proc *p = curproc;
error = 0;
- INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
+ INTERLOCK_ACQUIRE(lkp, lkp->lk_flags);
if (flags & LK_INTERLOCK)
simple_unlock(interlkp);
extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
-#ifdef DIAGNOSTIC /* { */
- /*
- * Don't allow spins on sleep locks and don't allow sleeps
- * on spin locks.
- */
- if ((flags ^ lkp->lk_flags) & LK_SPIN)
- panic("lockmgr: sleep/spin mismatch");
-#endif /* } */
-
- if (extflags & LK_SPIN) {
- pid = LK_KERNPROC;
- } else {
#ifdef DIAGNOSTIC
- if (p == NULL)
- panic("lockmgr: process context required");
+ if (p == NULL)
+ panic("lockmgr: process context required");
#endif
- /* Process context required. */
- pid = p->p_pid;
- }
+ /* Process context required. */
+ pid = p->p_pid;
cpu_id = CPU_NUMBER();
/*
@@ -388,8 +258,6 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
* further requests of any sort will result in a panic. The bits
* selected for these two flags are chosen so that they will be set
* in memory that is freed (freed memory is filled with 0xdeadbeef).
- * The final release is permitted to give a new lease on life to
- * the lock by specifying LK_REENABLE.
*/
if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC
@@ -401,8 +269,7 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */
lkp->lk_flags &= ~LK_DRAINING;
- if ((flags & LK_REENABLE) == 0)
- lkp->lk_flags |= LK_DRAINED;
+ lkp->lk_flags |= LK_DRAINED;
}
/*
@@ -449,7 +316,6 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
panic("lockmgr: not holding exclusive lock");
lkp->lk_sharecount += lkp->lk_exclusivecount;
lkp->lk_exclusivecount = 0;
- lkp->lk_recurselevel = 0;
lkp->lk_flags &= ~LK_HAVE_EXCL;
SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
@@ -460,20 +326,6 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
WAKEUP_WAITER(lkp);
break;
- case LK_EXCLUPGRADE:
- /*
- * If another process is ahead of us to get an upgrade,
- * then we want to fail rather than have an intervening
- * exclusive access.
- */
- if (lkp->lk_flags & LK_WANT_UPGRADE) {
- lkp->lk_sharecount--;
- COUNT(lkp, p, cpu_id, -1);
- error = EBUSY;
- break;
- }
- /* fall into normal upgrade */
-
case LK_UPGRADE:
/*
* Upgrade a shared lock to an exclusive one. If another
@@ -517,8 +369,6 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
- if (extflags & LK_SETRECURSE)
- lkp->lk_recurselevel = 1;
COUNT(lkp, p, cpu_id, 1);
break;
}
@@ -536,8 +386,7 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
/*
* Recursive lock.
*/
- if ((extflags & LK_CANRECURSE) == 0 &&
- lkp->lk_recurselevel == 0) {
+ if ((extflags & LK_CANRECURSE) == 0) {
if (extflags & LK_RECURSEFAIL) {
error = EDEADLK;
break;
@@ -545,9 +394,6 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
panic("lockmgr: locking against myself");
}
lkp->lk_exclusivecount++;
- if (extflags & LK_SETRECURSE &&
- lkp->lk_recurselevel == 0)
- lkp->lk_recurselevel = lkp->lk_exclusivecount;
COUNT(lkp, p, cpu_id, 1);
break;
}
@@ -586,27 +432,16 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
- if (extflags & LK_SETRECURSE)
- lkp->lk_recurselevel = 1;
COUNT(lkp, p, cpu_id, 1);
break;
case LK_RELEASE:
if (lkp->lk_exclusivecount != 0) {
if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
- if (lkp->lk_flags & LK_SPIN) {
- panic("lockmgr: processor %lu, not "
- "exclusive lock holder %lu "
- "unlocking", cpu_id, lkp->lk_cpu);
- } else {
- panic("lockmgr: pid %d, not "
- "exclusive lock holder %d "
- "unlocking", pid,
- lkp->lk_lockholder);
- }
+ panic("lockmgr: pid %d, not exclusive lock "
+ "holder %d unlocking",
+ pid, lkp->lk_lockholder);
}
- if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
- lkp->lk_recurselevel = 0;
lkp->lk_exclusivecount--;
COUNT(lkp, p, cpu_id, -1);
if (lkp->lk_exclusivecount == 0) {
@@ -662,142 +497,27 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
#endif
HAVEIT(lkp);
lkp->lk_exclusivecount = 1;
- /* XXX unlikely that we'd want this */
- if (extflags & LK_SETRECURSE)
- lkp->lk_recurselevel = 1;
COUNT(lkp, p, cpu_id, 1);
break;
default:
- INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
+ INTERLOCK_RELEASE(lkp, lkp->lk_flags);
panic("lockmgr: unknown locktype request %d",
flags & LK_TYPE_MASK);
/* NOTREACHED */
}
- if ((lkp->lk_flags & (LK_WAITDRAIN | LK_SPIN)) == LK_WAITDRAIN &&
+ if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
((lkp->lk_flags &
(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
lkp->lk_flags &= ~LK_WAITDRAIN;
wakeup((void *)&lkp->lk_flags);
}
- INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
+ INTERLOCK_RELEASE(lkp, lkp->lk_flags);
return (error);
}
/*
- * For a recursive spinlock held one or more times by the current CPU,
- * release all N locks, and return N.
- * Intended for use in mi_switch() shortly before context switching.
- */
-
-#ifdef notyet
-int
-#if defined(LOCKDEBUG)
-_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
-#else
-spinlock_release_all(__volatile struct lock *lkp)
-#endif
-{
- int s, count;
- cpuid_t cpu_id;
-
- KASSERT(lkp->lk_flags & LK_SPIN);
-
- INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
-
- cpu_id = CPU_NUMBER();
- count = lkp->lk_exclusivecount;
-
- if (count != 0) {
-#ifdef DIAGNOSTIC
- if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
- panic("spinlock_release_all: processor %lu, not "
- "exclusive lock holder %lu "
- "unlocking", (long)cpu_id, lkp->lk_cpu);
- }
-#endif
- lkp->lk_recurselevel = 0;
- lkp->lk_exclusivecount = 0;
- COUNT_CPU(cpu_id, -count);
- lkp->lk_flags &= ~LK_HAVE_EXCL;
- SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
-#if defined(LOCKDEBUG)
- lkp->lk_unlock_file = file;
- lkp->lk_unlock_line = line;
-#endif
- DONTHAVEIT(lkp);
- }
-#ifdef DIAGNOSTIC
- else if (lkp->lk_sharecount != 0)
- panic("spinlock_release_all: release of shared lock!");
- else
- panic("spinlock_release_all: release of unlocked lock!");
-#endif
- INTERLOCK_RELEASE(lkp, LK_SPIN, s);
-
- return (count);
-}
-#endif
-
-/*
- * For a recursive spinlock held one or more times by the current CPU,
- * release all N locks, and return N.
- * Intended for use in mi_switch() right after resuming execution.
- */
-
-#ifdef notyet
-void
-#if defined(LOCKDEBUG)
-_spinlock_acquire_count(__volatile struct lock *lkp, int count,
- const char *file, int line)
-#else
-spinlock_acquire_count(__volatile struct lock *lkp, int count)
-#endif
-{
- int s, error;
- cpuid_t cpu_id;
-
- KASSERT(lkp->lk_flags & LK_SPIN);
-
- INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
-
- cpu_id = CPU_NUMBER();
-
-#ifdef DIAGNOSTIC
- if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
- panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
-#endif
- /*
- * Try to acquire the want_exclusive flag.
- */
- ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL));
- lkp->lk_flags |= LK_WANT_EXCL;
- /*
- * Wait for shared locks and upgrades to finish.
- */
- ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
- (lkp->lk_flags & LK_WANT_UPGRADE));
- lkp->lk_flags &= ~LK_WANT_EXCL;
- lkp->lk_flags |= LK_HAVE_EXCL;
- SETHOLDER(lkp, LK_NOPROC, cpu_id);
-#if defined(LOCKDEBUG)
- lkp->lk_lock_file = file;
- lkp->lk_lock_line = line;
-#endif
- HAVEIT(lkp);
- if (lkp->lk_exclusivecount != 0)
- panic("lockmgr: non-zero exclusive count");
- lkp->lk_exclusivecount = count;
- lkp->lk_recurselevel = 1;
- COUNT_CPU(cpu_id, count);
-
- INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
-}
-#endif
-
-/*
* Print out information about state of a lock. Used by VOP_PRINT
* routines to display ststus about contained locks.
*/
@@ -811,13 +531,10 @@ lockmgr_printinfo(__volatile struct lock *lkp)
else if (lkp->lk_flags & LK_HAVE_EXCL) {
printf(" lock type %s: EXCL (count %d) by ",
lkp->lk_wmesg, lkp->lk_exclusivecount);
- if (lkp->lk_flags & LK_SPIN)
- printf("processor %lu", lkp->lk_cpu);
- else
- printf("pid %d", lkp->lk_lockholder);
+ printf("pid %d", lkp->lk_lockholder);
} else
printf(" not locked");
- if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
+ if (lkp->lk_waitcount > 0)
printf(" with %d pending", lkp->lk_waitcount);
}
@@ -1147,60 +864,6 @@ simple_lock_only_held(volatile struct simplelock *lp, const char *where)
* so that they show up in profiles.
*/
-/*
- * XXX Instead of using struct lock for the kernel lock and thus requiring us
- * XXX to implement simplelocks, causing all sorts of fine-grained locks all
- * XXX over our tree getting activated consuming both time and potentially
- * XXX introducing locking protocol bugs.
- */
-#ifdef notyet
-
-struct lock kernel_lock;
-
-void
-_kernel_lock_init(void)
-{
- spinlockinit(&kernel_lock, "klock", 0);
-}
-
-/*
- * Acquire/release the kernel lock. Intended for use in the scheduler
- * and the lower half of the kernel.
- */
-void
-_kernel_lock(int flag)
-{
- SCHED_ASSERT_UNLOCKED();
- spinlockmgr(&kernel_lock, flag, 0);
-}
-
-void
-_kernel_unlock(void)
-{
- spinlockmgr(&kernel_lock, LK_RELEASE, 0);
-}
-
-/*
- * Acquire/release the kernel_lock on behalf of a process. Intended for
- * use in the top half of the kernel.
- */
-void
-_kernel_proc_lock(struct proc *p)
-{
- SCHED_ASSERT_UNLOCKED();
- spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
- p->p_flag |= P_BIGLOCK;
-}
-
-void
-_kernel_proc_unlock(struct proc *p)
-{
- p->p_flag &= ~P_BIGLOCK;
- spinlockmgr(&kernel_lock, LK_RELEASE, 0);
-}
-
-#else
-
struct __mp_lock kernel_lock;
void
@@ -1247,8 +910,6 @@ _kernel_proc_unlock(struct proc *p)
__mp_unlock(&kernel_lock);
}
-#endif
-
#ifdef MP_LOCKDEBUG
/* CPU-dependent timing, needs this to be settable from ddb. */
int __mp_lock_spinout = 200000000;
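
With the LK_SPIN code paths gone, every lockmgr() lock is a sleep lock and, as the DIAGNOSTIC check above now insists unconditionally, requires process context. A minimal consumer-side sketch of the surviving interface; the lock, wait message and priority are illustrative and not taken from the patch.

#include <sys/param.h>
#include <sys/lock.h>

struct lock example_lock;			/* hypothetical lock */

void
example_init(void)
{
	/* sleep priority, wait message, no timeout, no external flags */
	lockinit(&example_lock, PVFS, "explck", 0, 0);
}

int
example_modify(void)
{
	int error;

	/* exclusive sleep lock; may block, so process context only */
	error = lockmgr(&example_lock, LK_EXCLUSIVE, NULL);
	if (error)
		return (error);
	/* ... modify the protected resource ... */
	return (lockmgr(&example_lock, LK_RELEASE, NULL));
}
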
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index c3453ae6d60..5a5d4e69015 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: lock.h,v 1.15 2005/11/19 02:18:01 pedro Exp $ */
+/* $OpenBSD: lock.h,v 1.16 2007/02/03 16:48:23 miod Exp $ */
/*
* Copyright (c) 1995
@@ -55,8 +55,7 @@ struct lock {
u_int lk_flags; /* see below */
int lk_sharecount; /* # of accepted shared locks */
int lk_waitcount; /* # of processes sleeping for lock */
- short lk_exclusivecount; /* # of recursive exclusive locks */
- short lk_recurselevel; /* lvl above which recursion ok */
+ int lk_exclusivecount; /* # of recursive exclusive locks */
/*
* This is the sleep message for sleep locks, and a simple name
@@ -64,34 +63,14 @@ struct lock {
*/
char *lk_wmesg; /* resource sleeping (for tsleep) */
- union {
- struct {
- /* pid of exclusive lock holder */
- pid_t lk_sleep_lockholder;
+ /* pid of exclusive lock holder */
+ pid_t lk_lockholder;
- /* priority at which to sleep */
- int lk_sleep_prio;
+ /* priority at which to sleep */
+ int lk_prio;
- /* maximum sleep time (for tsleep) */
- int lk_sleep_timo;
- } lk_un_sleep;
- struct {
- /* CPU ID of exclusive lock holder */
- cpuid_t lk_spin_cpu;
-#if defined(LOCKDEBUG)
- TAILQ_ENTRY(lock) lk_spin_list;
-#endif
- } lk_un_spin;
- } lk_un;
-
-#define lk_lockholder lk_un.lk_un_sleep.lk_sleep_lockholder
-#define lk_prio lk_un.lk_un_sleep.lk_sleep_prio
-#define lk_timo lk_un.lk_un_sleep.lk_sleep_timo
-
-#define lk_cpu lk_un.lk_un_spin.lk_spin_cpu
-#if defined(LOCKDEBUG)
-#define lk_list lk_un.lk_un_spin.lk_spin_list
-#endif
+ /* maximum sleep time (for tsleep) */
+ int lk_timo;
#if defined(LOCKDEBUG)
const char *lk_lock_file;
@@ -117,12 +96,6 @@ struct lock {
* have upgraded to an exclusive lock. Other processes may get
* exclusive access to the resource between the time that the upgrade
* is requested and the time that it is granted.
- * LK_EXCLUPGRADE - the process must hold a shared lock that it wants to
- * have upgraded to an exclusive lock. If the request succeeds, no
- * other processes will have gotten exclusive access to the resource
- * between the time that the upgrade is requested and the time that
- * it is granted. However, if another process has already requested
- * an upgrade, the request will fail (see error returns below).
* LK_DOWNGRADE - the process must hold an exclusive lock that it wants
* to have downgraded to a shared lock. If the process holds multiple
* (recursive) exclusive locks, they will all be downgraded to shared
@@ -138,7 +111,6 @@ struct lock {
#define LK_SHARED 0x00000001 /* shared lock */
#define LK_EXCLUSIVE 0x00000002 /* exclusive lock */
#define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */
-#define LK_EXCLUPGRADE 0x00000004 /* first shared-to-exclusive upgrade */
#define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */
#define LK_RELEASE 0x00000006 /* release any type of lock */
#define LK_DRAIN 0x00000007 /* wait for all lock activity to end */
@@ -146,17 +118,13 @@ struct lock {
* External lock flags.
*
* The first three flags may be set in lock_init to set their mode permanently,
- * or passed in as arguments to the lock manager. The LK_REENABLE flag may be
- * set only at the release of a lock obtained by drain.
+ * or passed in as arguments to the lock manager.
*/
#define LK_EXTFLG_MASK 0x00700070 /* mask of external flags */
#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */
#define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */
#define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */
-#define LK_REENABLE 0x00000080 /* lock is be reenabled after drain */
-#define LK_SETRECURSE 0x00100000 /* other locks while we have it OK */
#define LK_RECURSEFAIL 0x00200000 /* fail if recursive exclusive lock */
-#define LK_SPIN 0x00400000 /* lock spins instead of sleeps */
/*
* Internal lock flags.
*
@@ -209,16 +177,6 @@ int lockmgr(__volatile struct lock *, u_int flags, struct simplelock *);
void lockmgr_printinfo(__volatile struct lock *);
int lockstatus(struct lock *);
-#if (0 && defined(MULTIPROCESSOR)) || defined(LOCKDEBUG)
-#define spinlockinit(lkp, name, flags) \
- lockinit((lkp), 0, (name), 0, (flags) | LK_SPIN)
-#define spinlockmgr(lkp, flags, intrlk) \
- lockmgr((lkp), (flags) | LK_SPIN, (intrlk))
-#else
-#define spinlockinit(lkp, name, flags) (void)(lkp)
-#define spinlockmgr(lkp, flags, intrlk) (0)
-#endif
-
#if defined(LOCKDEBUG)
int _spinlock_release_all(__volatile struct lock *, const char *, int);
void _spinlock_acquire_count(__volatile struct lock *, int, const char *,
@@ -238,37 +196,14 @@ void spinlock_acquire_count(__volatile struct lock *, int);
#define LOCK_ASSERT(x) /* nothing */
#endif
-#if defined(MULTIPROCESSOR)
-/*
- * XXX Instead of using struct lock for the kernel lock and thus requiring us
- * XXX to implement simplelocks, causing all sorts of fine-grained locks all
- * XXX over our tree getting activated consuming both time and potentially
- * XXX introducing locking protocol bugs.
- */
-#ifdef notyet
-
-extern struct lock kernel_lock;
-
-/*
- * XXX Simplelock macros used at "trusted" places.
- */
-#define SIMPLELOCK simplelock
-#define SIMPLE_LOCK_INIT simple_lock_init
-#define SIMPLE_LOCK simple_lock
-#define SIMPLE_UNLOCK simple_unlock
-
-#endif
-
-#else
-
+#if !defined(MULTIPROCESSOR)
/*
* XXX Simplelock macros used at "trusted" places.
*/
-#define SIMPLELOCK simplelock
-#define SIMPLE_LOCK_INIT simple_lock_init
-#define SIMPLE_LOCK simple_lock
-#define SIMPLE_UNLOCK simple_unlock
-
+#define SIMPLELOCK simplelock
+#define SIMPLE_LOCK_INIT simple_lock_init
+#define SIMPLE_LOCK simple_lock
+#define SIMPLE_UNLOCK simple_unlock
#endif
#endif /* !_LOCK_H_ */
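
After this header change, LK_EXCLUPGRADE, LK_REENABLE, LK_SETRECURSE and LK_SPIN are gone, while LK_NOWAIT, LK_SLEEPFAIL, LK_CANRECURSE and LK_RECURSEFAIL survive. A hedged sketch of how the surviving flags are typically combined; the helper names are hypothetical.

/*
 * Try-lock: with LK_NOWAIT, lockmgr() returns EBUSY instead of
 * sleeping when the lock cannot be granted immediately.
 */
int
example_trylock(struct lock *lkp)
{
	return (lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT, NULL));
}

/*
 * Relock guard: without LK_CANRECURSE, locking against ourselves
 * panics; LK_RECURSEFAIL turns that into an EDEADLK return instead
 * (see the lockmgr() exclusive-lock hunk in kern_lock.c above).
 */
int
example_relock(struct lock *lkp)
{
	return (lockmgr(lkp, LK_EXCLUSIVE | LK_RECURSEFAIL, NULL));
}
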
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 1256bece474..f45101cd590 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: sched.h,v 1.13 2005/06/17 22:33:34 niklas Exp $ */
+/* $OpenBSD: sched.h,v 1.14 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
@@ -154,27 +154,6 @@ void roundrobin(struct cpu_info *);
* XXX over our tree getting activated consuming both time and potentially
* XXX introducing locking protocol bugs.
*/
-#ifdef notyet
-
-extern struct simplelock sched_lock;
-
-#define SCHED_ASSERT_LOCKED() KASSERT(simple_lock_held(&sched_lock))
-#define SCHED_ASSERT_UNLOCKED() KASSERT(simple_lock_held(&sched_lock) == 0)
-
-#define SCHED_LOCK(s) \
-do { \
- s = splsched(); \
- simple_lock(&sched_lock); \
-} while (/* CONSTCOND */ 0)
-
-#define SCHED_UNLOCK(s) \
-do { \
- simple_unlock(&sched_lock); \
- splx(s); \
-} while (/* CONSTCOND */ 0)
-
-#else
-
extern struct __mp_lock sched_lock;
#define SCHED_ASSERT_LOCKED() KASSERT(__mp_lock_held(&sched_lock))
@@ -192,7 +171,6 @@ do { \
splx(s); \
} while (/* CONSTCOND */ 0)
-#endif
void sched_lock_idle(void);
void sched_unlock_idle(void);
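
After this change only the __mp_lock-based variant of the scheduler lock remains: SCHED_LOCK() raises the IPL with splsched() and takes sched_lock, and SCHED_UNLOCK() reverses both. A minimal sketch of the usual calling pattern; the function and its body are hypothetical.

/*
 * Hypothetical caller: scheduler state is only touched between
 * SCHED_LOCK() and SCHED_UNLOCK(), i.e. at splsched() and holding
 * the sched_lock __mp_lock.
 */
void
example_setrun(struct proc *p)
{
	int s;

	SCHED_LOCK(s);
	SCHED_ASSERT_LOCKED();
	/* ... p->p_stat = SRUN; setrunqueue(p); ... */
	SCHED_UNLOCK(s);
}
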