summaryrefslogtreecommitdiff
path: root/sys/arch/i386
diff options
context:
space:
mode:
authorMiod Vallat <miod@cvs.openbsd.org>2007-02-03 16:48:24 +0000
committerMiod Vallat <miod@cvs.openbsd.org>2007-02-03 16:48:24 +0000
commit03be750656a93b86cb10dd6f837caa84ba6a9acd (patch)
treef9013b29fe2fe3e07e461272a9026d79c8574b8b /sys/arch/i386
parentd7cf6ffdf47ce2a23c432949e376688ca2eedfb8 (diff)
Remove unused functionality from lockmgr():
- LK_EXCLUPGRADE is never used.
- LK_REENABLE is never used.
- LK_SETRECURSE is never used. Because of this, the lk_recurselevel field is always zero, so it can be removed too.
- the spinlock version (and LK_SPIN) is never used, since it was decided to use a different locking structure for MP-safe protection.
Tested by many
Diffstat (limited to 'sys/arch/i386')
-rw-r--r--sys/arch/i386/i386/machdep.c18
-rw-r--r--sys/arch/i386/i386/pmap.c40
-rw-r--r--sys/arch/i386/i386/pmapae.c35
3 files changed, 3 insertions, 90 deletions
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 05a6deda792..0f4d4ba1876 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.373 2006/12/23 22:46:13 deraadt Exp $ */
+/* $OpenBSD: machdep.c,v 1.374 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -4384,42 +4384,26 @@ void
i386_intlock(int ipl)
{
if (ipl < IPL_SCHED)
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_EXCLUSIVE|LK_CANRECURSE, 0);
-#else
__mp_lock(&kernel_lock);
-#endif
}
void
i386_intunlock(int ipl)
{
if (ipl < IPL_SCHED)
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_RELEASE, 0);
-#else
__mp_unlock(&kernel_lock);
-#endif
}
void
i386_softintlock(void)
{
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_EXCLUSIVE|LK_CANRECURSE, 0);
-#else
__mp_lock(&kernel_lock);
-#endif
}
void
i386_softintunlock(void)
{
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_RELEASE, 0);
-#else
__mp_unlock(&kernel_lock);
-#endif
}
#endif
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index dda070dbfe0..0871e629546 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.95 2006/09/19 11:06:33 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.96 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -347,21 +347,6 @@
*
* we have the following locks that we must contend with:
*
- * "normal" locks:
- *
- * - pmap_main_lock
- * this lock is used to prevent deadlock and/or provide mutex
- * access to the pmap system. most operations lock the pmap
- * structure first, then they lock the pv_lists (if needed).
- * however, some operations such as pmap_page_protect lock
- * the pv_lists and then lock pmaps. in order to prevent a
- * cycle, we require a mutex lock when locking the pv_lists
- * first. thus, the "pmap = >pv_list" lockers must gain a
- * read-lock on pmap_main_lock before locking the pmap. and
- * the "pv_list => pmap" lockers must gain a write-lock on
- * pmap_main_lock before locking. since only one thread
- * can write-lock a lock at a time, this provides mutex.
- *
* "simple" locks:
*
* - pmap lock (per pmap, part of uvm_object)
@@ -393,30 +378,12 @@
struct simplelock pvalloc_lock;
struct simplelock pmaps_lock;
-#if defined(MULTIPROCESSOR) && 0
-
-struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_SHARED, (void *) 0)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#define PMAP_HEAD_TO_MAP_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, (void *) 0)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#else
-
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define PG_FRAME 0xfffff000 /* page frame mask */
#define PG_LGFRAME 0xffc00000 /* large (4M) page frame mask */
@@ -1286,9 +1253,6 @@ pmap_bootstrap(vaddr_t kva_start)
* init the static-global locks and global lists.
*/
-#if defined(MULTIPROCESSOR) && 0
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
-#endif
simple_lock_init(&pvalloc_lock);
simple_lock_init(&pmaps_lock);
LIST_INIT(&pmaps);
@@ -1873,7 +1837,6 @@ pmap_free_pvpage(void)
/*
* pmap_enter_pv: enter a mapping onto a pv_head lst
*
- * => caller should hold the proper lock on pmap_main_lock
* => caller should have pmap locked
* => we will gain the lock on the pv_head and allocate the new pv_entry
* => caller should adjust ptp's wire_count before calling
@@ -1898,7 +1861,6 @@ pmap_enter_pv(struct pv_head *pvh, struct pv_entry *pve, struct pmap *pmap,
/*
* pmap_remove_pv: try to remove a mapping from a pv_list
*
- * => caller should hold proper lock on pmap_main_lock
* => pmap should be locked
* => caller should hold lock on pv_head [so that attrs can be adjusted]
* => caller should adjust ptp's wire_count and free PTP if needed
diff --git a/sys/arch/i386/i386/pmapae.c b/sys/arch/i386/i386/pmapae.c
index 5e0b97f241c..cef3cf74842 100644
--- a/sys/arch/i386/i386/pmapae.c
+++ b/sys/arch/i386/i386/pmapae.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmapae.c,v 1.6 2006/11/29 22:40:13 miod Exp $ */
+/* $OpenBSD: pmapae.c,v 1.7 2007/02/03 16:48:23 miod Exp $ */
/*
* Copyright (c) 2006 Michael Shalayeff
@@ -397,21 +397,6 @@
*
* we have the following locks that we must contend with:
*
- * "normal" locks:
- *
- * - pmap_main_lock
- * this lock is used to prevent deadlock and/or provide mutex
- * access to the pmap system. most operations lock the pmap
- * structure first, then they lock the pv_lists (if needed).
- * however, some operations such as pmap_page_protect lock
- * the pv_lists and then lock pmaps. in order to prevent a
- * cycle, we require a mutex lock when locking the pv_lists
- * first. thus, the "pmap = >pv_list" lockers must gain a
- * read-lock on pmap_main_lock before locking the pmap. and
- * the "pv_list => pmap" lockers must gain a write-lock on
- * pmap_main_lock before locking. since only one thread
- * can write-lock a lock at a time, this provides mutex.
- *
* "simple" locks:
*
* - pmap lock (per pmap, part of uvm_object)
@@ -440,30 +425,12 @@
* locking data structures
*/
-#if defined(MULTIPROCESSOR) && 0
-
-extern struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_SHARED, (void *) 0)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#define PMAP_HEAD_TO_MAP_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, (void *) 0)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#else
-
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define PG_FRAME 0xffffff000ULL /* page frame mask */
#define PG_LGFRAME 0xfffe00000ULL /* large (2M) page frame mask */