Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/alpha/alpha/pmap.c   | 29
-rw-r--r--  sys/arch/amd64/amd64/pmap.c   | 38
-rw-r--r--  sys/arch/arm/arm/pmap.c       | 24
-rw-r--r--  sys/arch/i386/i386/machdep.c  | 18
-rw-r--r--  sys/arch/i386/i386/pmap.c     | 40
-rw-r--r--  sys/arch/i386/i386/pmapae.c   | 35
6 files changed, 6 insertions, 178 deletions
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index f39ff355ac1..dc9ac1e59c1 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.48 2006/11/29 12:24:15 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.49 2007/02/03 16:48:21 miod Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
@@ -337,20 +337,6 @@ u_long pmap_asn_generation[ALPHA_MAXPROCS]; /* current ASN generation */
* This pmap module uses two types of locks: `normal' (sleep)
* locks and `simple' (spin) locks. They are used as follows:
*
- * READ/WRITE SPIN LOCKS
- * ---------------------
- *
- * * pmap_main_lock - This lock is used to prevent deadlock and/or
- * provide mutex access to the pmap module. Most operations lock
- * the pmap first, then PV lists as needed. However, some operations,
- * such as pmap_page_protect(), lock the PV lists before locking
- * the pmaps. To prevent deadlock, we require a mutex lock on the
- * pmap module if locking in the PV->pmap direction. This is
- * implemented by acquiring a (shared) read lock on pmap_main_lock
- * if locking pmap->PV and a (exclusive) write lock if locking in
- * the PV->pmap direction. Since only one thread can hold a write
- * lock at a time, this provides the mutex.
- *
* SIMPLE LOCKS
* ------------
*
@@ -380,25 +366,13 @@ u_long pmap_asn_generation[ALPHA_MAXPROCS]; /* current ASN generation */
* with the pmap already locked by the caller (which will be
* an interface function).
*/
-struct lock pmap_main_lock;
struct simplelock pmap_all_pmaps_slock;
struct simplelock pmap_growkernel_slock;
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-#define PMAP_MAP_TO_HEAD_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-#define PMAP_HEAD_TO_MAP_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-#else
#define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
-#endif /* MULTIPROCESSOR || LOCKDEBUG */
#if defined(MULTIPROCESSOR)
/*
@@ -967,7 +941,6 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
/*
* Initialize the locks.
*/
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
simple_lock_init(&pmap_all_pmaps_slock);
/*
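[Editor's illustration] The comment block deleted above describes a classic two-direction lock-ordering protocol: lockers going pmap -> PV take the shared side of a reader/writer lock, lockers going PV -> pmap take the exclusive side, so the two acquisition orders can never close a deadlock cycle. A minimal userland sketch of that idea using POSIX rwlocks; every name below is invented for illustration and none of it is the kernel's API:

#include <pthread.h>

/* hypothetical stand-in for the removed pmap_main_lock */
static pthread_rwlock_t main_lock = PTHREAD_RWLOCK_INITIALIZER;

/* pmap -> PV direction: shared, so many such lockers may run at once */
static void map_to_head_lock(void)   { pthread_rwlock_rdlock(&main_lock); }
static void map_to_head_unlock(void) { pthread_rwlock_unlock(&main_lock); }

/*
 * PV -> pmap direction: exclusive, so it can never interleave with
 * the readers above and form a lock-order cycle.
 */
static void head_to_map_lock(void)   { pthread_rwlock_wrlock(&main_lock); }
static void head_to_map_unlock(void) { pthread_rwlock_unlock(&main_lock); }

Because only one thread can hold the write side, any PV -> pmap locker excludes every pmap -> PV locker, which is exactly the mutual exclusion the deleted comment describes. In the i386, amd64 and pmapae variants below the whole block was already compiled out with an "&& 0", so its removal changes no behavior.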
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index dd8eb7082b6..016e52fc058 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.16 2007/01/15 23:19:05 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.17 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
@@ -215,21 +215,6 @@
*
* we have the following locks that we must contend with:
*
- * "normal" locks:
- *
- * - pmap_main_lock
- * this lock is used to prevent deadlock and/or provide mutex
- * access to the pmap system. most operations lock the pmap
- * structure first, then they lock the pv_lists (if needed).
- * however, some operations such as pmap_page_protect lock
- * the pv_lists and then lock pmaps. in order to prevent a
- * cycle, we require a mutex lock when locking the pv_lists
- * first. thus, the "pmap = >pv_list" lockers must gain a
- * read-lock on pmap_main_lock before locking the pmap. and
- * the "pv_list => pmap" lockers must gain a write-lock on
- * pmap_main_lock before locking. since only one thread
- * can write-lock a lock at a time, this provides mutex.
- *
* "simple" locks:
*
* - pmap lock (per pmap, part of uvm_object)
@@ -272,28 +257,12 @@ paddr_t DMPDpa;
struct simplelock pmaps_lock;
-#if (defined(MULTIPROCESSOR) || defined(LOCKDEBUG)) && 0
-struct lock pmap_main_lock;
-#define PMAP_MAP_TO_HEAD_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-
-#define PMAP_HEAD_TO_MAP_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-
-#else
-
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define COUNT(x) /* nothing */
/*
@@ -850,9 +819,6 @@ pmap_bootstrap(vaddr_t kva_start, paddr_t max_pa)
* init the static-global locks and global lists.
*/
-#if (defined(MULTIPROCESSOR) || defined(LOCKDEBUG)) && 0
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
-#endif
simple_lock_init(&pmaps_lock);
LIST_INIT(&pmaps);
@@ -1022,7 +988,6 @@ pmap_init(void)
/*
* pmap_enter_pv: enter a mapping onto a pv_head lst
*
- * => caller should hold the proper lock on pmap_main_lock
* => caller should have pmap locked
* => we will gain the lock on the pv_head and allocate the new pv_entry
* => caller should adjust ptp's wire_count before calling
@@ -1047,7 +1012,6 @@ pmap_enter_pv(struct pv_head *pvh, struct pv_entry *pve, struct pmap *pmap,
/*
* pmap_remove_pv: try to remove a mapping from a pv_list
*
- * => caller should hold proper lock on pmap_main_lock
* => pmap should be locked
* => caller should hold lock on pv_head [so that attrs can be adjusted]
* => caller should adjust ptp's wire_count and free PTP if needed
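[Editor's illustration] The pmap_enter_pv()/pmap_remove_pv() contracts above lose only their pmap_main_lock bullet; the rest of the caller protocol is unchanged. A self-contained sketch of that calling shape, with every type and name invented for illustration rather than taken from the kernel:

#include <stddef.h>

struct pv_entry { struct pv_entry *next; void *pmap; unsigned long va; };
struct pv_head  { struct pv_entry *first; /* guarded by its own lock */ };

/*
 * Per the surviving contract: the pmap is already locked, the
 * pv_entry is preallocated, and the PTP's wire_count was adjusted
 * before the call; this function only links the entry while the
 * pv_head lock is held.
 */
static void
enter_pv(struct pv_head *pvh, struct pv_entry *pve, void *pmap,
    unsigned long va)
{
	pve->pmap = pmap;
	pve->va = va;
	pve->next = pvh->first;		/* pv_head lock held here */
	pvh->first = pve;
}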
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
index 42aaa0a0dc7..6f234a8f70b 100644
--- a/sys/arch/arm/arm/pmap.c
+++ b/sys/arch/arm/arm/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.8 2006/05/26 17:11:40 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.9 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -311,23 +311,10 @@ boolean_t pmap_initialized;
* Misc. locking data structures
*/
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-static struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-#define PMAP_HEAD_TO_MAP_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-#else
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
#define pmap_acquire_pmap_lock(pm) \
do { \
@@ -695,7 +682,6 @@ do { \
/*
* pmap_enter_pv: enter a mapping onto a vm_page lst
*
- * => caller should hold the proper lock on pmap_main_lock
* => caller should have pmap locked
* => we will gain the lock on the vm_page and allocate the new pv_entry
* => caller should adjust ptp's wire_count before calling
@@ -755,7 +741,6 @@ pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
/*
* pmap_remove_pv: try to remove a mapping from a pv_list
*
- * => caller should hold proper lock on pmap_main_lock
* => pmap should be locked
* => caller should hold lock on vm_page [so that attrs can be adjusted]
* => caller should adjust ptp's wire_count and free PTP if needed
@@ -3966,13 +3951,6 @@ pmap_bootstrap(pd_entry_t *kernel_l1pt, vaddr_t vstart, vaddr_t vend)
&pmap_kernel_l2dtable_kva, NULL);
/*
- * init the static-global locks and global pmap list.
- */
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
-#endif
-
- /*
* We can now initialise the first L1's metadata.
*/
SLIST_INIT(&l1_list);
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 05a6deda792..0f4d4ba1876 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.373 2006/12/23 22:46:13 deraadt Exp $ */
+/* $OpenBSD: machdep.c,v 1.374 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -4384,42 +4384,26 @@ void
i386_intlock(int ipl)
{
if (ipl < IPL_SCHED)
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_EXCLUSIVE|LK_CANRECURSE, 0);
-#else
__mp_lock(&kernel_lock);
-#endif
}
void
i386_intunlock(int ipl)
{
if (ipl < IPL_SCHED)
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_RELEASE, 0);
-#else
__mp_unlock(&kernel_lock);
-#endif
}
void
i386_softintlock(void)
{
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_EXCLUSIVE|LK_CANRECURSE, 0);
-#else
__mp_lock(&kernel_lock);
-#endif
}
void
i386_softintunlock(void)
{
-#ifdef notdef
- spinlockmgr(&kernel_lock, LK_RELEASE, 0);
-#else
__mp_unlock(&kernel_lock);
-#endif
}
#endif
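[Editor's illustration] The machdep.c hunks above drop the dead spinlockmgr(&kernel_lock, ...) alternative and keep the __mp_lock()/__mp_unlock() path. What LK_CANRECURSE provided, a lock the same CPU may re-enter while already holding it, is the defining property of such a big kernel lock. A hypothetical, self-contained C11 sketch of that idea, not OpenBSD's __mp_lock implementation:

#include <stdatomic.h>

struct biglock {
	atomic_long owner;	/* CPU id of the holder, -1 if free */
	long depth;		/* recursion count, touched by owner only */
};

void
biglock_enter(struct biglock *l, long cpuid)
{
	if (atomic_load(&l->owner) == cpuid) {
		l->depth++;		/* already ours: just recurse */
		return;
	}
	long free_owner = -1;
	/* spin until the lock is free, then claim it atomically */
	while (!atomic_compare_exchange_weak(&l->owner, &free_owner, cpuid))
		free_owner = -1;	/* CAS overwrote it; reset and retry */
	l->depth = 1;
}

void
biglock_leave(struct biglock *l)
{
	if (--l->depth == 0)
		atomic_store(&l->owner, -1);
}

The ipl < IPL_SCHED test in the surviving code means handlers running above the scheduler priority level never take the big lock at all.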
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index dda070dbfe0..0871e629546 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.95 2006/09/19 11:06:33 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.96 2007/02/03 16:48:23 miod Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -347,21 +347,6 @@
*
* we have the following locks that we must contend with:
*
- * "normal" locks:
- *
- * - pmap_main_lock
- * this lock is used to prevent deadlock and/or provide mutex
- * access to the pmap system. most operations lock the pmap
- * structure first, then they lock the pv_lists (if needed).
- * however, some operations such as pmap_page_protect lock
- * the pv_lists and then lock pmaps. in order to prevent a
- * cycle, we require a mutex lock when locking the pv_lists
- * first. thus, the "pmap = >pv_list" lockers must gain a
- * read-lock on pmap_main_lock before locking the pmap. and
- * the "pv_list => pmap" lockers must gain a write-lock on
- * pmap_main_lock before locking. since only one thread
- * can write-lock a lock at a time, this provides mutex.
- *
* "simple" locks:
*
* - pmap lock (per pmap, part of uvm_object)
@@ -393,30 +378,12 @@
struct simplelock pvalloc_lock;
struct simplelock pmaps_lock;
-#if defined(MULTIPROCESSOR) && 0
-
-struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_SHARED, (void *) 0)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#define PMAP_HEAD_TO_MAP_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, (void *) 0)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#else
-
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define PG_FRAME 0xfffff000 /* page frame mask */
#define PG_LGFRAME 0xffc00000 /* large (4M) page frame mask */
@@ -1286,9 +1253,6 @@ pmap_bootstrap(vaddr_t kva_start)
* init the static-global locks and global lists.
*/
-#if defined(MULTIPROCESSOR) && 0
- spinlockinit(&pmap_main_lock, "pmaplk", 0);
-#endif
simple_lock_init(&pvalloc_lock);
simple_lock_init(&pmaps_lock);
LIST_INIT(&pmaps);
@@ -1873,7 +1837,6 @@ pmap_free_pvpage(void)
/*
* pmap_enter_pv: enter a mapping onto a pv_head lst
*
- * => caller should hold the proper lock on pmap_main_lock
* => caller should have pmap locked
* => we will gain the lock on the pv_head and allocate the new pv_entry
* => caller should adjust ptp's wire_count before calling
@@ -1898,7 +1861,6 @@ pmap_enter_pv(struct pv_head *pvh, struct pv_entry *pve, struct pmap *pmap,
/*
* pmap_remove_pv: try to remove a mapping from a pv_list
*
- * => caller should hold proper lock on pmap_main_lock
* => pmap should be locked
* => caller should hold lock on pv_head [so that attrs can be adjusted]
* => caller should adjust ptp's wire_count and free PTP if needed
diff --git a/sys/arch/i386/i386/pmapae.c b/sys/arch/i386/i386/pmapae.c
index 5e0b97f241c..cef3cf74842 100644
--- a/sys/arch/i386/i386/pmapae.c
+++ b/sys/arch/i386/i386/pmapae.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmapae.c,v 1.6 2006/11/29 22:40:13 miod Exp $ */
+/* $OpenBSD: pmapae.c,v 1.7 2007/02/03 16:48:23 miod Exp $ */
/*
* Copyright (c) 2006 Michael Shalayeff
@@ -397,21 +397,6 @@
*
* we have the following locks that we must contend with:
*
- * "normal" locks:
- *
- * - pmap_main_lock
- * this lock is used to prevent deadlock and/or provide mutex
- * access to the pmap system. most operations lock the pmap
- * structure first, then they lock the pv_lists (if needed).
- * however, some operations such as pmap_page_protect lock
- * the pv_lists and then lock pmaps. in order to prevent a
- * cycle, we require a mutex lock when locking the pv_lists
- * first. thus, the "pmap = >pv_list" lockers must gain a
- * read-lock on pmap_main_lock before locking the pmap. and
- * the "pv_list => pmap" lockers must gain a write-lock on
- * pmap_main_lock before locking. since only one thread
- * can write-lock a lock at a time, this provides mutex.
- *
* "simple" locks:
*
* - pmap lock (per pmap, part of uvm_object)
@@ -440,30 +425,12 @@
* locking data structures
*/
-#if defined(MULTIPROCESSOR) && 0
-
-extern struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_SHARED, (void *) 0)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#define PMAP_HEAD_TO_MAP_LOCK() \
- spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, (void *) 0)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-
-#else
-
#define PMAP_MAP_TO_HEAD_LOCK() /* null */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
#define PMAP_HEAD_TO_MAP_LOCK() /* null */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define PG_FRAME 0xffffff000ULL /* page frame mask */
#define PG_LGFRAME 0xfffe00000ULL /* large (2M) page frame mask */
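[Editor's illustration] The two PAE constants in this final hunk select different slices of a 64-bit PTE in a 36-bit physical address space: PG_FRAME keeps bits 12..35 (a 4KB page frame) and PG_LGFRAME keeps bits 21..35 (a 2MB large-page frame). A hypothetical helper showing the masks in use; pte_to_pa() is invented for illustration:

#include <stdint.h>

#define PG_FRAME	0xffffff000ULL	/* bits 12..35: 4KB page frame */
#define PG_LGFRAME	0xfffe00000ULL	/* bits 21..35: 2MB page frame */

/* strip the permission/status bits, keeping only the physical frame */
static inline uint64_t
pte_to_pa(uint64_t pte, int large)
{
	return pte & (large ? PG_LGFRAME : PG_FRAME);
}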