summary refs log tree commit diff
path: root/sys/arch
diff options
context:
space:
mode:
authorMark Kettenis <kettenis@cvs.openbsd.org>2015-07-10 10:07:32 +0000
committerMark Kettenis <kettenis@cvs.openbsd.org>2015-07-10 10:07:32 +0000
commit040105acb5c44016d07d7b3da28a8c5df5499d9d (patch)
tree82247902fa8522a737dd8756470a54b52691a928 /sys/arch
parent0a3d7d83a4b3a0753a2d91981a65ef5f50525b15 (diff)
Don't call pool_put(9) while holding a mutex. Instead collect pv entries in
a list and put them back into the pool after releasing the mutex. This prevents a lock ordering problem between the per-pmap mutexes and the kernel lock that arises because pool_put(9) may grab the kernel lock when it decides to free a pool page. This seems to make the i386 pmap mpsafe enough to run the reaper without holding the kernel lock. ok sthen@ (who helped me a lot debugging this)
Diffstat (limited to 'sys/arch')
-rw-r--r--sys/arch/i386/i386/pmap.c21
1 file changed, 15 insertions, 6 deletions
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index 8b900fd4520..f6a30be0453 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.180 2015/07/02 16:14:43 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.181 2015/07/10 10:07:31 kettenis Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -454,7 +454,7 @@ pt_entry_t *pmap_map_ptes_86(struct pmap *);
void pmap_unmap_ptes_86(struct pmap *);
void pmap_do_remove_86(struct pmap *, vaddr_t, vaddr_t, int);
void pmap_remove_ptes_86(struct pmap *, struct vm_page *, vaddr_t,
- vaddr_t, vaddr_t, int);
+ vaddr_t, vaddr_t, int, struct pv_entry **);
void *pmap_pv_page_alloc(struct pool *, int, int *);
void pmap_pv_page_free(struct pool *, void *);
@@ -1744,7 +1744,7 @@ pmap_copy_page_86(struct vm_page *srcpg, struct vm_page *dstpg)
void
pmap_remove_ptes_86(struct pmap *pmap, struct vm_page *ptp, vaddr_t ptpva,
- vaddr_t startva, vaddr_t endva, int flags)
+ vaddr_t startva, vaddr_t endva, int flags, struct pv_entry **free_pvs)
{
struct pv_entry *pve;
pt_entry_t *pte = (pt_entry_t *) ptpva;
@@ -1805,8 +1805,10 @@ pmap_remove_ptes_86(struct pmap *pmap, struct vm_page *ptp, vaddr_t ptpva,
/* sync R/M bits */
pmap_sync_flags_pte_86(pg, opte);
pve = pmap_remove_pv(pg, pmap, startva);
- if (pve)
- pmap_free_pv(NULL, pve);
+ if (pve) {
+ pve->pv_next = *free_pvs;
+ *free_pvs = pve;
+ }
/* end of "for" loop: time for next pte */
}
@@ -1831,6 +1833,8 @@ pmap_do_remove_86(struct pmap *pmap, vaddr_t sva, vaddr_t eva, int flags)
paddr_t ptppa;
vaddr_t blkendva;
struct vm_page *ptp;
+ struct pv_entry *pve;
+ struct pv_entry *free_pvs = NULL;
TAILQ_HEAD(, vm_page) empty_ptps;
int shootall;
vaddr_t va;
@@ -1898,7 +1902,7 @@ pmap_do_remove_86(struct pmap *pmap, vaddr_t sva, vaddr_t eva, int flags)
}
}
pmap_remove_ptes_86(pmap, ptp, (vaddr_t)&ptes[atop(va)],
- va, blkendva, flags);
+ va, blkendva, flags, &free_pvs);
/* If PTP is no longer being used, free it. */
if (ptp && ptp->wire_count <= 1) {
@@ -1916,6 +1920,11 @@ pmap_do_remove_86(struct pmap *pmap, vaddr_t sva, vaddr_t eva, int flags)
pmap_unmap_ptes_86(pmap);
pmap_tlb_shootwait();
+ while ((pve = free_pvs) != NULL) {
+ free_pvs = pve->pv_next;
+ pmap_free_pv(pmap, pve);
+ }
+
while ((ptp = TAILQ_FIRST(&empty_ptps)) != NULL) {
TAILQ_REMOVE(&empty_ptps, ptp, pageq);
uvm_pagefree(ptp);