summaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
author Mark Kettenis <kettenis@cvs.openbsd.org> 2015-09-03 18:49:20 +0000
committer Mark Kettenis <kettenis@cvs.openbsd.org> 2015-09-03 18:49:20 +0000
commit d5e598bc619d5c1f365f16ca6739eeae50f8e394 (patch)
tree 37b852ecd7322e9f3f65ae35a818e0666e635575 /sys
parent 4ca51b63f04154ab5d677ff197fe99e35d1fb9e7 (diff)
Fix a race in pmap_page_remove_86() and pmap_page_remove_pae().
ok millert@, tedu@, mlarkin@
Diffstat (limited to 'sys')
-rw-r--r-- sys/arch/i386/i386/pmap.c | 29
-rw-r--r-- sys/arch/i386/i386/pmapae.c | 27
2 files changed, 51 insertions(+), 5 deletions(-)
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index e3811b73ee3..57c2754be49 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.184 2015/08/31 20:42:41 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.185 2015/09/03 18:49:19 kettenis Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -1941,6 +1941,7 @@ void
pmap_page_remove_86(struct vm_page *pg)
{
struct pv_entry *pve;
+ struct pmap *pm;
pt_entry_t *ptes, opte;
TAILQ_HEAD(, vm_page) empty_ptps;
struct vm_page *ptp;
@@ -1952,11 +1953,33 @@ pmap_page_remove_86(struct vm_page *pg)
mtx_enter(&pg->mdpage.pv_mtx);
while ((pve = pg->mdpage.pv_list) != NULL) {
- pg->mdpage.pv_list = pve->pv_next;
pmap_reference(pve->pv_pmap);
+ pm = pve->pv_pmap;
mtx_leave(&pg->mdpage.pv_mtx);
- ptes = pmap_map_ptes_86(pve->pv_pmap); /* locks pmap */
+ ptes = pmap_map_ptes_86(pm); /* locks pmap */
+
+ /*
+ * We dropped the pvlist lock before grabbing the pmap
+ * lock to avoid lock ordering problems. This means
+ * we have to check the pvlist again since somebody
+ * else might have modified it. All we care about is
+ * that the pvlist entry matches the pmap we just
+ * locked. If it doesn't, unlock the pmap and try
+ * again.
+ */
+ mtx_enter(&pg->mdpage.pv_mtx);
+ if ((pve = pg->mdpage.pv_list) == NULL ||
+ pve->pv_pmap != pm) {
+ mtx_leave(&pg->mdpage.pv_mtx);
+ pmap_unmap_ptes_86(pm); /* unlocks pmap */
+ pmap_destroy(pm);
+ mtx_enter(&pg->mdpage.pv_mtx);
+ continue;
+ }
+
+ pg->mdpage.pv_list = pve->pv_next;
+ mtx_leave(&pg->mdpage.pv_mtx);
#ifdef DIAGNOSTIC
if (pve->pv_ptp && (PDE(pve->pv_pmap, pdei(pve->pv_va)) &
diff --git a/sys/arch/i386/i386/pmapae.c b/sys/arch/i386/i386/pmapae.c
index 891f9bddf37..1eb3e15a200 100644
--- a/sys/arch/i386/i386/pmapae.c
+++ b/sys/arch/i386/i386/pmapae.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmapae.c,v 1.45 2015/08/28 05:00:42 mlarkin Exp $ */
+/* $OpenBSD: pmapae.c,v 1.46 2015/09/03 18:49:19 kettenis Exp $ */
/*
* Copyright (c) 2006-2008 Michael Shalayeff
@@ -1178,6 +1178,7 @@ void
pmap_page_remove_pae(struct vm_page *pg)
{
struct pv_entry *pve;
+ struct pmap *pm;
pt_entry_t *ptes, opte;
TAILQ_HEAD(, vm_page) empty_ptps;
struct vm_page *ptp;
@@ -1189,12 +1190,34 @@ pmap_page_remove_pae(struct vm_page *pg)
mtx_enter(&pg->mdpage.pv_mtx);
while ((pve = pg->mdpage.pv_list) != NULL) {
- pg->mdpage.pv_list = pve->pv_next;
pmap_reference(pve->pv_pmap);
+ pm = pve->pv_pmap;
mtx_leave(&pg->mdpage.pv_mtx);
ptes = pmap_map_ptes_pae(pve->pv_pmap); /* locks pmap */
+ /*
+ * We dropped the pvlist lock before grabbing the pmap
+ * lock to avoid lock ordering problems. This means
+ * we have to check the pvlist again since somebody
+ * else might have modified it. All we care about is
+ * that the pvlist entry matches the pmap we just
+ * locked. If it doesn't, unlock the pmap and try
+ * again.
+ */
+ mtx_enter(&pg->mdpage.pv_mtx);
+ if ((pve = pg->mdpage.pv_list) == NULL ||
+ pve->pv_pmap != pm) {
+ mtx_leave(&pg->mdpage.pv_mtx);
+ pmap_unmap_ptes_pae(pm); /* unlocks pmap */
+ pmap_destroy(pm);
+ mtx_enter(&pg->mdpage.pv_mtx);
+ continue;
+ }
+
+ pg->mdpage.pv_list = pve->pv_next;
+ mtx_leave(&pg->mdpage.pv_mtx);
+
#ifdef DIAGNOSTIC
if (pve->pv_ptp && (PDE(pve->pv_pmap, pdei(pve->pv_va)) &
PG_FRAME)