author     grr <grr@cvs.openbsd.org>    1997-08-17 17:31:38 +0000
committer  grr <grr@cvs.openbsd.org>    1997-08-17 17:31:38 +0000
commit     e63187eca56a3ad82e60da45f9367e9665e9da64 (patch)
tree       fff28a625477cbbe3310e3c7bb53b1054b0c3174 /sys/arch/i386
parent     32b1587779c422bd10454433ffe1cd2c3144f47c (diff)
Back out Mickey's 8/1 pmap.c change, which was misguided and caused
stability problems with swapped/paged-out processes getting segmentation
violations when reactivated. Also add some additional paranoia about
whether an allocation being changed to pageable is actually a page table,
and move some sanity checking from #ifdef DEBUG over to #ifdef DIAGNOSTIC.
Diffstat (limited to 'sys/arch/i386')
-rw-r--r--   sys/arch/i386/i386/pmap.c       29
-rw-r--r--   sys/arch/i386/i386/pmap.old.c   29
2 files changed, 42 insertions(+), 16 deletions(-)
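For quick reference, the snippet below is a condensed sketch of the guard this
commit adds to pmap_pageable(), reproduced in shape from the hunks that follow;
the surrounding function body is elided, so treat it as illustrative only and
see the full diff for the real context:

	/*
	 * Condensed sketch of the new check in pmap_pageable() (see the
	 * diff below; the function body is elided here).  The special
	 * PT-page handling now only runs when the request covers exactly
	 * one kernel page lying between the user stack and the kernel
	 * virtual addresses, instead of firing for any pageable kernel
	 * range.
	 */
	if (pageable &&
	    pmap == pmap_kernel() &&
	    sva >= VM_MAXUSER_ADDRESS && eva <= VM_MAX_ADDRESS &&
	    eva - sva == NBPG) {
		/* ... PT-page bookkeeping, with the new DIAGNOSTIC checks ... */
	}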
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index 0192a847f51..dace4b52baf 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.17 1997/07/28 23:46:10 mickey Exp $ */
+/* $OpenBSD: pmap.c,v 1.18 1997/08/17 17:31:37 grr Exp $ */
/* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
/*
@@ -1538,6 +1538,7 @@ pmap_copy_page(src, dst)
* will specify that these pages are to be wired
* down (or not) as appropriate.
*/
+
void
pmap_pageable(pmap, sva, eva, pageable)
pmap_t pmap;
@@ -1557,18 +1558,23 @@ pmap_pageable(pmap, sva, eva, pageable)
* be all zeros and there is no need to clean it.
* Assumption:
* - PT pages have only one pv_table entry
+ * - PT pages are the only single-page allocations
+ * between the user stack and kernel va's
+ * See also pmap_enter & pmap_protect for rehashes of this...
*/
- if (pmap != pmap_kernel() || !pageable)
- return;
- for ( ; sva < eva; sva += NBPG) {
+ if (pageable &&
+ pmap == pmap_kernel() &&
+ sva >= VM_MAXUSER_ADDRESS && eva <= VM_MAX_ADDRESS &&
+ eva - sva == NBPG) {
register vm_offset_t pa;
register pt_entry_t *pte;
-
-#ifdef DEBUG
+#ifdef DIAGNOSTIC
u_int pind;
register struct pv_entry *pv;
+#endif
+#ifdef DEBUG
if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
printf("pmap_pageable(%x, %x, %x, %x)",
pmap, sva, eva, pageable);
@@ -1582,9 +1588,16 @@ pmap_pageable(pmap, sva, eva, pageable)
pa = pmap_pte_pa(pte);
-#ifdef DEBUG
- if ((pind = pmap_page_index(pa)) == -1)
+#ifdef DIAGNOSTIC
+ if ((*pte & (PG_u | PG_RW)) != (PG_u | PG_RW))
+ printf("pmap_pageable: unexpected pte=%x va %x\n",
+ *pte, sva);
+
+ if ((pind = pmap_page_index(pa)) == -1) {
+ printf("pmap_pageable: invalid pa %x va %x\n",
+ pa, sva);
return;
+ }
pv = &pv_table[pind];
if (pv->pv_va != sva || pv->pv_next) {
diff --git a/sys/arch/i386/i386/pmap.old.c b/sys/arch/i386/i386/pmap.old.c
index 6ba2811f3dd..28c81c0efc0 100644
--- a/sys/arch/i386/i386/pmap.old.c
+++ b/sys/arch/i386/i386/pmap.old.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.old.c,v 1.17 1997/07/28 23:46:10 mickey Exp $ */
+/* $OpenBSD: pmap.old.c,v 1.18 1997/08/17 17:31:37 grr Exp $ */
/* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
/*
@@ -1538,6 +1538,7 @@ pmap_copy_page(src, dst)
* will specify that these pages are to be wired
* down (or not) as appropriate.
*/
+
void
pmap_pageable(pmap, sva, eva, pageable)
pmap_t pmap;
@@ -1557,18 +1558,23 @@ pmap_pageable(pmap, sva, eva, pageable)
* be all zeros and there is no need to clean it.
* Assumption:
* - PT pages have only one pv_table entry
+ * - PT pages are the only single-page allocations
+ * between the user stack and kernel va's
+ * See also pmap_enter & pmap_protect for rehashes of this...
*/
- if (pmap != pmap_kernel() || !pageable)
- return;
- for ( ; sva < eva; sva += NBPG) {
+ if (pageable &&
+ pmap == pmap_kernel() &&
+ sva >= VM_MAXUSER_ADDRESS && eva <= VM_MAX_ADDRESS &&
+ eva - sva == NBPG) {
register vm_offset_t pa;
register pt_entry_t *pte;
-
-#ifdef DEBUG
+#ifdef DIAGNOSTIC
u_int pind;
register struct pv_entry *pv;
+#endif
+#ifdef DEBUG
if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
printf("pmap_pageable(%x, %x, %x, %x)",
pmap, sva, eva, pageable);
@@ -1582,9 +1588,16 @@ pmap_pageable(pmap, sva, eva, pageable)
pa = pmap_pte_pa(pte);
-#ifdef DEBUG
- if ((pind = pmap_page_index(pa)) == -1)
+#ifdef DIAGNOSTIC
+ if ((*pte & (PG_u | PG_RW)) != (PG_u | PG_RW))
+ printf("pmap_pageable: unexpected pte=%x va %x\n",
+ *pte, sva);
+
+ if ((pind = pmap_page_index(pa)) == -1) {
+ printf("pmap_pageable: invalid pa %x va %x\n",
+ pa, sva);
return;
+ }
pv = &pv_table[pind];
if (pv->pv_va != sva || pv->pv_next) {