author     Miod Vallat <miod@cvs.openbsd.org>  2007-03-04 16:59:04 +0000
committer  Miod Vallat <miod@cvs.openbsd.org>  2007-03-04 16:59:04 +0000
commit     817929a0127ee8d7b07b2f1f869431e62b345e2e (patch)
tree       f59f90bfbb5bc26bd6d3e74bff520397e39cf267 /sys
parent     5fa648a68b3d7b01f597fb2f097ecf46592782b8 (diff)
Oops, bring back pmap_collect1() and the check for pmap_kernel() in
pmap_collect() as we do an explicit pmap_collect(pmap_kernel()) in dire memory situations.
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/m68k/m68k/pmap_motorola.c | 146
1 file changed, 135 insertions(+), 11 deletions(-)
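
For context, the "dire memory situations" mentioned in the commit message are pagedaemon-style shortfalls: when the free page count cannot be brought up to target, the kernel asks pmap to garbage-collect kernel PT pages. Below is a minimal sketch of such a caller, assuming UVM's uvmexp counters; the helper name is hypothetical and not part of this commit:

    void
    example_reclaim_pass(void)      /* hypothetical caller, for illustration */
    {
            /*
             * If the pagedaemon cannot reach its free target, fall
             * back to reclaiming kernel page table pages.  With this
             * commit, pmap_collect(pmap_kernel()) takes the
             * pmap_collect1() path over each physical segment.
             */
            if (uvmexp.free < uvmexp.freetarg)
                    pmap_collect(pmap_kernel());
    }
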
diff --git a/sys/arch/m68k/m68k/pmap_motorola.c b/sys/arch/m68k/m68k/pmap_motorola.c
index 292dfcb26f5..b4d2433cf01 100644
--- a/sys/arch/m68k/m68k/pmap_motorola.c
+++ b/sys/arch/m68k/m68k/pmap_motorola.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap_motorola.c,v 1.49 2007/02/17 19:08:58 miod Exp $ */
+/* $OpenBSD: pmap_motorola.c,v 1.50 2007/03/04 16:59:03 miod Exp $ */
/*
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -308,6 +308,7 @@ void pmap_changebit(struct vm_page *, int, int);
int pmap_enter_ptpage(pmap_t, vaddr_t);
void pmap_ptpage_addref(vaddr_t);
int pmap_ptpage_delref(vaddr_t);
+void pmap_collect1(paddr_t, paddr_t);
#ifdef PMAP_DEBUG
@@ -777,6 +778,7 @@ void
pmap_remove_flags(pmap, sva, eva, flags)
pmap_t pmap;
vaddr_t sva, eva;
+ int flags;
{
vaddr_t nssva;
pt_entry_t *pte;
@@ -1671,16 +1673,138 @@ pmap_collect(pmap)
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
- /*
- * This process is about to be swapped out; free all of
- * the PT pages by removing the physical mappings for its
- * entire address space. Note: pmap_remove() performs
- * all necessary locking.
- */
- flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
- pmap_remove_flags(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS,
- flags | PRM_SKIPWIRED);
- pmap_update(pmap);
+ if (pmap == pmap_kernel()) {
+ int bank, s;
+
+ /*
+ * XXX This is very bogus. We should handle kernel PT
+ * XXX pages much differently.
+ */
+
+ s = splvm();
+ for (bank = 0; bank < vm_nphysseg; bank++)
+ pmap_collect1(ptoa(vm_physmem[bank].start),
+ ptoa(vm_physmem[bank].end));
+ splx(s);
+ } else {
+ /*
+ * This process is about to be swapped out; free all of
+ * the PT pages by removing the physical mappings for its
+ * entire address space. Note: pmap_remove() performs
+ * all necessary locking.
+ */
+ flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
+ pmap_remove_flags(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS,
+ flags | PRM_SKIPWIRED);
+ pmap_update(pmap);
+ }
+}
+
+/*
+ * pmap_collect1:
+ *
+ * Garbage-collect KPT pages. Helper for the above (bogus)
+ * pmap_collect().
+ *
+ * Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
+ * WAY OF HANDLING PT PAGES!
+ */
+void
+pmap_collect1(startpa, endpa)
+ paddr_t startpa, endpa;
+{
+ paddr_t pa;
+ struct pv_entry *pv;
+ pt_entry_t *pte;
+ paddr_t kpa;
+#ifdef PMAP_DEBUG
+ st_entry_t *ste;
+ int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
+#endif
+
+ for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
+ struct kpt_page *kpt, **pkpt;
+
+ /*
+ * Locate physical pages which are being used as kernel
+ * page table pages.
+ */
+ pv = pa_to_pvh(pa);
+ if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
+ continue;
+ do {
+ if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
+ break;
+ } while ((pv = pv->pv_next));
+ if (pv == NULL)
+ continue;
+#ifdef PMAP_DEBUG
+ if (pv->pv_va < (vaddr_t)Sysmap ||
+ pv->pv_va >= (vaddr_t)Sysmap + MACHINE_MAX_PTSIZE)
+ printf("collect: kernel PT VA out of range\n");
+ else
+ goto ok;
+ pmap_pvdump(pa);
+ continue;
+ok:
+#endif
+ pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
+ while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
+ ;
+ if (pte >= (pt_entry_t *)pv->pv_va)
+ continue;
+
+#ifdef PMAP_DEBUG
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
+ printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
+ pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
+ opmapdebug = pmapdebug;
+ pmapdebug |= PDB_PTPAGE;
+ }
+
+ ste = pv->pv_ptste;
+#endif
+ /*
+ * If all entries were invalid we can remove the page.
+ * We call pmap_remove_mapping() to take care of invalidating
+ * the ST and Sysptmap entries.
+ */
+ pmap_extract(pmap_kernel(), pv->pv_va, &kpa);
+ pmap_remove_mapping(pmap_kernel(), pv->pv_va, PT_ENTRY_NULL,
+ PRM_TFLUSH|PRM_CFLUSH);
+ /*
+ * Use the physical address to locate the original
+ * (kmem_alloc assigned) address for the page and put
+ * that page back on the free list.
+ */
+ for (pkpt = &kpt_used_list, kpt = *pkpt;
+ kpt != NULL;
+ pkpt = &kpt->kpt_next, kpt = *pkpt)
+ if (kpt->kpt_pa == kpa)
+ break;
+#ifdef PMAP_DEBUG
+ if (kpt == NULL)
+ panic("pmap_collect: lost a KPT page");
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
+ printf("collect: %lx (%lx) to free list\n",
+ kpt->kpt_va, kpa);
+#endif
+ *pkpt = kpt->kpt_next;
+ kpt->kpt_next = kpt_free_list;
+ kpt_free_list = kpt;
+#ifdef PMAP_DEBUG
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
+ pmapdebug = opmapdebug;
+
+ if (*ste & SG_V)
+ printf("collect: kernel STE at %p still valid (%x)\n",
+ ste, *ste);
+ ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
+ if (*ste & SG_V)
+ printf("collect: kernel PTmap at %p still valid (%x)\n",
+ ste, *ste);
+#endif
+ }
}
/*
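
Two idioms in pmap_collect1() deserve a note. First, the empty-bodied while loop scans a PT page backwards and decides, from where the scan stopped, whether every entry is invalid. Here is the same test written as a standalone predicate, assuming this file's pt_entry_t, vaddr_t, PG_NV and PAGE_SIZE definitions; the function name is hypothetical:

    static int
    pt_page_all_invalid(vaddr_t va)
    {
            pt_entry_t *pte = (pt_entry_t *)(va + PAGE_SIZE);

            /* Walk back from the end of the page; bail at a valid PTE. */
            while (--pte >= (pt_entry_t *)va)
                    if (*pte != PG_NV)
                            return (0);     /* page still in use */
            return (1);                     /* all invalid; safe to reclaim */
    }
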
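Second, the kpt_used_list walk keeps a pointer to the link being followed (pkpt), so the matching entry can be unlinked without special-casing the list head. A self-contained sketch of the idiom; kpt_unlink() is a hypothetical name and unsigned long stands in for paddr_t:

    #include <stddef.h>

    struct kpt_page {
            struct kpt_page *kpt_next;
            unsigned long    kpt_pa;        /* stand-in for paddr_t */
    };

    static struct kpt_page *
    kpt_unlink(struct kpt_page **head, unsigned long pa)
    {
            struct kpt_page **pkpt, *kpt;

            /*
             * *pkpt is always the link that points at kpt, so a match
             * is removed by redirecting that one link, head or not.
             */
            for (pkpt = head; (kpt = *pkpt) != NULL; pkpt = &kpt->kpt_next)
                    if (kpt->kpt_pa == pa) {
                            *pkpt = kpt->kpt_next;
                            return (kpt);
                    }
            return (NULL);
    }

pmap_collect1() does this walk inline and then pushes the unlinked page onto kpt_free_list.
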