author     Artur Grabowski <art@cvs.openbsd.org>    2002-10-28 19:30:22 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>    2002-10-28 19:30:22 +0000
commit     3a655b60424cb495dd3a09c36837104c7579f8fa (patch)
tree       a0318679eb4d003928846e6fb204594aaaab5f02
parent     40b843b528243d5f0d45ff34cc55d12547f94acd (diff)
Convert sparc pmap from physseg to VM_PAGE_MD.
This allows us to remove some ambiguities in how some functions are
called, remove some diagnostic checks for conditions that can never
happen, and remove the ugly hack with "pmap_initialized". It also
removes some unnecessary overhead in time-critical functions like
pmap_{zero,copy}_page and pmap_{is,clear}_{mod*,ref*}.

miod@ ok
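For context on the mechanism: a port opts into per-page machine-dependent
storage by defining __HAVE_VM_PAGE_MD and a struct vm_page_md in its
vmparam.h; UVM then embeds that struct in every struct vm_page as the
mdpage member, so the pmap can reach it directly from a vm_page pointer.
A minimal sketch of the contract, using the identifiers this commit
introduces (the struct vm_page side is UVM's, shown only for illustration):

	/* MD side: sys/arch/sparc/include/vmparam.h (added by this commit) */
	#define __HAVE_VM_PAGE_MD
	struct vm_page_md {
		struct pvlist pv_head;		/* this page's pv chain */
	};

	/* MI side: UVM embeds it in every page (sketch, not in this diff) */
	struct vm_page {
		/* ... machine-independent fields ... */
		struct vm_page_md mdpage;	/* MD per-page data */
	};

With that in place the pmap writes (pg)->mdpage.pv_head instead of
indexing a separately allocated array.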
-rw-r--r--  sys/arch/sparc/include/pmap.h    |   9
-rw-r--r--  sys/arch/sparc/include/vmparam.h |  25
-rw-r--r--  sys/arch/sparc/sparc/pmap.c      | 183
3 files changed, 77 insertions(+), 140 deletions(-)
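The heart of the conversion is visible in pvhead() below: the pv list
head used to live in an array hung off each vm_physmem[] segment,
allocated by pmap_init(); it now lives inside the page's own struct
vm_page. A before/after sketch of the lookup (vm_physseg_find() is the
pre-existing UVM segment lookup, elided between the two hunks):

	static __inline struct pvlist *
	pvhead(int pnum)
	{
		int bank, off;

		bank = vm_physseg_find(pnum, &off);	/* UVM segment lookup */
		if (bank == -1)
			return NULL;			/* unmanaged page */
		/* before: head in a per-segment array allocated by pmap_init() */
		/* return &vm_physmem[bank].pmseg.pv_head[off]; */
		/* after: head embedded in the page's own struct vm_page */
		return &vm_physmem[bank].pgs[off].mdpage.pv_head;
	}

Functions that already receive a struct vm_page * can now take
&pg->mdpage.pv_head directly, which is what lets them drop their
pvhead() NULL checks and the pmap_initialized gate.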
diff --git a/sys/arch/sparc/include/pmap.h b/sys/arch/sparc/include/pmap.h
index d4bc88aba33..03c4ffbe571 100644
--- a/sys/arch/sparc/include/pmap.h
+++ b/sys/arch/sparc/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.33 2002/09/10 18:29:43 art Exp $ */
+/* $OpenBSD: pmap.h,v 1.34 2002/10/28 19:30:21 art Exp $ */
/* $NetBSD: pmap.h,v 1.30 1997/08/04 20:00:47 pk Exp $ */
/*
@@ -187,12 +187,7 @@ typedef struct pmap *pmap_t;
*
* THIS SHOULD BE PART OF THE CORE MAP
*/
-struct pvlist {
- struct pvlist *pv_next; /* next pvlist, if any */
- struct pmap *pv_pmap; /* pmap of this va */
- vaddr_t pv_va; /* virtual address */
- int pv_flags; /* flags (below) */
-};
+/* XXX - struct pvlist moved to vmparam.h because of include ordering issues */
/*
* Flags in pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
diff --git a/sys/arch/sparc/include/vmparam.h b/sys/arch/sparc/include/vmparam.h
index 602110bfb9a..4d73d51d387 100644
--- a/sys/arch/sparc/include/vmparam.h
+++ b/sys/arch/sparc/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.26 2002/07/26 18:36:33 art Exp $ */
+/* $OpenBSD: vmparam.h,v 1.27 2002/10/28 19:30:21 art Exp $ */
/* $NetBSD: vmparam.h,v 1.13 1997/07/12 16:20:03 perry Exp $ */
/*
@@ -114,11 +114,28 @@
/*
* pmap specific data stored in the vm_physmem[] array
*/
-#define __HAVE_PMAP_PHYSSEG
-struct pmap_physseg {
- struct pvlist *pv_head;
+
+
+/* XXX - belongs in pmap.h, but put here because of ordering issues */
+struct pvlist {
+ struct pvlist *pv_next; /* next pvlist, if any */
+ struct pmap *pv_pmap; /* pmap of this va */
+ vaddr_t pv_va; /* virtual address */
+ int pv_flags; /* flags (below) */
};
+#define __HAVE_VM_PAGE_MD
+struct vm_page_md {
+ struct pvlist pv_head;
+};
+
+#define VM_MDPAGE_INIT(pg) do { \
+ (pg)->mdpage.pv_head.pv_next = NULL; \
+ (pg)->mdpage.pv_head.pv_pmap = NULL; \
+ (pg)->mdpage.pv_head.pv_va = 0; \
+ (pg)->mdpage.pv_head.pv_flags = 0; \
+} while (0)
+
#define VM_NFREELIST 1
#define VM_FREELIST_DEFAULT 0
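With __HAVE_VM_PAGE_MD defined, initializing the per-page data becomes
the MI layer's job: UVM is expected to run VM_MDPAGE_INIT() over each
struct vm_page it constructs, so the pmap no longer allocates or zeroes
pv heads itself (the pmap_init() hunk below deletes exactly that
allocation). A hedged sketch of the MI side, assuming a page-array setup
loop of roughly this shape (the loop lives in UVM's uvm_page.c, not in
this diff):

	/* sketch: MI page initialization calling the MD hook */
	for (i = 0; i < npages; i++) {
		/* ... set up pgs[i].phys_addr, free-list linkage, ... */
		VM_MDPAGE_INIT(&pgs[i]);
	}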
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index 8835f82b675..bdfb5a0f18e 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.129 2002/09/10 18:29:43 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.130 2002/10/28 19:30:21 art Exp $ */
/* $NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */
/*
@@ -172,8 +172,7 @@ static u_int VA2PA(caddr_t);
* Given a page number, return the head of its pvlist.
*/
static __inline struct pvlist *
-pvhead(pnum)
- int pnum;
+pvhead(int pnum)
{
int bank, off;
@@ -181,7 +180,7 @@ pvhead(pnum)
if (bank == -1)
return NULL;
- return &vm_physmem[bank].pmseg.pv_head[off];
+ return &vm_physmem[bank].pgs[off].mdpage.pv_head;
}
struct pool pvpool;
@@ -238,12 +237,6 @@ pgt_page_free(struct pool *pp, void *v)
#endif /* SUN4M */
/*
- * XXX - horrible kludge to let us use "managed" pages to map the pv lists.
- * We check this in pmap_enter to see if it's safe to use pvhead.
- */
-static int pmap_initialized = 0;
-
-/*
* Each virtual segment within each pmap is either valid or invalid.
* It is valid if pm_npte[VA_VSEG(va)] is not 0. This does not mean
* it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
@@ -744,7 +737,7 @@ sparc_protection_init4m(void)
static void sortm(struct memarr *, int);
void ctx_alloc(struct pmap *);
void ctx_free(struct pmap *);
-void pv_flushcache(struct pvlist *);
+void pg_flushcache(struct vm_page *);
void kvm_iocache(caddr_t, int);
#ifdef DEBUG
void pm_check(char *, struct pmap *);
@@ -2548,9 +2541,9 @@ pv_link4m(pv, pm, va, nc)
* potentially in the cache. Called only if vactype != VAC_NONE.
*/
void
-pv_flushcache(pv)
- struct pvlist *pv;
+pg_flushcache(struct vm_page *pg)
{
+ struct pvlist *pv = &pg->mdpage.pv_head;
struct pmap *pm;
int s, ctx;
@@ -3364,34 +3357,8 @@ pmap_alloc_cpu(sc)
void
pmap_init()
{
- int n, npages;
- vsize_t size;
- vaddr_t addr;
-
- npages = 0;
- for (n = 0; n < vm_nphysseg; n++)
- npages += vm_physmem[n].end - vm_physmem[n].start;
-
- size = npages * sizeof(struct pvlist);
- size = round_page(size);
- addr = uvm_km_zalloc(kernel_map, size);
- if (addr == 0)
- panic("pmap_init: no memory for pv_list");
-
- for (n = 0; n < vm_nphysseg; n++) {
- vm_physmem[n].pmseg.pv_head = (struct pvlist *)addr;
- addr += (vm_physmem[n].end - vm_physmem[n].start) *
- sizeof(struct pvlist);
- }
-
pool_init(&pvpool, sizeof(struct pvlist), 0, 0, 0, "pvpl", NULL);
- /*
- * We can set it here since it's only used in pmap_enter to see
- * if pv lists have been mapped.
- */
- pmap_initialized = 1;
-
#if defined(SUN4M)
if (CPU_ISSUN4M) {
/*
@@ -4212,9 +4179,7 @@ pmap_rmu4m(pm, va, endva, vr, vs)
#if defined(SUN4) || defined(SUN4C)
void
-pmap_page_protect4_4c(pg, prot)
- struct vm_page *pg;
- vm_prot_t prot;
+pmap_page_protect4_4c(struct vm_page *pg, vm_prot_t prot)
{
struct pvlist *pv, *pv0, *npv;
struct pmap *pm;
@@ -4222,7 +4187,6 @@ pmap_page_protect4_4c(pg, prot)
int flags, nleft, i, s, ctx;
struct regmap *rp;
struct segmap *sp;
- paddr_t pa = VM_PAGE_TO_PHYS(pg);
#ifdef DEBUG
if (!pmap_pa_exists(pa))
@@ -4232,11 +4196,10 @@ pmap_page_protect4_4c(pg, prot)
printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
#endif
/*
- * Skip unmanaged pages, or operations that do not take
- * away write permission.
+ * Skip operations that do not take away write permission.
*/
- pv = pvhead(atop(pa));
- if (!pv || prot & VM_PROT_WRITE)
+ pv = &pg->mdpage.pv_head;
+ if (prot & VM_PROT_WRITE)
return;
write_user_windows(); /* paranoia */
if (prot & VM_PROT_READ) {
@@ -4610,9 +4573,7 @@ useless:
* to read-only (in which case pv_changepte does the trick).
*/
void
-pmap_page_protect4m(pg, prot)
- struct vm_page *pg;
- vm_prot_t prot;
+pmap_page_protect4m(struct vm_page *pg, vm_prot_t prot)
{
struct pvlist *pv, *pv0, *npv;
struct pmap *pm;
@@ -4620,21 +4581,17 @@ pmap_page_protect4m(pg, prot)
int flags, s, ctx;
struct regmap *rp;
struct segmap *sp;
- paddr_t pa = VM_PAGE_TO_PHYS(pg);
#ifdef DEBUG
- if (!pmap_pa_exists(pa))
- panic("pmap_page_protect: no such address: 0x%lx", pa);
if ((pmapdebug & PDB_CHANGEPROT) ||
(pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
#endif
- pv = pvhead(atop(pa));
+ pv = &pg->mdpage.pv_head;
/*
- * Skip unmanaged pages, or operations that do not take
- * away write permission.
+ * Skip operations that do not take away write permission.
*/
- if (!pv || prot & VM_PROT_WRITE)
+ if (prot & VM_PROT_WRITE)
return;
write_user_windows(); /* paranoia */
if (prot & VM_PROT_READ) {
@@ -4923,7 +4880,7 @@ pmap_enter4_4c(pm, va, pa, prot, flags)
* since the pvlist no-cache bit might change as a result of the
* new mapping.
*/
- if (pmap_initialized && (pteproto & PG_TYPE) == PG_OBMEM)
+ if ((pteproto & PG_TYPE) == PG_OBMEM)
pv = pvhead(atop(pa));
else
pv = NULL;
@@ -5302,7 +5259,7 @@ pmap_enter4m(pm, va, pa, prot, flags)
* since the pvlist no-cache bit might change as a result of the
* new mapping.
*/
- if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM && pmap_initialized)
+ if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
pv = pvhead(atop(pa));
else
pv = NULL;
@@ -5755,15 +5712,12 @@ pmap_collect(pm)
* Clear the modify bit for the given physical page.
*/
boolean_t
-pmap_clear_modify4_4c(pg)
- struct vm_page *pg;
+pmap_clear_modify4_4c(struct vm_page *pg)
{
struct pvlist *pv;
- u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
boolean_t ret;
- if ((pv = pvhead(pfn)) == NULL)
- return (0);
+ pv = &pg->mdpage.pv_head;
(void) pv_syncflags4_4c(pv);
ret = pv->pv_flags & PV_MOD;
@@ -5776,14 +5730,11 @@ pmap_clear_modify4_4c(pg)
* Tell whether the given physical page has been modified.
*/
boolean_t
-pmap_is_modified4_4c(pg)
- struct vm_page *pg;
+pmap_is_modified4_4c(struct vm_page *pg)
{
struct pvlist *pv;
- u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
- if ((pv = pvhead(pfn)) == NULL)
- return (0);
+ pv = &pg->mdpage.pv_head;
return (pv->pv_flags & PV_MOD || pv_syncflags4_4c(pv) & PV_MOD);
}
@@ -5792,15 +5743,12 @@ pmap_is_modified4_4c(pg)
* Clear the reference bit for the given physical page.
*/
boolean_t
-pmap_clear_reference4_4c(pg)
- struct vm_page *pg;
+pmap_clear_reference4_4c(struct vm_page *pg)
{
struct pvlist *pv;
- u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
boolean_t ret;
- if ((pv = pvhead(pfn)) == NULL)
- return (0);
+ pv = &pg->mdpage.pv_head;
(void) pv_syncflags4_4c(pv);
ret = pv->pv_flags & PV_REF;
@@ -5813,14 +5761,11 @@ pmap_clear_reference4_4c(pg)
* Tell whether the given physical page has been referenced.
*/
boolean_t
-pmap_is_referenced4_4c(pg)
- struct vm_page *pg;
+pmap_is_referenced4_4c(struct vm_page *pg)
{
struct pvlist *pv;
- u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
- if ((pv = pvhead(pfn)) == NULL)
- return (0);
+ pv = &pg->mdpage.pv_head;
return (pv->pv_flags & PV_REF || pv_syncflags4_4c(pv) & PV_REF);
}
@@ -5841,15 +5786,12 @@ pmap_is_referenced4_4c(pg)
* Clear the modify bit for the given physical page.
*/
boolean_t
-pmap_clear_modify4m(pg)
- struct vm_page *pg;
+pmap_clear_modify4m(struct vm_page *pg)
{
struct pvlist *pv;
- u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
boolean_t ret;
- if ((pv = pvhead(pfn)) == NULL)
- return (0);
+ pv = &pg->mdpage.pv_head;
(void) pv_syncflags4m(pv);
ret = pv->pv_flags & PV_MOD4M;
@@ -5862,14 +5804,12 @@ pmap_clear_modify4m(pg)
* Tell whether the given physical page has been modified.
*/
boolean_t
-pmap_is_modified4m(pg)
- struct vm_page *pg;
+pmap_is_modified4m(struct vm_page *pg)
{
struct pvlist *pv;
- u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
- if ((pv = pvhead(pfn)) == NULL)
- return (0);
+ pv = &pg->mdpage.pv_head;
+
return (pv->pv_flags & PV_MOD4M || pv_syncflags4m(pv) & PV_MOD4M);
}
@@ -5877,15 +5817,12 @@ pmap_is_modified4m(pg)
* Clear the reference bit for the given physical page.
*/
boolean_t
-pmap_clear_reference4m(pg)
- struct vm_page *pg;
+pmap_clear_reference4m(struct vm_page *pg)
{
struct pvlist *pv;
- u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
boolean_t ret;
- if ((pv = pvhead(pfn)) == NULL)
- return (0);
+ pv = &pg->mdpage.pv_head;
(void) pv_syncflags4m(pv);
ret = pv->pv_flags & PV_REF4M;
@@ -5898,14 +5835,12 @@ pmap_clear_reference4m(pg)
* Tell whether the given physical page has been referenced.
*/
boolean_t
-pmap_is_referenced4m(pg)
- struct vm_page *pg;
+pmap_is_referenced4m(struct vm_page *pg)
{
struct pvlist *pv;
- u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
- if ((pv = pvhead(pfn)) == NULL)
- return (0);
+ pv = &pg->mdpage.pv_head;
+
return (pv->pv_flags & PV_REF4M || pv_syncflags4m(pv) & PV_REF4M);
}
#endif /* 4m */
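All eight mod/ref hooks above now share one shape: take the vm_page,
grab its embedded pv head, sync the MMU bits, and test or clear a flag.
A condensed sketch of the post-conversion pattern, using
pmap_clear_modify4m as the model (the final two lines are assumed from
the function's contract; the hunk above elides them):

	boolean_t
	pmap_clear_modify4m(struct vm_page *pg)
	{
		struct pvlist *pv = &pg->mdpage.pv_head;
		boolean_t ret;

		(void) pv_syncflags4m(pv);	/* pull MMU mod/ref bits into pv_flags */
		ret = pv->pv_flags & PV_MOD4M;
		pv->pv_flags &= ~PV_MOD4M;	/* assumed: clear, per the function name */
		return (ret);
	}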
@@ -5925,16 +5860,14 @@ pmap_zero_page4_4c(struct vm_page *pg)
paddr_t pa = VM_PAGE_TO_PHYS(pg);
caddr_t va;
int pte;
- struct pvlist *pv;
- if (pmap_initialized && (pv = pvhead(atop(pa))) != NULL) {
- /*
- * The following might not be necessary since the page
- * is being cleared because it is about to be allocated,
- * i.e., is in use by no one.
- */
- pv_flushcache(pv);
- }
+ /*
+ * The following might not be necessary since the page
+ * is being cleared because it is about to be allocated,
+ * i.e., is in use by no one.
+ */
+ pg_flushcache(pg);
+
pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
va = vpage[0];
@@ -5959,17 +5892,14 @@ pmap_copy_page4_4c(struct vm_page *srcpg, struct vm_page *dstpg)
paddr_t dst = VM_PAGE_TO_PHYS(dstpg);
caddr_t sva, dva;
int spte, dpte;
- struct pvlist *pv;
- pv = pvhead(atop(src));
- if (pv && CACHEINFO.c_vactype == VAC_WRITEBACK)
- pv_flushcache(pv);
+ if (CACHEINFO.c_vactype == VAC_WRITEBACK)
+ pg_flushcache(srcpg);
spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
- pv = pvhead(atop(dst));
- if (pv && CACHEINFO.c_vactype != VAC_NONE)
- pv_flushcache(pv);
+ if (CACHEINFO.c_vactype != VAC_NONE)
+ pg_flushcache(dstpg);
dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
@@ -5995,22 +5925,20 @@ void
pmap_zero_page4m(struct vm_page *pg)
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
- int pte;
- struct pvlist *pv;
- static int *ptep;
static vaddr_t va;
+ static int *ptep;
+ int pte;
if (ptep == NULL)
ptep = getptep4m(pmap_kernel(), (va = (vaddr_t)vpage[0]));
- if (pmap_initialized && (pv = pvhead(atop(pa))) != NULL &&
- CACHEINFO.c_vactype != VAC_NONE) {
+ if (CACHEINFO.c_vactype != VAC_NONE) {
/*
* The following might not be necessary since the page
* is being cleared because it is about to be allocated,
* i.e., is in use by no one.
*/
- pv_flushcache(pv);
+ pg_flushcache(pg);
}
pte = (SRMMU_TEPTE | (atop(pa) << SRMMU_PPNSHIFT) | PPROT_N_RWX);
@@ -6041,26 +5969,23 @@ pmap_copy_page4m(struct vm_page *srcpg, struct vm_page *dstpg)
{
paddr_t src = VM_PAGE_TO_PHYS(srcpg);
paddr_t dst = VM_PAGE_TO_PHYS(dstpg);
- int spte, dpte;
- struct pvlist *pv;
static int *sptep, *dptep;
static vaddr_t sva, dva;
+ int spte, dpte;
if (sptep == NULL) {
sptep = getptep4m(pmap_kernel(), (sva = (vaddr_t)vpage[0]));
dptep = getptep4m(pmap_kernel(), (dva = (vaddr_t)vpage[1]));
}
- pv = pvhead(atop(src));
- if (pv && CACHEINFO.c_vactype == VAC_WRITEBACK)
- pv_flushcache(pv);
+ if (CACHEINFO.c_vactype == VAC_WRITEBACK)
+ pg_flushcache(srcpg);
spte = SRMMU_TEPTE | SRMMU_PG_C | (atop(src) << SRMMU_PPNSHIFT) |
PPROT_N_RX;
- pv = pvhead(atop(dst));
- if (pv && CACHEINFO.c_vactype != VAC_NONE)
- pv_flushcache(pv);
+ if (CACHEINFO.c_vactype != VAC_NONE)
+ pg_flushcache(dstpg);
dpte = (SRMMU_TEPTE | (atop(dst) << SRMMU_PPNSHIFT) | PPROT_N_RWX);
if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)