-rw-r--r--	sys/arch/mvme88k/include/pmap.h	 19
-rw-r--r--	sys/arch/mvme88k/mvme88k/pmap.c	724
2 files changed, 319 insertions(+), 424 deletions(-)
diff --git a/sys/arch/mvme88k/include/pmap.h b/sys/arch/mvme88k/include/pmap.h
index 71b0a667478..39ede11061d 100644
--- a/sys/arch/mvme88k/include/pmap.h
+++ b/sys/arch/mvme88k/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.26 2002/03/14 01:26:39 millert Exp $ */
+/* $OpenBSD: pmap.h,v 1.27 2002/05/07 00:54:34 miod Exp $ */
/*
* Mach Operating System
* Copyright (c) 1991 Carnegie Mellon University
@@ -14,7 +14,6 @@
*/
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
-#define OMRON_PMAP
#include <machine/mmu.h> /* batc_template_t, BATC_MAX, etc.*/
#include <machine/pcb.h> /* pcb_t, etc.*/
@@ -27,6 +26,7 @@ typedef sdt_entry_t *sdt_ptr_t;
*/
typedef struct pmap *pmap_t;
+/* #define PMAP_USE_BATC */
struct pmap {
sdt_ptr_t sdt_paddr; /* physical pointer to sdt */
sdt_ptr_t sdt_vaddr; /* virtual pointer to sdt */
@@ -37,16 +37,15 @@ struct pmap {
/* cpus using this pmap; NCPU must be <= 32 */
u_int32_t cpus_using;
-#ifdef DEBUG
- pmap_t next;
- pmap_t prev;
-#endif
-
- /* for OMRON_PMAP */
+#ifdef PMAP_USE_BATC
batc_template_t i_batc[BATC_MAX]; /* instruction BATCs */
batc_template_t d_batc[BATC_MAX]; /* data BATCs */
- /* end OMRON_PMAP */
+#endif
+#ifdef DEBUG
+ pmap_t next;
+ pmap_t prev;
+#endif
};
#define PMAP_NULL ((pmap_t) 0)
@@ -81,7 +80,7 @@ extern caddr_t vmmap;
#define PMAP_DEACTIVATE(proc) pmap_deactivate(proc)
void pmap_bootstrap(vaddr_t, paddr_t *, paddr_t *, vaddr_t *, vaddr_t *);
-void pmap_cache_ctrl(pmap_t, vaddr_t, vaddr_t, unsigned);
+void pmap_cache_ctrl(pmap_t, vaddr_t, vaddr_t, u_int);
#endif /* _KERNEL */
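Note that after this change the i_batc/d_batc arrays exist in struct pmap only when
PMAP_USE_BATC is defined, so any code touching them must be guarded the same way.
A minimal sketch of the pattern, mirroring what pmap_create() does further down in
this diff (p and i as declared there):

	#ifdef PMAP_USE_BATC
		/* per-pmap BATC state exists only under the option */
		for (i = 0; i < BATC_MAX; i++) {
			p->i_batc[i].bits = 0;
			p->d_batc[i].bits = 0;
		}
	#endif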
diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c
index df791879176..31ef3af7461 100644
--- a/sys/arch/mvme88k/mvme88k/pmap.c
+++ b/sys/arch/mvme88k/mvme88k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.65 2002/03/14 03:15:57 millert Exp $ */
+/* $OpenBSD: pmap.c,v 1.66 2002/05/07 00:54:37 miod Exp $ */
/*
* Copyright (c) 2001, 2002 Miodrag Vallat
* Copyright (c) 1998-2001 Steve Murphree, Jr.
@@ -45,9 +45,6 @@
*
*/
-/* don't want to make them general yet. */
-#define OMRON_PMAP
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -93,24 +90,23 @@ extern vaddr_t virtual_avail, virtual_end;
#define CD_ACTIVATE 0x0000004 /* pmap_activate */
#define CD_KMAP 0x0000008 /* pmap_expand_kmap */
#define CD_MAP 0x0000010 /* pmap_map */
-#define CD_MAPB 0x0000020 /* pmap_map_batc */
-#define CD_CACHE 0x0000040 /* pmap_cache_ctrl */
-#define CD_BOOT 0x0000080 /* pmap_bootstrap */
-#define CD_INIT 0x0000100 /* pmap_init */
-#define CD_CREAT 0x0000200 /* pmap_create */
-#define CD_FREE 0x0000400 /* pmap_release */
-#define CD_DESTR 0x0000800 /* pmap_destroy */
-#define CD_RM 0x0001000 /* pmap_remove */
-#define CD_RMAL 0x0002000 /* pmap_remove_all */
-#define CD_PROT 0x0004000 /* pmap_protect */
-#define CD_EXP 0x0008000 /* pmap_expand */
-#define CD_ENT 0x0010000 /* pmap_enter */
-#define CD_UPD 0x0020000 /* pmap_update */
-#define CD_COL 0x0040000 /* pmap_collect */
-#define CD_CBIT 0x0080000 /* pmap_changebit */
-#define CD_TBIT 0x0100000 /* pmap_testbit */
-#define CD_CREF 0x0200000 /* pmap_clear_reference */
-#define CD_PGMV 0x0400000 /* pagemove */
+#define CD_CACHE 0x0000020 /* pmap_cache_ctrl */
+#define CD_BOOT 0x0000040 /* pmap_bootstrap */
+#define CD_INIT 0x0000080 /* pmap_init */
+#define CD_CREAT 0x0000100 /* pmap_create */
+#define CD_FREE 0x0000200 /* pmap_release */
+#define CD_DESTR 0x0000400 /* pmap_destroy */
+#define CD_RM 0x0000800 /* pmap_remove */
+#define CD_RMAL 0x0001000 /* pmap_remove_all */
+#define CD_PROT 0x0002000 /* pmap_protect */
+#define CD_EXP 0x0004000 /* pmap_expand */
+#define CD_ENT 0x0008000 /* pmap_enter */
+#define CD_UPD 0x0010000 /* pmap_update */
+#define CD_COL 0x0020000 /* pmap_collect */
+#define CD_CBIT 0x0040000 /* pmap_changebit */
+#define CD_TBIT 0x0080000 /* pmap_testbit */
+#define CD_CREF 0x0100000 /* pmap_clear_reference */
+#define CD_PGMV 0x0200000 /* pagemove */
#define CD_ALL 0x0FFFFFC
int pmap_con_dbg = CD_NONE;
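Each CD_ bit gates the diagnostics of one routine and is combined with a verbosity
bit (CD_NORM or CD_FULL, defined outside this hunk). The test idiom used throughout
the file requires both bits to be set:

	#ifdef DEBUG
		/* print only when the routine bit and the level bit are both on */
		if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
			printf("(pmap_map: %x) ...\n", curproc);
	#endif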
@@ -154,13 +150,18 @@ kpdt_entry_t kpdt_free;
#define M188_PDT_SIZE 0
#endif
-#if (defined(MVME187) || defined(MVME197))
+#if defined(MVME187) || defined(MVME197)
#define M1x7_PDT_SIZE (atop(OBIO_SIZE) * sizeof(pt_entry_t))
#else
#define M1x7_PDT_SIZE 0
#endif
+#if defined(MVME188) && (defined(MVME187) || defined(MVME197))
#define OBIO_PDT_SIZE ((brdtyp == BRD_188) ? M188_PDT_SIZE : M1x7_PDT_SIZE)
+#else
+#define OBIO_PDT_SIZE MAX(M188_PDT_SIZE, M1x7_PDT_SIZE)
+#endif
+
#define MAX_KERNEL_PDT_SIZE (KERNEL_PDT_SIZE + OBIO_PDT_SIZE)
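To make the sizing concrete, a worked example with hypothetical numbers (the real
OBIO_SIZE comes from the board headers; MAX() is the usual <sys/param.h> macro, so a
kernel built for a single board family gets a compile-time constant):

	/*
	 * hypothetical sizing, assuming 4KB pages and 4-byte PTEs:
	 *	OBIO_SIZE = 8MB  ->  atop(OBIO_SIZE) = 2048 pages
	 *	M1x7_PDT_SIZE = 2048 * sizeof(pt_entry_t) = 8KB of PTEs
	 */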
/*
@@ -170,13 +171,13 @@ kpdt_entry_t kpdt_free;
vaddr_t phys_map_vaddr1, phys_map_vaddr2;
/*
- * The Modify List
+ * Cached page flags
*
* This is an array, one byte per physical page, which keeps track
- * of modified flags for pages which are no longer containd in any
- * pmap. (for mapped pages, the modified flags are in the PTE.)
+ * of some flags for pages which are no longer contained in any
+ * pmap (for mapped pages, the modified flags are in the PTE).
*/
-char *pmap_modify_list;
+u_int8_t *pmap_cached_attributes;
#define PV_ENTRY_NULL ((pv_entry_t) 0)
@@ -230,6 +231,8 @@ int etherlen;
boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
+#ifdef PMAP_USE_BATC
+
/*
* number of BATC entries used
*/
@@ -240,6 +243,8 @@ int batc_used;
*/
batc_entry_t batc_entry[BATC_MAX];
+#endif /* PMAP_USE_BATC */
+
vaddr_t kmapva = 0;
extern vaddr_t bugromva;
extern vaddr_t sramva;
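For reference, the BATC descriptor manipulated through batc_template_t corresponds
to the 88200 block address translation register. A hedged reconstruction from the
.field accesses in this file, assuming MSB-first bitfield layout on m88k
(machine/mmu.h is the authoritative definition):

	typedef union {
		unsigned bits;
		struct {			/* 88200 BATC word */
			unsigned lba:13;	/* logical block address */
			unsigned pba:13;	/* physical block address */
			unsigned sup:1;		/* supervisor-only */
			unsigned wt:1;		/* write-through */
			unsigned g:1;		/* global */
			unsigned ci:1;		/* cache inhibit */
			unsigned wp:1;		/* write protect */
			unsigned v:1;		/* valid */
		} field;
	} batc_template_t;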
@@ -253,8 +258,7 @@ pt_entry_t *pmap_expand_kmap(vaddr_t, vm_prot_t);
void pmap_remove_range(pmap_t, vaddr_t, vaddr_t);
void pmap_expand(pmap_t, vaddr_t);
void pmap_release(pmap_t);
-vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t, unsigned int);
-vaddr_t pmap_map_batc(vaddr_t, paddr_t, paddr_t, vm_prot_t, unsigned int);
+vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t, u_int);
pt_entry_t *pmap_pte(pmap_t, vaddr_t);
void pmap_remove_all(paddr_t);
void pmap_changebit(paddr_t, int, int);
@@ -422,7 +426,7 @@ pmap_expand_kmap(virt, prot)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
- printf("(pmap_expand_kmap :%x) v %x\n", curproc,virt);
+ printf("(pmap_expand_kmap: %x) v %x\n", curproc,virt);
#endif
template = m88k_protection(kernel_pmap, prot) | SG_V;
@@ -432,17 +436,16 @@ pmap_expand_kmap(virt, prot)
panic("pmap_expand_kmap: segment table entry VALID");
kpdt_ent = kpdt_free;
- if (kpdt_ent == KPDT_ENTRY_NULL) {
- printf("pmap_expand_kmap: Ran out of kernel pte tables\n");
- return (PT_ENTRY_NULL);
- }
+ if (kpdt_ent == KPDT_ENTRY_NULL)
+ panic("pmap_expand_kmap: Ran out of kernel pte tables");
+
kpdt_free = kpdt_free->next;
/* physical table */
*sdt = kpdt_ent->phys | template;
/* virtual table */
*(sdt + SDT_ENTRIES) = (vaddr_t)kpdt_ent | template;
- (unsigned)(kpdt_ent->phys) = 0;
- (unsigned)(kpdt_ent->next) = 0;
+ kpdt_ent->phys = (paddr_t)0;
+ kpdt_ent->next = NULL;
return ((pt_entry_t *)(kpdt_ent) + PDTIDX(virt));
} /* pmap_expand_kmap() */
@@ -490,16 +493,20 @@ pmap_map(virt, start, end, prot, cmode)
vaddr_t virt;
paddr_t start, end;
vm_prot_t prot;
- unsigned int cmode;
+ u_int cmode;
{
- unsigned npages;
- unsigned num_phys_pages;
+ u_int npages;
+ u_int num_phys_pages;
pt_entry_t template, *pte;
paddr_t page;
+#ifdef PMAP_USE_BATC
+ batc_template_t batctmp;
+ int i;
+#endif
#ifdef DEBUG
if ((pmap_con_dbg & (CD_MAP | CD_NORM)) == (CD_MAP | CD_NORM))
- printf ("(pmap_map :%x) phys address from %x to %x mapped at virtual %x, prot %x cmode %x\n",
+ printf ("(pmap_map: %x) phys address from %x to %x mapped at virtual %x, prot %x cmode %x\n",
curproc, start, end, virt, prot, cmode);
#endif
@@ -510,94 +517,7 @@ pmap_map(virt, start, end, prot, cmode)
template = m88k_protection(kernel_pmap, prot) | cmode | PG_V;
- page = trunc_page(start);
- npages = atop(round_page(end) - page);
- for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
- if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
- panic("pmap_map: Cannot allocate pte table");
-
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
- if (PDT_VALID(pte))
- printf("(pmap_map :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
-#endif
- *pte = template | page;
- page += PAGE_SIZE;
- virt += PAGE_SIZE;
- }
- return virt;
-} /* pmap_map() */
-
-/*
- * Routine: PMAP_MAP_BATC
- *
- * Function:
- * Map memory using BATC at initialization. The physical addresses being
- * mapped are not managed and are never unmapped.
- *
- * Parameters:
- * virt virtual address of range to map
- * start physical address of range to map
- * end physical address of end of range
- * prot protection attributes
- * cmode cache control attributes
- *
- * External & Global:
- * batc_used number of BATC used
- *
- * Calls:
- * m88k_protection
- * pmap_pte
- * pmap_expand_kmap
- *
- * For each page that needs mapping:
- * If both virt and phys are on the BATC block boundary, map using BATC.
- * Else make mapping in the same manner as pmap_map.
- *
- * initialize BATC and pte template
- * look for number of phys pages in range
- * {
- * if virt and phys are on BATC block boundary
- * {
- * map using BATC
- * increment virt and phys one BATC block
- * continue outer loop
- * }
- * pmap_pte(virt) - expand if necessary
- * stuff pte from template
- * increment virt one page
- * increment template paddr one page
- * }
- *
- */
-vaddr_t
-pmap_map_batc(virt, start, end, prot, cmode)
- vaddr_t virt;
- paddr_t start, end;
- vm_prot_t prot;
- unsigned int cmode;
-{
- unsigned npages;
- unsigned num_phys_pages;
- pt_entry_t template, *pte;
- paddr_t page;
- batc_template_t batctmp;
- int i;
-
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
- printf ("(pmap_map_batc :%x) phys address from %x to %x mapped at virtual %x, prot %x\n", curproc,
- start, end, virt, prot);
-#endif
-
-#ifdef DIAGNOSTIC
- if (start > end)
- panic("pmap_map_batc: start greater than end address");
-#endif
-
-
- template = m88k_protection(kernel_pmap, prot) | cmode | PG_V;
+#ifdef PMAP_USE_BATC
batctmp.bits = 0;
batctmp.field.sup = 1; /* supervisor */
@@ -611,12 +531,17 @@ pmap_map_batc(virt, start, end, prot, cmode)
batctmp.field.wp = 1; /* protection */
batctmp.field.v = 1; /* valid */
+#endif
+
page = trunc_page(start);
npages = atop(round_page(end) - page);
- for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
+ for (num_phys_pages = npages; num_phys_pages != 0; num_phys_pages--) {
+
+#ifdef PMAP_USE_BATC
+
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
- printf("(pmap_map_batc :%x) num_phys_pg=%x, virt=%x, "
+ if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
+ printf("(pmap_map: %x) num_phys_pg=%x, virt=%x, "
"align V=%d, page=%x, align P=%d\n",
curproc, num_phys_pages, virt,
BATC_BLK_ALIGNED(virt), page,
@@ -632,23 +557,22 @@ pmap_map_batc(virt, start, end, prot, cmode)
batctmp.field.lba = M88K_BTOBLK(virt);
batctmp.field.pba = M88K_BTOBLK(page);
- for (i = 0; i < max_cpus; i++)
+ for (i = 0; i < MAX_CPUS; i++)
if (cpu_sets[i])
cmmu_set_pair_batc_entry(i, batc_used,
batctmp.bits);
batc_entry[batc_used] = batctmp.field;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_NORM)) == (CD_MAPB | CD_NORM)) {
- printf("(pmap_map_batc :%x) BATC used=%d, data=%x\n", curproc, batc_used, batctmp.bits);
+ if ((pmap_con_dbg & (CD_MAP | CD_NORM)) == (CD_MAP | CD_NORM)) {
+ printf("(pmap_map: %x) BATC used=%d, data=%x\n", curproc, batc_used, batctmp.bits);
}
- if (pmap_con_dbg & CD_MAPB) {
-
- for (i = 0; i < BATC_BLKBYTES; i += PAGE_SIZE ) {
- pte = pmap_pte(kernel_pmap, virt+i);
+ if (pmap_con_dbg & CD_MAP)
+ for (i = 0; i < BATC_BLKBYTES; i += PAGE_SIZE) {
+ pte = pmap_pte(kernel_pmap, virt + i);
if (PDT_VALID(pte))
- printf("(pmap_map_batc :%x) va %x is already mapped : pte %x\n", curproc, virt+i, *pte);
+ printf("(pmap_map: %x) va %x is already mapped: pte %x\n",
+ curproc, virt + i, *pte);
}
- }
#endif
batc_used++;
virt += BATC_BLKBYTES;
@@ -656,22 +580,24 @@ pmap_map_batc(virt, start, end, prot, cmode)
num_phys_pages -= BATC_BLKBYTES/PAGE_SIZE;
continue;
}
+#endif /* PMAP_USE_BATC */
+
if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
- panic("pmap_map_batc: Cannot allocate pte table");
+ pte = pmap_expand_kmap(virt,
+ VM_PROT_READ | VM_PROT_WRITE);
-#ifdef DEBUG
- if (pmap_con_dbg & CD_MAPB)
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
if (PDT_VALID(pte))
- printf("(pmap_map_batc :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
+ printf("(pmap_map: %x) pte @ 0x%p already valid\n", curproc, pte);
#endif
- *pte = template | trunc_page(page);
+ *pte = template | page;
virt += PAGE_SIZE;
page += PAGE_SIZE;
}
return virt;
-} /* pmap_map_batc() */
+} /* pmap_map() */
/*
* Routine: PMAP_CACHE_CONTROL
@@ -684,7 +610,7 @@ pmap_map_batc(virt, start, end, prot, cmode)
* pmap_t map
* vaddr_t s
* vaddr_t e
- * unsigned mode
+ * u_int mode
*
* Calls:
* pmap_pte
@@ -701,14 +627,14 @@ void
pmap_cache_ctrl(pmap, s, e, mode)
pmap_t pmap;
vaddr_t s, e;
- unsigned mode;
+ u_int mode;
{
int spl;
pt_entry_t *pte;
- vaddr_t va;
+ vaddr_t va, pteva;
boolean_t kflush;
int cpu;
- unsigned users;
+ u_int users;
#ifdef DEBUG
if (mode & CACHE_MASK) {
@@ -716,7 +642,7 @@ pmap_cache_ctrl(pmap, s, e, mode)
return;
}
if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
- printf("(pmap_cache_ctrl :%x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
+ printf("(pmap_cache_ctrl: %x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
}
#endif /* DEBUG */
@@ -739,7 +665,7 @@ pmap_cache_ctrl(pmap, s, e, mode)
continue;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
- printf("(cache_ctrl) pte@0x%08x\n",(unsigned)pte);
+ printf("(cache_ctrl) pte@0x%p\n", pte);
}
#endif /* DEBUG */
/*
@@ -753,10 +679,10 @@ pmap_cache_ctrl(pmap, s, e, mode)
/*
* Data cache should be copied back and invalidated.
*/
- for (cpu = 0; cpu < max_cpus; cpu++)
+ pteva = ptoa(PG_PFNUM(*pte));
+ for (cpu = 0; cpu < MAX_CPUS; cpu++)
if (cpu_sets[cpu])
- cmmu_flush_remote_cache(cpu, ptoa(PG_PFNUM(*pte)),
- PAGE_SIZE);
+ cmmu_flush_remote_cache(cpu, pteva, PAGE_SIZE);
}
PMAP_UNLOCK(pmap, spl);
} /* pmap_cache_ctrl */
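A hypothetical caller, marking a wired kernel buffer cache-inhibited (buf and len
are made up for illustration; CACHE_INH is the mode used for such mappings
elsewhere in this file):

	/* hypothetical: disable caching on a wired kernel buffer */
	pmap_cache_ctrl(kernel_pmap, (vaddr_t)buf,
	    (vaddr_t)buf + round_page(len), CACHE_INH);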
@@ -787,7 +713,7 @@ pmap_cache_ctrl(pmap, s, e, mode)
*
* Calls:
* simple_lock_init
- * pmap_map or pmap_map_batc
+ * pmap_map
*
* The physical address 'load_start' is mapped at
* VM_MIN_KERNEL_ADDRESS, which maps the kernel code and data at the
@@ -827,16 +753,16 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
pt_entry_t *pte;
int i;
pmap_table_t ptable;
- extern char *kernelstart, *etext;
+ extern void *kernelstart, *etext;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_BOOT | CD_NORM)) == (CD_BOOT | CD_NORM)) {
- printf("pmap_bootstrap : \"load_start\" 0x%x\n", load_start);
+ printf("pmap_bootstrap: \"load_start\" 0x%x\n", load_start);
}
#endif
#ifdef DIAGNOSTIC
if (!PAGE_ALIGNED(load_start))
- panic("pmap_bootstrap : \"load_start\" not on the m88k page boundary : 0x%x", load_start);
+ panic("pmap_bootstrap: \"load_start\" not on the m88k page boundary: 0x%x", load_start);
#endif
simple_lock_init(&kernel_pmap->lock);
@@ -859,7 +785,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
*phys_start = round_page(*phys_start);
*virt_start = *phys_start +
- (trunc_page((unsigned)&kernelstart) - load_start);
+ (trunc_page((vaddr_t)&kernelstart) - load_start);
/*
* Initialize kernel_pmap structure
@@ -898,7 +824,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
printf(" kernel segment start = 0x%x\n", kernel_pmap->sdt_paddr);
printf("kernel segment table size = 0x%x\n", kernel_pmap_size);
- printf(" kernel segment end = 0x%x\n", ((unsigned)kernel_pmap->sdt_paddr) + kernel_pmap_size);
+ printf(" kernel segment end = 0x%x\n", ((paddr_t)kernel_pmap->sdt_paddr) + kernel_pmap_size);
}
#endif
/* init all segment descriptors to zero */
@@ -915,9 +841,8 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
kpdt_phys = *phys_start;
kpdt_virt = (kpdt_entry_t)*virt_start;
- pdt_size = MAX_KERNEL_PDT_SIZE;
/* might as well round up to a page - XXX smurph */
- pdt_size = round_page(pdt_size);
+ pdt_size = round_page(MAX_KERNEL_PDT_SIZE);
kernel_pmap_size += pdt_size;
*phys_start += pdt_size;
*virt_start += pdt_size;
@@ -945,7 +870,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
* init the kpdt queue
*/
kpdt_free = kpdt_virt;
- for (i = pdt_size / PDT_SIZE; i > 0; i--) {
+ for (i = pdt_size / PDT_SIZE; i != 0; i--) {
kpdt_virt->next = (kpdt_entry_t)((vaddr_t)kpdt_virt + PDT_SIZE);
kpdt_virt->phys = kpdt_phys;
kpdt_virt = kpdt_virt->next;
@@ -958,26 +883,21 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
*/
s_text = load_start; /* paddr of text */
- e_text = load_start + ((unsigned)&etext -
- trunc_page((unsigned)&kernelstart));
+ e_text = load_start + ((vaddr_t)&etext -
+ trunc_page((vaddr_t)&kernelstart));
/* paddr of end of text section*/
e_text = round_page(e_text);
-#ifdef OMRON_PMAP
-#define PMAPER pmap_map
-#else
-#define PMAPER pmap_map_batc
-#endif
+ /* map the first 64k (BUG ROM) read only, cache inhibited (? XXX) */
+ vaddr = pmap_map(0, 0, 0x10000, VM_PROT_WRITE | VM_PROT_READ,
+ CACHE_INH);
- /* map the first 64k (BUG ROM) read only, cache inhibited (? XXX) */
- vaddr = PMAPER(0, 0, 0x10000, VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);
-
- /* map the kernel text read only */
- vaddr = PMAPER((vaddr_t)trunc_page(((unsigned)&kernelstart)),
+ /* map the kernel text read only */
+ vaddr = pmap_map(trunc_page((vaddr_t)&kernelstart),
s_text, e_text, VM_PROT_WRITE | VM_PROT_READ,
CACHE_GLOBAL); /* shouldn't it be RO? XXX*/
- vaddr = PMAPER(vaddr, e_text, (paddr_t)kmap,
+ vaddr = pmap_map(vaddr, e_text, (paddr_t)kmap,
VM_PROT_WRITE | VM_PROT_READ, CACHE_GLOBAL);
/*
@@ -995,7 +915,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
while (vaddr < (*virt_start - kernel_pmap_size))
vaddr = round_page(vaddr + 1);
}
- vaddr = PMAPER(vaddr, (paddr_t)kmap, *phys_start,
+ vaddr = pmap_map(vaddr, (paddr_t)kmap, *phys_start,
VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);
if (vaddr != *virt_start) {
@@ -1006,7 +926,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
*/
#ifdef DEBUG
if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("1:vaddr %x *virt_start 0x%x *phys_start 0x%x\n", vaddr,
+ printf("1: vaddr %x *virt_start 0x%x *phys_start 0x%x\n", vaddr,
*virt_start, *phys_start);
}
#endif
@@ -1026,7 +946,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
etherlen = ETHERPAGES * NBPG;
etherbuf = (void *)vaddr;
- vaddr = PMAPER(vaddr, *phys_start, *phys_start + etherlen,
+ vaddr = pmap_map(vaddr, *phys_start, *phys_start + etherlen,
VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);
*virt_start += etherlen;
@@ -1035,7 +955,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
if (vaddr != *virt_start) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("2:vaddr %x *virt_start %x *phys_start %x\n", vaddr,
+ printf("2: vaddr %x *virt_start %x *phys_start %x\n", vaddr,
*virt_start, *phys_start);
}
#endif
@@ -1081,17 +1001,16 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
}
#endif
- for (; ptable->size != 0xffffffffU; ptable++){
+	for (; ptable->size != (size_t)(-1); ptable++) {
if (ptable->size) {
/*
* size-1, 'cause pmap_map rounds up to next pagenumber
*/
- PMAPER(ptable->virt_start, ptable->phys_start,
+ pmap_map(ptable->virt_start, ptable->phys_start,
ptable->phys_start + (ptable->size - 1),
ptable->prot, ptable->cacheability);
}
}
-#undef PMAPER
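The loop above walks a per-board mapping table until it hits the (size_t)-1 size
sentinel. A sketch of what such a table might look like; the struct tag, field
order, and the sample entry are assumptions, the real tables live in the
board-specific code:

	/* hypothetical board table; the (size_t)-1 size terminates the walk */
	static const struct pmap_table m1x7_io_table[] = {
		/* virt_start, phys_start, size, prot, cacheability */
		{ 0xff800000, 0xff800000, 0x00400000,
		    VM_PROT_READ | VM_PROT_WRITE, CACHE_INH },
		{ 0, 0, (size_t)-1, 0, 0 },
	};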
/*
* Allocate all the submaps we need. Note that SYSMAP just allocates
@@ -1203,7 +1122,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
*
* Extern/Globals
* pv_head_table
- * pmap_modify_list
+ * pmap_cached_attributes
* pmap_phys_start
* pmap_phys_end
* pmap_initialized
@@ -1212,7 +1131,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
* uvm_km_zalloc
*
* This routine does not really have much to do. It allocates space
- * for the pv_head_table, pmap_modify_list; and sets these
+ * for the pv_head_table, pmap_cached_attributes; and sets these
* pointers. It also initializes zones for pmap structures, pv_entry
* structures, and segment tables.
*
@@ -1232,7 +1151,7 @@ pmap_init(void)
vaddr_t addr;
vsize_t s;
pv_entry_t pv;
- char *attr;
+ u_int8_t *attr;
int bank;
#ifdef DEBUG
@@ -1247,7 +1166,7 @@ pmap_init(void)
npages += vm_physmem[bank].end - vm_physmem[bank].start;
s = PV_TABLE_SIZE(npages); /* pv_list */
- s += npages * sizeof(char); /* pmap_modify_list */
+ s += npages * sizeof(u_int8_t); /* pmap_cached_attributes */
#ifdef DEBUG
if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) {
@@ -1263,14 +1182,14 @@ pmap_init(void)
pv_head_table = (pv_entry_t)addr;
addr += PV_TABLE_SIZE(npages);
- pmap_modify_list = (char *)addr;
+ pmap_cached_attributes = (u_int8_t *)addr;
/*
* Now that the pv and attribute tables have been allocated,
* assign them to the memory segments.
*/
pv = pv_head_table;
- attr = pmap_modify_list;
+ attr = pmap_cached_attributes;
for (bank = 0; bank < vm_nphysseg; bank++) {
npages = vm_physmem[bank].end - vm_physmem[bank].start;
vm_physmem[bank].pmseg.pvent = pv;
@@ -1322,7 +1241,7 @@ pmap_zero_page(phys)
srcpte = pmap_pte(kernel_pmap, srcva);
SPLVM(spl);
- cmmu_flush_tlb(1, srcva, PAGE_SIZE);
+ cmmu_flush_tlb(TRUE, srcva, PAGE_SIZE);
*srcpte = trunc_page(phys) |
m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE) |
CACHE_GLOBAL | PG_V;
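The commit converts the bare 0/1 first argument of cmmu_flush_tlb() to FALSE/TRUE;
judging from the call sites in this diff, the flag selects the supervisor (kernel)
versus user TLB:

	cmmu_flush_tlb(TRUE, srcva, PAGE_SIZE);	/* kernel mapping at srcva */
	cmmu_flush_tlb(FALSE, 0, -1);		/* flush the entire user TLB */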
@@ -1347,10 +1266,13 @@ pmap_zero_page(phys)
struct pmap *
pmap_create(void)
{
- pmap_t p;
- sdt_entry_t *segdt;
- int i;
- unsigned int s;
+ pmap_t p;
+ sdt_entry_t *segdt;
+ u_int s;
+
+#ifdef PMAP_USE_BATC
+ int i;
+#endif
p = pool_get(&pmappool, PR_WAITOK);
bzero(p, sizeof(*p));
@@ -1361,7 +1283,7 @@ pmap_create(void)
s = round_page(2 * SDT_SIZE);
#ifdef DEBUG
if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) {
- printf("(pmap_create :%x) need %d pages for sdt\n",
+ printf("(pmap_create: %x) need %d pages for sdt\n",
curproc, atop(s));
}
#endif
@@ -1374,7 +1296,9 @@ pmap_create(void)
* Initialize pointer to segment table both virtual and physical.
*/
p->sdt_vaddr = segdt;
- pmap_extract(kernel_pmap, (vaddr_t)segdt, (paddr_t *)&p->sdt_paddr);
+ if (pmap_extract(kernel_pmap, (vaddr_t)segdt,
+ (paddr_t *)&p->sdt_paddr) == FALSE)
+ panic("pmap_create: pmap_extract failed!");
if (!PAGE_ALIGNED(p->sdt_paddr))
panic("pmap_create: sdt_table 0x%x not aligned on page boundary",
@@ -1382,8 +1306,8 @@ pmap_create(void)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) {
- printf("(pmap_create :%x) pmap=0x%x, sdt_vaddr=0x%x, sdt_paddr=0x%x\n",
- curproc, (unsigned)p, p->sdt_vaddr, p->sdt_paddr);
+ printf("(pmap_create: %x) pmap=0x%p, sdt_vaddr=0x%x, sdt_paddr=0x%x\n",
+ curproc, p, p->sdt_vaddr, p->sdt_paddr);
}
#endif
@@ -1412,7 +1336,7 @@ pmap_create(void)
simple_lock_init(&p->lock);
p->cpus_using = 0;
-#ifdef OMRON_PMAP
+#ifdef PMAP_USE_BATC
/* initialize block address translation cache */
for (i = 0; i < BATC_MAX; i++) {
p->i_batc[i].bits = 0;
@@ -1462,11 +1386,11 @@ pmap_release(pmap)
unsigned long sdt_va; /* outer loop index */
sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
pt_entry_t *gdttbl; /* ptr to first entry in a page table */
- unsigned int i,j;
+ u_int i,j;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
- printf("(pmap_release :%x) pmap %x\n", curproc, pmap);
+ printf("(pmap_release: %x) pmap %x\n", curproc, pmap);
#endif
sdttbl = pmap->sdt_vaddr; /* addr of segment table */
@@ -1486,7 +1410,7 @@ pmap_release(pmap)
if ((gdttbl = pmap_pte(pmap, (vaddr_t)sdt_va)) != PT_ENTRY_NULL) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
- printf("(pmap_release :%x) free page table = 0x%x\n",
+ printf("(pmap_release: %x) free page table = 0x%x\n",
curproc, gdttbl);
#endif
PT_FREE(gdttbl);
@@ -1495,17 +1419,17 @@ pmap_release(pmap)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
- printf("(pmap_release :%x) free segment table = 0x%x\n",
+ printf("(pmap_release: %x) free segment table = 0x%x\n",
curproc, sdttbl);
#endif
/*
* Freeing both *actual* and *shadow* segment tables
*/
- uvm_km_free(kernel_map, (vaddr_t)sdttbl, 2*SDT_SIZE);
+ uvm_km_free(kernel_map, (vaddr_t)sdttbl, round_page(2*SDT_SIZE));
#ifdef DEBUG
if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
- printf("(pmap_release :%x) ref_count = 0\n", curproc);
+ printf("(pmap_release: %x) ref_count = 0\n", curproc);
/* unlink from list of pmap structs */
pmap->prev->next = pmap->next;
pmap->next->prev = pmap->prev;
@@ -1588,11 +1512,11 @@ pmap_reference(p)
* Parameters:
* pmap pointer to pmap structure
* s virtual address of start of range to remove
- * e virtual address of start of range to remove
+ * e virtual address of end of range to remove
*
* External/Global:
* pv lists
- * pmap_modify_list
+ * pmap_cached_attributes
*
* Calls:
* pmap_pte
@@ -1611,8 +1535,8 @@ pmap_reference(p)
*
 * If the PTE is valid, the routine must invalidate the entry. The
* 'modified' bit, if on, is referenced to the VM, and into the appropriate
- * entry in the pmap_modify_list. Next, the function must find the PV list
- * entry associated with this pmap/va (if it doesn't exist - the function
+ * entry in the pmap_cached_attributes. Next, the function must find the PV
+ * list entry associated with this pmap/va (if it doesn't exist - the function
* panics). The PV list entry is unlinked from the list, and returned to
* its zone.
*/
@@ -1626,7 +1550,7 @@ pmap_remove_range(pmap, s, e)
pv_entry_t pvl;
paddr_t pa;
vaddr_t va;
- unsigned users;
+ u_int users;
pt_entry_t opte;
boolean_t kflush;
@@ -1711,13 +1635,12 @@ pmap_remove_range(pmap, s, e)
}
if (cur == PV_ENTRY_NULL) {
printf("pmap_remove_range: looking for VA "
- "0x%x (pa 0x%x) PV list at 0x%x\n", va, pa, (unsigned)pvl);
+ "0x%x (pa 0x%x) PV list at 0x%p\n", va, pa, pvl);
panic("pmap_remove_range: mapping not in pv_list");
}
prev->next = cur->next;
pool_put(&pvpool, cur);
- /*pvl = pa_to_pvh(pa);*/
}
} /* if PMAP_MANAGED */
@@ -1772,16 +1695,16 @@ pmap_remove(map, s, e)
{
int spl;
- if (map == PMAP_NULL) {
+ if (map == PMAP_NULL)
return;
- }
+
#ifdef DEBUG
if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM))
- printf("(pmap_remove :%x) map %x s %x e %x\n", curproc, map, s, e);
+ printf("(pmap_remove: %x) map %x s %x e %x\n", curproc, map, s, e);
#endif
#ifdef DIAGNOSTIC
- if (s > e)
+ if (s >= e)
panic("pmap_remove: start greater than end address");
#endif
@@ -1803,7 +1726,7 @@ pmap_remove(map, s, e)
*
* Extern/Global:
* pv_head_array, pv lists
- * pmap_modify_list
+ * pmap_cached_attributes
*
* Calls:
* simple_lock
@@ -1816,7 +1739,8 @@ pmap_remove(map, s, e)
* obtain a pointer to the page table entry (PTE) associated with the
* va (the PTE must exist and be valid, otherwise the routine panics).
* The hardware 'modified' bit in the PTE is examined. If it is on, the
- * pmap_modify_list entry corresponding to the physical page is set to 1.
+ * corresponding bit in the pmap_cached_attributes entry corresponding
+ * to the physical page is set to 1.
* Then, the PTE is invalidated, and the PV list entry is unlinked and
* freed.
*
@@ -1840,7 +1764,7 @@ pmap_remove_all(phys)
/* not a managed page. */
#ifdef DEBUG
if (pmap_con_dbg & CD_RMAL)
- printf("(pmap_remove_all :%x) phys addr 0x%x not a managed page\n", curproc, phys);
+ printf("(pmap_remove_all: %x) phys addr 0x%x not a managed page\n", curproc, phys);
#endif
return;
}
@@ -1848,9 +1772,8 @@ pmap_remove_all(phys)
SPLVM(spl);
/*
* Walk down PV list, removing all mappings.
- * We have to do the same work as in pmap_remove_pte_page
- * since that routine locks the pv_head. We don't have
- * to lock the pv_head, since we have the entire pmap system.
+ * We don't have to lock the pv_head, since we have the entire pmap
+ * system.
*/
remove_all_Retry:
@@ -1873,15 +1796,15 @@ remove_all_Retry:
#ifdef DIAGNOSTIC
if (pte == PT_ENTRY_NULL) {
#ifdef DEBUG
- printf("(pmap_remove_all :%x) phys %x pmap %x va %x dbgcnt %x\n",
- (unsigned)curproc, phys, (unsigned)pmap, va, dbgcnt);
+ printf("(pmap_remove_all: %p) phys %p pmap %x va %x dbgcnt %x\n",
+ curproc, phys, pmap, va, dbgcnt);
#endif
panic("pmap_remove_all: pte NULL");
}
#endif /* DIAGNOSTIC */
if (!PDT_VALID(pte)) {
pvl = pvl->next;
- continue; /* no page mapping */
+ goto next; /* no page mapping */
}
if (pmap_pte_w(pte)) {
#ifdef DEBUG
@@ -1890,20 +1813,20 @@ remove_all_Retry:
phys);
#endif
pvl = pvl->next;
- continue;
+ goto next;
}
- pmap_remove_range(pmap, va, va);
+ pmap_remove_range(pmap, va, va + PAGE_SIZE);
+#ifdef DEBUG
+ dbgcnt++;
+#endif
/*
* Do not free any page tables,
* leaves that for when VM calls pmap_collect().
*/
-
+next:
simple_unlock(&pmap->lock);
-#ifdef DEBUG
- dbgcnt++;
-#endif
}
SPLX(spl);
} /* pmap_remove_all() */
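The change from pmap_remove_range(pmap, va, va) to va + PAGE_SIZE makes the
half-open range convention explicit, consistent with pmap_remove() now rejecting
s >= e. Removing a single page therefore looks like:

	/* remove exactly one page: ranges are half-open, [va, va + PAGE_SIZE) */
	pmap_remove_range(pmap, va, va + PAGE_SIZE);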
@@ -1943,7 +1866,7 @@ pmap_protect(pmap, s, e, prot)
int spl;
pt_entry_t ap, *pte;
vaddr_t va;
- unsigned users;
+ u_int users;
boolean_t kflush;
if (pmap == PMAP_NULL || prot & VM_PROT_WRITE)
@@ -1973,20 +1896,21 @@ pmap_protect(pmap, s, e, prot)
/*
* Loop through the range in vm_page_size increment.
- * Do not assume that either start or end fall on any
- * kind of page boundary (though this may be true ?!).
*/
- for (va = s; va <= e; va += PAGE_SIZE) {
+ for (va = s; va < e; va += PAGE_SIZE) {
pte = pmap_pte(pmap, va);
if (pte == PT_ENTRY_NULL) {
va &= SDT_MASK; /* align to segment */
- if (va <= e - (1<<SDT_SHIFT))
- va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
- else /* wrap around */
+ if (va <= e - (1 << SDT_SHIFT)) {
+ /* no page table, skip to next seg entry */
+ va += (1 << SDT_SHIFT) - PAGE_SIZE;
+ } else {
+ /* wrap around */
break;
+ }
#ifdef DEBUG
if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
- printf("(pmap_protect :%x) no page table :: skip to 0x%x\n", curproc, va + PAGE_SIZE);
+ printf("(pmap_protect: %x) no page table: skip to 0x%x\n", curproc, va + PAGE_SIZE);
#endif
continue;
}
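The skip logic is easier to follow with numbers. Assuming 4KB pages and 4MB
segments (SDT_SHIFT == 22; hypothetical values), the no-page-table case does this:

	/*
	 * va = 0x00403000
	 * va &= SDT_MASK;               -> 0x00400000 (segment base)
	 * va += (1 << 22) - PAGE_SIZE;  -> 0x007ff000 (last page of segment)
	 * loop's va += PAGE_SIZE        -> 0x00800000 (next segment)
	 */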
@@ -1994,7 +1918,7 @@ pmap_protect(pmap, s, e, prot)
if (!PDT_VALID(pte)) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
- printf("(pmap_protect :%x) pte invalid pte @ 0x%x\n", curproc, pte);
+ printf("(pmap_protect: %x) pte invalid pte @ 0x%x\n", curproc, pte);
#endif
continue; /* no page mapping */
}
@@ -2044,7 +1968,7 @@ pmap_protect(pmap, s, e, prot)
*
* 1: This routine immediately allocates space for a page table.
*
- * 2: The page table entries (PTEs) are initialized (set invalid), and
+ * 2: The page table entries (PTEs) are initialized (set invalid), and
* the corresponding segment table entry is set to point to the new
* page table.
*
@@ -2072,14 +1996,15 @@ pmap_expand(map, v)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_EXP | CD_NORM)) == (CD_EXP | CD_NORM))
- printf ("(pmap_expand :%x) map %x v %x\n", curproc, map, v);
+ printf ("(pmap_expand: %x) map %x v %x\n", curproc, map, v);
#endif
CHECK_PAGE_ALIGN (v, "pmap_expand");
/* XXX */
pdt_vaddr = uvm_km_zalloc(kernel_map, PAGE_SIZE);
- pmap_extract(kernel_pmap, pdt_vaddr, &pdt_paddr);
+ if (pmap_extract(kernel_pmap, pdt_vaddr, &pdt_paddr) == FALSE)
+ panic("pmap_expand: pmap_extract failed!");
#ifdef MVME188
if (brdtyp == BRD_188) {
@@ -2103,7 +2028,7 @@ pmap_expand(map, v)
#ifdef DEBUG
if (pmap_con_dbg & CD_EXP)
- printf("(pmap_expand :%x) table has already been allocated\n", curproc);
+ printf("(pmap_expand: %x) table has already been allocated\n", curproc);
#endif
return;
}
@@ -2112,10 +2037,10 @@ pmap_expand(map, v)
 * its containing page 'table group', i.e. the group of
 * page tables that fit within a single VM page.
* Using that, obtain the segment table pointer that references the
- * first page table in the group, and initilize all the
+ * first page table in the group, and initialize all the
* segment table descriptions for the page 'table group'.
*/
- v &= ~((1<<(LOG2_PDT_TABLE_GROUP_SIZE+PDT_BITS+PG_BITS))-1);
+ v &= ~((1 << (LOG2_PDT_TABLE_GROUP_SIZE + PDT_BITS + PG_BITS)) - 1);
sdt = SDTENT(map,v);
@@ -2142,7 +2067,7 @@ pmap_expand(map, v)
* If specified, the page will be wired down, meaning that the
* related pte can not be reclaimed.
*
- * N.B.: This is only routine which MAY NOT lazy-evaluation or lose
+ * N.B.: This is the only routine which MAY NOT lazy-evaluate or lose
* information. That is, this routine must actually insert this page
* into the given map NOW.
*
@@ -2155,7 +2080,7 @@ pmap_expand(map, v)
*
* Extern/Global:
* pv_head_array, pv lists
- * pmap_modify_list
+ * pmap_cached_attributes
*
* Calls:
* m88k_protection
@@ -2214,8 +2139,8 @@ pmap_enter(pmap, va, pa, prot, flags)
paddr_t old_pa;
pt_entry_t template;
pv_entry_t pvl;
- unsigned users;
- int kflush;
+ u_int users;
+ boolean_t kflush;
boolean_t wired = (flags & PMAP_WIRED) != 0;
CHECK_PAGE_ALIGN(va, "pmap_entry - VA");
@@ -2228,9 +2153,9 @@ pmap_enter(pmap, va, pa, prot, flags)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
if (pmap == kernel_pmap)
- printf("(pmap_enter :%x) pmap kernel va %x pa %x\n", curproc, va, pa);
+ printf("(pmap_enter: %x) pmap kernel va %x pa %x\n", curproc, va, pa);
else
- printf("(pmap_enter :%x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
+ printf("(pmap_enter: %x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
}
#endif
ap = m88k_protection(pmap, prot);
@@ -2253,9 +2178,7 @@ Retry:
*/
while ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
if (pmap == kernel_pmap) {
- if (pmap_expand_kmap(va, VM_PROT_READ|VM_PROT_WRITE) ==
- PT_ENTRY_NULL)
- panic("pmap_expand: Cannot allocate kernel pte table");
+ pmap_expand_kmap(va, VM_PROT_READ|VM_PROT_WRITE);
} else {
/*
* Must unlock to expand the pmap.
@@ -2313,28 +2236,22 @@ Retry:
/*
* Remove old mapping from the PV list if necessary.
*/
- if (old_pa != (paddr_t)-1) {
- /*
- * Invalidate the translation buffer,
- * then remove the mapping.
- */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
- printf("vaddr1 0x%x vaddr2 0x%x va 0x%x pa 0x%x managed %x\n",
- phys_map_vaddr1, phys_map_vaddr2, va, old_pa,
- PMAP_MANAGED(pa) ? 1 : 0);
- printf("pte %x pfn %x valid %x\n",
- pte, PG_PFNUM(*pte), PDT_VALID(pte));
- }
- }
-#endif
+ if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
- flush_atc_entry(users, va, TRUE);
- } else {
- pmap_remove_range(pmap, va, va + PAGE_SIZE);
+ printf("vaddr1 0x%x vaddr2 0x%x va 0x%x pa 0x%x managed %x\n",
+ phys_map_vaddr1, phys_map_vaddr2, va, old_pa,
+ PMAP_MANAGED(pa) ? 1 : 0);
+ printf("pte %x pfn %x valid %x\n",
+ pte, PG_PFNUM(*pte), PDT_VALID(pte));
}
}
+#endif
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
+ flush_atc_entry(users, va, TRUE);
+ } else {
+ pmap_remove_range(pmap, va, va + PAGE_SIZE);
+ }
if (PMAP_MANAGED(pa)) {
#ifdef DEBUG
@@ -2422,19 +2339,19 @@ Retry:
} /* pmap_enter */
/*
- * Routine: pmap_unwire
+ * Routine: pmap_unwire
*
- * Function: Change the wiring attributes for a map/virtual-address
- * Pair.
- * Parameters:
- * pmap pointer to pmap structure
- * v virtual address of page to be unwired
+ * Function: Change the wiring attributes for a map/virtual-address pair.
*
- * Calls:
- * pmap_pte
+ * Parameters:
+ * pmap pointer to pmap structure
+ * v virtual address of page to be unwired
+ *
+ * Calls:
+ * pmap_pte
*
- * Special Assumptions:
- * The mapping must already exist in the pmap.
+ * Special Assumptions:
+ * The mapping must already exist in the pmap.
*/
void
pmap_unwire(map, v)
@@ -2474,11 +2391,10 @@ pmap_unwire(map, v)
* PMAP_LOCK, PMAP_UNLOCK
* pmap_pte
*
+ * If BATC mapping is enabled and the specified pmap is kernel_pmap,
+ * batc_entry is scanned to find out the mapping.
*
- * This routine checks BATC mapping first. BATC has been used and
- * the specified pmap is kernel_pmap, batc_entry is scanned to find out
- * the mapping.
- * Then the routine calls pmap_pte to get a (virtual) pointer to
+ * Then the routine calls pmap_pte to get a (virtual) pointer to
* the page table entry (PTE) associated with the given virtual
* address. If the page table does not exist, or if the PTE is not valid,
* then 0 address is returned. Otherwise, the physical page address from
@@ -2492,26 +2408,31 @@ pmap_extract(pmap, va, pap)
{
pt_entry_t *pte;
paddr_t pa;
- int i;
int spl;
boolean_t rv = FALSE;
+#ifdef PMAP_USE_BATC
+ int i;
+#endif
+
#ifdef DIAGNOSTIC
if (pmap == PMAP_NULL)
panic("pmap_extract: pmap is NULL");
#endif
+#ifdef PMAP_USE_BATC
/*
* check BATC first
*/
- if (pmap == kernel_pmap && batc_used > 0)
- for (i = batc_used - 1; i > 0; i--)
+ if (pmap == kernel_pmap && batc_used != 0)
+ for (i = batc_used - 1; i != 0; i--)
if (batc_entry[i].lba == M88K_BTOBLK(va)) {
if (pap != NULL)
*pap = (batc_entry[i].pba << BATC_BLKSHIFT) |
(va & BATC_BLKMASK);
return (TRUE);
}
+#endif
PMAP_LOCK(pmap, spl);
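A BATC hit translates whole blocks rather than pages. With a hypothetical 512KB
block size (BATC_BLKSHIFT == 19), the computation above decomposes as:

	/*
	 * hypothetical: va = 0xff8a1234, block size 512KB
	 * M88K_BTOBLK(va) = va >> 19           (block number matched against lba)
	 * pa = (pba << 19) | (va & 0x7ffff)    (block base | offset in block)
	 */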
@@ -2531,21 +2452,21 @@ pmap_extract(pmap, va, pap)
} /* pmap_extract() */
/*
- * Routine: PMAP_COPY
+ * Routine: PMAP_COPY
*
- * Function:
- * Copy the range specigfied by src_adr/len from the source map
- * to the range dst_addr/len in the destination map. This routine
- * is only advisory and need not do anything.
+ * Function:
+ *	Copy the range specified by src_addr/len from the source map
+ * to the range dst_addr/len in the destination map. This routine
+ * is only advisory and need not do anything.
*
- * Parameters:
- * dst_pmap pointer to destination pmap structure
- * src_pmap pointer to source pmap structure
- * dst_addr VA in destination map
- * len length of address space being copied
- * src_addr VA in source map
+ * Parameters:
+ * dst_pmap pointer to destination pmap structure
+ * src_pmap pointer to source pmap structure
+ * dst_addr VA in destination map
+ * len length of address space being copied
+ * src_addr VA in source map
*
- * At this time, the 88200 pmap implementation does nothing in this
+ * At this time, the 88200 pmap implementation does nothing in this
* function. Translation tables in the destination map will be allocated
* at VM fault time.
*/
@@ -2610,7 +2531,7 @@ pmap_collect(pmap)
boolean_t found_gdt_wired; /* flag indicating a wired page exists
in a page table's address range */
int spl;
- unsigned int i, j;
+ u_int i, j;
#ifdef DIAGNOSTIC
if (pmap == PMAP_NULL)
@@ -2622,7 +2543,7 @@ pmap_collect(pmap)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
- printf ("(pmap_collect :%x) pmap %x\n", curproc, pmap);
+ printf ("(pmap_collect: %x) pmap %x\n", curproc, pmap);
#endif
PMAP_LOCK(pmap, spl);
@@ -2642,7 +2563,7 @@ pmap_collect(pmap)
/* Segment table loop */
for (; i < j; i++, sdtp += PDT_TABLE_GROUP_SIZE) {
- sdt_va = VM_MIN_ADDRESS + PDT_TABLE_GROUP_VA_SPACE*i;
+ sdt_va = VM_MIN_ADDRESS + PDT_TABLE_GROUP_VA_SPACE * i;
gdttbl = pmap_pte(pmap, sdt_va);
@@ -2664,20 +2585,18 @@ pmap_collect(pmap)
continue; /* can't free this range */
/* figure out end of range. Watch for wraparound */
-
- sdt_vt = sdt_va <= VM_MAX_ADDRESS-PDT_TABLE_GROUP_VA_SPACE ?
- sdt_va+PDT_TABLE_GROUP_VA_SPACE :
- VM_MAX_ADDRESS;
+ sdt_vt = sdt_va <= VM_MAX_ADDRESS - PDT_TABLE_GROUP_VA_SPACE ?
+ sdt_va + PDT_TABLE_GROUP_VA_SPACE : VM_MAX_ADDRESS;
/* invalidate all maps in this range */
pmap_remove_range(pmap, sdt_va, sdt_vt);
/*
- * we can safely deallocated the page map(s)
+ * we can safely deallocate the page map(s)
*/
for (sdt = sdtp; sdt < (sdtp + PDT_TABLE_GROUP_SIZE); sdt++) {
*((sdt_entry_t *) sdt) = 0;
- *((sdt_entry_t *)(sdt+SDT_ENTRIES)) = 0;
+ *((sdt_entry_t *)(sdt + SDT_ENTRIES)) = 0;
}
/*
@@ -2695,7 +2614,7 @@ pmap_collect(pmap)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
- printf ("(pmap_collect :%x) done \n", curproc);
+ printf ("(pmap_collect: %x) done\n", curproc);
#endif
} /* pmap collect() */
@@ -2727,17 +2646,16 @@ pmap_activate(p)
struct proc *p;
{
apr_template_t apr_data;
-#ifdef notyet
-#ifdef OMRON_PMAP
+ pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map);
+ int cpu = cpu_number();
+
+#ifdef PMAP_USE_BATC
int n;
#endif
-#endif
- pmap_t pmap = p->p_vmspace->vm_map.pmap;
- int cpu = cpu_number();
#ifdef DEBUG
if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
- printf("(pmap_activate :%x) pmap 0x%x\n", p, (unsigned)pmap);
+ printf("(pmap_activate: %x) pmap 0x%p\n", p, pmap);
#endif
if (pmap != kernel_pmap) {
@@ -2752,8 +2670,8 @@ pmap_activate(p)
apr_data.field.g = 1;
apr_data.field.ci = 0;
apr_data.field.te = 1;
-#ifdef notyet
-#ifdef OMRON_PMAP
+
+#ifdef PMAP_USE_BATC
/*
* cmmu_pmap_activate will set the uapr and the batc entries,
* then flush the *USER* TLB. IF THE KERNEL WILL EVER CARE
@@ -2763,19 +2681,11 @@ pmap_activate(p)
cmmu_pmap_activate(cpu, apr_data.bits,
pmap->i_batc, pmap->d_batc);
for (n = 0; n < BATC_MAX; n++)
- *(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits;
-#else
- cmmu_set_uapr(apr_data.bits);
- cmmu_flush_tlb(0, 0, -1);
-#endif
+ *(register_t *)&batc_entry[n] = pmap->i_batc[n].bits;
#else
- /*
- * I am forcing it to not program the BATC at all. pmap.c module
- * needs major, major cleanup. XXX nivas
- */
cmmu_set_uapr(apr_data.bits);
- cmmu_flush_tlb(0, 0, -1);
-#endif /* notyet */
+ cmmu_flush_tlb(FALSE, 0, -1);
+#endif /* PMAP_USE_BATC */
/*
* Mark that this cpu is using the pmap.
@@ -2783,16 +2693,6 @@ pmap_activate(p)
SETBIT_CPUSET(cpu, &(pmap->cpus_using));
simple_unlock(&pmap->lock);
- } else {
- /*
- * kernel_pmap must be always active.
- */
-
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
- printf("(pmap_activate :%x) called for kernel_pmap\n", curproc);
-#endif
-
}
} /* pmap_activate() */
@@ -2813,7 +2713,7 @@ void
pmap_deactivate(p)
struct proc *p;
{
- pmap_t pmap = p->p_vmspace->vm_map.pmap;
+ pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map);
int cpu = cpu_number();
if (pmap != kernel_pmap) {
@@ -2827,26 +2727,26 @@ pmap_deactivate(p)
} /* pmap_deactivate() */
/*
- * Routine: PMAP_COPY_PAGE
+ * Routine: PMAP_COPY_PAGE
*
- * Function:
- * Copies the specified pages.
+ * Function:
+ * Copies the specified pages.
*
- * Parameters:
- * src PA of source page
- * dst PA of destination page
+ * Parameters:
+ * src PA of source page
+ * dst PA of destination page
*
- * Extern/Global:
- * phys_map_vaddr1
- * phys_map_vaddr2
+ * Extern/Global:
+ * phys_map_vaddr1
+ * phys_map_vaddr2
*
- * Calls:
- * m88kprotection
+ * Calls:
+ * m88k_protection
*
- * Special Assumptions:
- * no locking reauired
+ * Special Assumptions:
+ *	no locking required
*
- * This routine maps the physical pages at the 'phys_map' virtual
+ * This routine maps the physical pages at the 'phys_map' virtual
* addresses set up in pmap_bootstrap. It flushes the TLB to make the
* new mappings effective, and performs the copy.
*/
@@ -2872,13 +2772,13 @@ pmap_copy_page(src, dst)
dstpte = pmap_pte(kernel_pmap, dstva);
SPLVM(spl);
- cmmu_flush_tlb(1, srcva, PAGE_SIZE);
+ cmmu_flush_tlb(TRUE, srcva, PAGE_SIZE);
*srcpte = template | trunc_page(src);
/*
* Map destination physical address.
*/
- cmmu_flush_tlb(1, dstva, PAGE_SIZE);
+ cmmu_flush_tlb(TRUE, dstva, PAGE_SIZE);
*dstpte = template | trunc_page(dst);
SPLX(spl);
@@ -2889,23 +2789,23 @@ pmap_copy_page(src, dst)
} /* pmap_copy_page() */
/*
- * Routine: PMAP_CHANGEBIT
+ * Routine: PMAP_CHANGEBIT
*
- * Function:
- * Update the pte bits on the specified physical page.
+ * Function:
+ * Update the pte bits on the specified physical page.
*
- * Parameters:
- * pg vm_page
- * set bits to set
- * mask bits to mask
+ * Parameters:
+ * pg vm_page
+ * set bits to set
+ * mask bits to mask
*
- * Extern/Global:
- * pv_head_table, pv_lists
- * pmap_modify_list
+ * Extern/Global:
+ * pv_head_table, pv_lists
+ * pmap_cached_attributes
*
- * Calls:
- * pa_to_pvh
- * pmap_pte
+ * Calls:
+ * pa_to_pvh
+ * pmap_pte
*
* The pte bits corresponding to the page's frame index will be changed as
* requested. The PV list will be traversed.
@@ -2924,7 +2824,7 @@ pmap_changebit(pg, set, mask)
pmap_t pmap;
int spl;
vaddr_t va;
- unsigned users;
+ u_int users;
boolean_t kflush;
#ifdef DIAGNOSTIC
@@ -2940,20 +2840,19 @@ changebit_Retry:
/*
* Clear saved attributes (modify, reference)
*/
- /* *pa_to_attribute(pg) |= set; */
*pa_to_attribute(pg) &= mask;
if (pvl->pmap == PMAP_NULL) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_CBIT | CD_NORM)) == (CD_CBIT | CD_NORM))
- printf("(pmap_changebit :%x) phys addr 0x%x not mapped\n",
+ printf("(pmap_changebit: %x) phys addr 0x%x not mapped\n",
curproc, pg);
#endif
SPLX(spl);
return;
}
- /* for each listed pmap, turn off the page modified bit */
+ /* for each listed pmap, update the affected bits */
for (pvep = pvl; pvep != PV_ENTRY_NULL; pvep = pvep->next) {
pmap = pvep->pmap;
va = pvep->va;
@@ -3004,10 +2903,10 @@ changebit_Retry:
} /* pmap_changebit() */
/*
- * Routine: PMAP_CLEAR_MODIFY
+ * Routine: PMAP_CLEAR_MODIFY
*
- * Function:
- * Clears the modify bits on the specified physical page.
+ * Function:
+ * Clears the modify bits on the specified physical page.
*/
boolean_t
pmap_clear_modify(pg)
@@ -3022,30 +2921,28 @@ pmap_clear_modify(pg)
}
/*
- * Routine: PMAP_TESTBIT
+ * Routine: PMAP_TESTBIT
*
- * Function:
- * Test the modified/referenced bits of a physical page.
+ * Function:
+ * Test the modified/referenced bits of a physical page.
*
- * Parameters:
- * pg vm_page
- * bit bit to test
+ * Parameters:
+ * pg vm_page
+ * bit bit to test
*
- * Extern/Global:
- * pv_head_array, pv lists
- * pmap_modify_list
+ * Extern/Global:
+ * pv_head_array, pv lists
+ * pmap_cached_attributes
*
- * Calls:
- * simple_lock, simple_unlock
- * SPLVM, SPLX
- * PMAP_MANAGED
- * pa_to_pvh
- * pmap_pte
+ * Calls:
+ * simple_lock, simple_unlock
+ * pa_to_pvh
+ * pmap_pte
*
- * If the attribute list for the given page has the bit, this routine
+ * If the attribute list for the given page has the bit, this routine
* returns TRUE.
*
- * Otherwise, this routine walks the PV list corresponding to the
+ * Otherwise, this routine walks the PV list corresponding to the
 * given page. For each pmap/va pair, the page descriptor table entry is
* examined. If the selected bit is found on, the function returns TRUE
* immediately (doesn't need to walk remainder of list), and updates the
@@ -3077,7 +2974,7 @@ testbit_Retry:
no use looking further... */
#ifdef DEBUG
if ((pmap_con_dbg & (CD_TBIT | CD_NORM)) == (CD_TBIT | CD_NORM))
- printf("(pmap_testbit :%x) already cached a modify flag for this page\n",
+ printf("(pmap_testbit: %x) already cached a modify flag for this page\n",
curproc);
#endif
SPLX(spl);
@@ -3090,7 +2987,7 @@ testbit_Retry:
rv = (boolean_t)(*pa_to_attribute(pg) & bit);
#ifdef DEBUG
if ((pmap_con_dbg & (CD_TBIT | CD_NORM)) == (CD_TBIT | CD_NORM))
- printf("(pmap_testbit :%x) phys addr 0x%x not mapped\n",
+ printf("(pmap_testbit: %x) phys addr 0x%x not mapped\n",
curproc, pg);
#endif
SPLX(spl);
@@ -3114,7 +3011,7 @@ testbit_Retry:
*pa_to_attribute(pg) |= bit;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_TBIT | CD_FULL)) == (CD_TBIT | CD_FULL))
- printf("(pmap_testbit :%x) modified page pte@0x%x\n", curproc, (unsigned)ptep);
+ printf("(pmap_testbit: %x) modified page pte@0x%p\n", curproc, ptep);
#endif
SPLX(spl);
return (TRUE);
@@ -3128,11 +3025,11 @@ testbit_Retry:
} /* pmap_testbit() */
/*
- * Routine: PMAP_IS_MODIFIED
+ * Routine: PMAP_IS_MODIFIED
*
- * Function:
- * Return whether or not the specified physical page is modified
- * by any physical maps.
+ * Function:
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
*/
boolean_t
pmap_is_modified(pg)
@@ -3144,10 +3041,10 @@ pmap_is_modified(pg)
} /* pmap_is_modified() */
/*
- * Routine: PMAP_CLEAR_REFERENCE
+ * Routine: PMAP_CLEAR_REFERENCE
*
- * Function:
- * Clear the reference bit on the specified physical page.
+ * Function:
+ * Clear the reference bit on the specified physical page.
*/
boolean_t
pmap_clear_reference(pg)
@@ -3224,14 +3121,14 @@ pmap_kenter_pa(va, pa, prot)
{
int spl;
pt_entry_t template, *pte;
- unsigned users;
+ u_int users;
CHECK_PAGE_ALIGN (va, "pmap_kenter_pa - VA");
CHECK_PAGE_ALIGN (pa, "pmap_kenter_pa - PA");
#ifdef DEBUG
if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
- printf ("(pmap_kenter_pa :%x) va %x pa %x\n", curproc, va, pa);
+ printf ("(pmap_kenter_pa: %x) va %x pa %x\n", curproc, va, pa);
}
#endif
@@ -3244,8 +3141,7 @@ pmap_kenter_pa(va, pa, prot)
* Expand pmap to include this pte.
*/
while ((pte = pmap_pte(kernel_pmap, va)) == PT_ENTRY_NULL) {
- if (pmap_expand_kmap(va, VM_PROT_READ|VM_PROT_WRITE) == PT_ENTRY_NULL)
- panic("pmap_kenter_pa: Cannot allocate kernel pte table");
+ pmap_expand_kmap(va, VM_PROT_READ|VM_PROT_WRITE);
}
/*
@@ -3271,11 +3167,11 @@ pmap_kremove(va, len)
vsize_t len;
{
int spl;
- unsigned users;
+ u_int users;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM))
- printf("(pmap_kremove :%x) va %x len %x\n", curproc, va, len);
+ printf("(pmap_kremove: %x) va %x len %x\n", curproc, va, len);
#endif
CHECK_PAGE_ALIGN(va, "pmap_kremove addr");
@@ -3284,7 +3180,7 @@ pmap_kremove(va, len)
PMAP_LOCK(kernel_pmap, spl);
users = kernel_pmap->cpus_using;
- for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ for (len >>= PAGE_SHIFT; len != 0; len--, va += PAGE_SIZE) {
vaddr_t e = va + PAGE_SIZE;
sdt_entry_t *sdt;
pt_entry_t *pte;