Diffstat (limited to 'sys/arch/i386')
-rw-r--r--  sys/arch/i386/conf/DISKLESS            4
-rw-r--r--  sys/arch/i386/conf/GENERIC             6
-rw-r--r--  sys/arch/i386/conf/RAMDISK             4
-rw-r--r--  sys/arch/i386/conf/RAMDISKB            4
-rw-r--r--  sys/arch/i386/conf/RAMDISKC            4
-rw-r--r--  sys/arch/i386/conf/RAMDISK_CD          4
-rw-r--r--  sys/arch/i386/i386/bios.c              6
-rw-r--r--  sys/arch/i386/i386/db_memrw.c         14
-rw-r--r--  sys/arch/i386/i386/gdt.c              39
-rw-r--r--  sys/arch/i386/i386/genassym.cf        15
-rw-r--r--  sys/arch/i386/i386/locore.s           74
-rw-r--r--  sys/arch/i386/i386/machdep.c         101
-rw-r--r--  sys/arch/i386/i386/mem.c              14
-rw-r--r--  sys/arch/i386/i386/pmap.old.c       1903
-rw-r--r--  sys/arch/i386/i386/rbus_machdep.c      6
-rw-r--r--  sys/arch/i386/i386/sys_machdep.c      65
-rw-r--r--  sys/arch/i386/i386/trap.c             58
-rw-r--r--  sys/arch/i386/i386/vm_machdep.c       81
-rw-r--r--  sys/arch/i386/include/gdt.h            7
-rw-r--r--  sys/arch/i386/include/param.h          8
-rw-r--r--  sys/arch/i386/include/pmap.h         512
-rw-r--r--  sys/arch/i386/include/pmap.new.h     509
-rw-r--r--  sys/arch/i386/include/pmap.old.h     210
-rw-r--r--  sys/arch/i386/include/pte.h            5
-rw-r--r--  sys/arch/i386/include/vmparam.h       12
-rw-r--r--  sys/arch/i386/isa/npx.c                8
-rw-r--r--  sys/arch/i386/isa/vector.s             6
27 files changed, 537 insertions(+), 3142 deletions(-)
diff --git a/sys/arch/i386/conf/DISKLESS b/sys/arch/i386/conf/DISKLESS
index 135f4b52099..4a7fc94a69a 100644
--- a/sys/arch/i386/conf/DISKLESS
+++ b/sys/arch/i386/conf/DISKLESS
@@ -1,4 +1,4 @@
-# $OpenBSD: DISKLESS,v 1.32 2001/05/01 23:30:52 todd Exp $
+# $OpenBSD: DISKLESS,v 1.33 2001/05/05 23:25:26 art Exp $
# $NetBSD: DISKLESS,v 1.26 1996/05/20 18:17:16 mrg Exp $
#
# DISKLESS -- Generic machine setup for diskless boot.
@@ -14,7 +14,7 @@ option I586_CPU
option I686_CPU
option GPL_MATH_EMULATE # floating point emulation
-option UVM # use the UVM virtual memory system
+option UVM # use the UVM virtual memory system. REQUIRED
#option VM86 # Virtual 8086 emulation
#option USER_LDT # user-settable LDT; used by WINE
diff --git a/sys/arch/i386/conf/GENERIC b/sys/arch/i386/conf/GENERIC
index ea8c8089836..d58a59fcce5 100644
--- a/sys/arch/i386/conf/GENERIC
+++ b/sys/arch/i386/conf/GENERIC
@@ -1,4 +1,4 @@
-# $OpenBSD: GENERIC,v 1.256 2001/04/24 22:13:00 deraadt Exp $
+# $OpenBSD: GENERIC,v 1.257 2001/05/05 23:25:28 art Exp $
# $NetBSD: GENERIC,v 1.48 1996/05/20 18:17:23 mrg Exp $
#
# GENERIC -- everything that's currently supported
@@ -12,9 +12,9 @@ option I386_CPU # CPU classes; at least one is REQUIRED
option I486_CPU
option I586_CPU
option I686_CPU
-option GPL_MATH_EMULATE # floating point emulation
+option GPL_MATH_EMULATE # floating point emulation. required.
-option UVM # use the UVM virtual memory system
+option UVM # use the UVM virtual memory system. REQUIRED
#option VM86 # Virtual 8086 emulation
#option USER_LDT # user-settable LDT; used by WINE
diff --git a/sys/arch/i386/conf/RAMDISK b/sys/arch/i386/conf/RAMDISK
index 16731ec216c..a9abeb19b99 100644
--- a/sys/arch/i386/conf/RAMDISK
+++ b/sys/arch/i386/conf/RAMDISK
@@ -1,4 +1,4 @@
-# $OpenBSD: RAMDISK,v 1.102 2001/04/13 05:43:03 deraadt Exp $
+# $OpenBSD: RAMDISK,v 1.103 2001/05/05 23:25:29 art Exp $
machine i386 # architecture, used by config; REQUIRED
@@ -11,7 +11,7 @@ option I586_CPU
option I686_CPU
option GPL_MATH_EMULATE # floating point emulation
-option UVM # use the UVM virtual memory system
+option UVM # use the UVM virtual memory system. REQUIRED
#option DUMMY_NOPS # speed hack; recommended
diff --git a/sys/arch/i386/conf/RAMDISKB b/sys/arch/i386/conf/RAMDISKB
index bab79a4dab9..301e03b1f7c 100644
--- a/sys/arch/i386/conf/RAMDISKB
+++ b/sys/arch/i386/conf/RAMDISKB
@@ -1,4 +1,4 @@
-# $OpenBSD: RAMDISKB,v 1.36 2001/04/13 05:43:04 deraadt Exp $
+# $OpenBSD: RAMDISKB,v 1.37 2001/05/05 23:25:30 art Exp $
machine i386 # architecture, used by config; REQUIRED
@@ -11,7 +11,7 @@ option I586_CPU
option I686_CPU
option GPL_MATH_EMULATE # floating point emulation
-option UVM # use the UVM virtual memory system
+option UVM # use the UVM virtual memory system. REQUIRED
#option DUMMY_NOPS # speed hack; recommended
diff --git a/sys/arch/i386/conf/RAMDISKC b/sys/arch/i386/conf/RAMDISKC
index 2af6e2144fd..65269a10b1d 100644
--- a/sys/arch/i386/conf/RAMDISKC
+++ b/sys/arch/i386/conf/RAMDISKC
@@ -1,4 +1,4 @@
-# $OpenBSD: RAMDISKC,v 1.17 2001/04/13 05:43:04 deraadt Exp $
+# $OpenBSD: RAMDISKC,v 1.18 2001/05/05 23:25:31 art Exp $
machine i386 # architecture, used by config; REQUIRED
@@ -11,7 +11,7 @@ option I586_CPU
option I686_CPU
option GPL_MATH_EMULATE # floating point emulation
-option UVM # use the UVM virtual memory system
+option UVM # use the UVM virtual memory system. REQUIRED
#option DUMMY_NOPS # speed hack; recommended
diff --git a/sys/arch/i386/conf/RAMDISK_CD b/sys/arch/i386/conf/RAMDISK_CD
index d25b135a2ed..dca7fa69661 100644
--- a/sys/arch/i386/conf/RAMDISK_CD
+++ b/sys/arch/i386/conf/RAMDISK_CD
@@ -1,4 +1,4 @@
-# $OpenBSD: RAMDISK_CD,v 1.43 2001/04/24 22:13:00 deraadt Exp $
+# $OpenBSD: RAMDISK_CD,v 1.44 2001/05/05 23:25:32 art Exp $
machine i386 # architecture, used by config; REQUIRED
@@ -11,7 +11,7 @@ option I586_CPU
option I686_CPU
option GPL_MATH_EMULATE # floating point emulation
-option UVM # use the UVM virtual memory system
+option UVM # use the UVM virtual memory system. REQUIRED
#option DUMMY_NOPS # speed hack; recommended
diff --git a/sys/arch/i386/i386/bios.c b/sys/arch/i386/i386/bios.c
index 71770437eda..794705890e0 100644
--- a/sys/arch/i386/i386/bios.c
+++ b/sys/arch/i386/i386/bios.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bios.c,v 1.42 2001/02/28 19:16:06 mickey Exp $ */
+/* $OpenBSD: bios.c,v 1.43 2001/05/05 23:25:34 art Exp $ */
/*
* Copyright (c) 1997-2001 Michael Shalayeff
@@ -396,11 +396,7 @@ bios32_service(service, e, ei)
endpa = i386_round_page(BIOS32_END);
-#if defined(UVM)
sva = va = uvm_km_valloc(kernel_map, endpa);
-#else
- sva = va = kmem_alloc_pageable(kernel_map, endpa);
-#endif
if (va == 0)
return (0);
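
The conversion above is the template for the rest of this commit: the Mach-VM call in the deleted branch has a UVM equivalent that is now used unconditionally. A minimal sketch of the allocate-then-map idiom, not part of the commit; uvm_km_valloc() stands in for kmem_alloc_pageable(), and the mapping loop with pmap_kenter_pa() is an illustrative assumption, not the bios.c code:

/*
 * Sketch (assumptions noted above): reserve pageable kernel VA,
 * then enter the physical range into it by hand.
 */
vaddr_t
sketch_map_range(paddr_t pa, paddr_t endpa)
{
	vaddr_t va, sva;

	sva = va = uvm_km_valloc(kernel_map, endpa);
	if (va == 0)
		return (0);		/* kernel_map exhausted */
	for (; pa < endpa; pa += NBPG, va += NBPG)
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
	return (sva);
}
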
diff --git a/sys/arch/i386/i386/db_memrw.c b/sys/arch/i386/i386/db_memrw.c
index 18335b985e5..6318d2edbce 100644
--- a/sys/arch/i386/i386/db_memrw.c
+++ b/sys/arch/i386/i386/db_memrw.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: db_memrw.c,v 1.2 2001/03/22 23:36:51 niklas Exp $ */
+/* $OpenBSD: db_memrw.c,v 1.3 2001/05/05 23:25:35 art Exp $ */
/* $NetBSD: db_memrw.c,v 1.6 1999/04/12 20:38:19 pk Exp $ */
/*
@@ -60,10 +60,6 @@ db_read_bytes(addr, size, data)
*data++ = *src++;
}
-#ifndef PMAP_NEW
-pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
-#endif
-
/*
* Write bytes to kernel address space for debugger.
*/
@@ -84,22 +80,14 @@ db_write_bytes(addr, size, data)
if (addr >= VM_MIN_KERNEL_ADDRESS &&
addr < (vm_offset_t)&etext) {
-#ifdef PMAP_NEW
ptep0 = PTE_BASE + i386_btop(addr);
-#else
- ptep0 = pmap_pte(pmap_kernel(), addr);
-#endif
oldmap0 = *ptep0;
*(int *)ptep0 |= /* INTEL_PTE_WRITE */ PG_RW;
addr1 = i386_trunc_page(addr + size - 1);
if (i386_trunc_page(addr) != addr1) {
/* data crosses a page boundary */
-#ifdef PMAP_NEW
ptep1 = PTE_BASE + i386_btop(addr1);
-#else
- ptep1 = pmap_pte(pmap_kernel(), addr1);
-#endif
oldmap1 = *ptep1;
*(int *)ptep1 |= /* INTEL_PTE_WRITE */ PG_RW;
}
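
The PTE_BASE arithmetic above is what replaces pmap_pte() for kernel addresses under PMAP_NEW. A sketch of the lookup, not part of the commit; the same computation appears as kvtopte() in the machdep.c hunks below:

/*
 * The PD slot PDSLOT_PTE points back at the page directory itself,
 * so all PTEs are visible as one linear array at PTE_BASE and the
 * PTE for va is plain pointer arithmetic (i386_btop(va) == va >> PGSHIFT).
 */
static __inline pt_entry_t *
sketch_kvtopte(vaddr_t va)
{
	return (PTE_BASE + i386_btop(va));
}
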
diff --git a/sys/arch/i386/i386/gdt.c b/sys/arch/i386/i386/gdt.c
index e8e4ba691db..b5458b4e757 100644
--- a/sys/arch/i386/i386/gdt.c
+++ b/sys/arch/i386/i386/gdt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: gdt.c,v 1.13 2001/04/30 13:17:38 art Exp $ */
+/* $OpenBSD: gdt.c,v 1.14 2001/05/05 23:25:35 art Exp $ */
/* $NetBSD: gdt.c,v 1.8 1996/05/03 19:42:06 christos Exp $ */
/*-
@@ -45,9 +45,7 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include <machine/gdt.h>
@@ -168,16 +166,9 @@ gdt_init()
min_len = MINGDTSIZ * sizeof(union descriptor);
gdt_size = MINGDTSIZ;
-#if defined(UVM)
dynamic_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
uvm_map_pageable(kernel_map, (vaddr_t)dynamic_gdt,
(vaddr_t)dynamic_gdt + min_len, FALSE);
-#else
- dynamic_gdt = (union descriptor *)kmem_alloc_pageable(kernel_map,
- max_len);
- vm_map_pageable(kernel_map, (vm_offset_t)dynamic_gdt,
- (vm_offset_t)dynamic_gdt + min_len, FALSE);
-#endif
bcopy(gdt, dynamic_gdt, NGDT * sizeof(union descriptor));
setregion(&region, dynamic_gdt, max_len - 1);
@@ -193,13 +184,8 @@ gdt_grow()
gdt_size <<= 1;
new_len = old_len << 1;
-#if defined(UVM)
uvm_map_pageable(kernel_map, (vaddr_t)dynamic_gdt + old_len,
(vaddr_t)dynamic_gdt + new_len, FALSE);
-#else
- vm_map_pageable(kernel_map, (vm_offset_t)dynamic_gdt + old_len,
- (vm_offset_t)dynamic_gdt + new_len, FALSE);
-#endif
}
void
@@ -210,13 +196,8 @@ gdt_shrink()
old_len = gdt_size * sizeof(union descriptor);
gdt_size >>= 1;
new_len = old_len >> 1;
-#if defined(UVM)
uvm_map_pageable(kernel_map, (vaddr_t)dynamic_gdt + new_len,
(vaddr_t)dynamic_gdt + old_len, TRUE);
-#else
- vm_map_pageable(kernel_map, (vm_offset_t)dynamic_gdt + new_len,
- (vm_offset_t)dynamic_gdt + old_len, TRUE);
-#endif
}
/*
@@ -306,13 +287,8 @@ tss_free(pcb)
}
void
-#ifdef PMAP_NEW
ldt_alloc(pmap, ldt, len)
struct pmap *pmap;
-#else
-ldt_alloc(pcb, ldt, len)
- struct pcb *pcb;
-#endif
union descriptor *ldt;
size_t len;
{
@@ -321,33 +297,20 @@ ldt_alloc(pcb, ldt, len)
slot = gdt_get_slot();
setsegment(&dynamic_gdt[slot].sd, ldt, len - 1, SDT_SYSLDT, SEL_KPL, 0,
0);
-#ifdef PMAP_NEW
simple_lock(&pmap->pm_lock);
pmap->pm_ldt_sel = GSEL(slot, SEL_KPL);
simple_unlock(&pmap->pm_lock);
-#else
- pcb->pcb_ldt_sel = GSEL(slot, SEL_KPL);
-#endif
}
void
-#ifdef PMAP_NEW
ldt_free(pmap)
struct pmap *pmap;
-#else
-ldt_free(pcb)
- struct pcb *pcb;
-#endif
{
int slot;
-#ifdef PMAP_NEW
simple_lock(&pmap->pm_lock);
slot = IDXSEL(pmap->pm_ldt_sel);
simple_unlock(&pmap->pm_lock);
-#else
- slot = IDXSEL(pcb->pcb_ldt_sel);
-#endif
gdt_put_slot(slot);
}
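
gdt_init(), gdt_grow() and gdt_shrink() above share one idiom: reserve the maximum VA range once, then wire and unwire pages as the table resizes. A fragment restating it, not from the commit; dynamic_gdt, max_len and min_len are the gdt.c variables above, and the final uvm_map_pageable() argument selects unwire (TRUE) versus wire (FALSE):

/* reserve max_len of unbacked kernel VA for the descriptor table */
dynamic_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
/* fault in and wire only the portion currently in use */
uvm_map_pageable(kernel_map, (vaddr_t)dynamic_gdt,
    (vaddr_t)dynamic_gdt + min_len, FALSE);
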
diff --git a/sys/arch/i386/i386/genassym.cf b/sys/arch/i386/i386/genassym.cf
index d6c8d82511c..b75e396d006 100644
--- a/sys/arch/i386/i386/genassym.cf
+++ b/sys/arch/i386/i386/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.8 2001/03/22 23:36:51 niklas Exp $
+# $OpenBSD: genassym.cf,v 1.9 2001/05/05 23:25:36 art Exp $
#
# Copyright (c) 1982, 1990 The Regents of the University of California.
# All rights reserved.
@@ -45,9 +45,7 @@ endif
include <vm/vm.h>
-ifdef UVM
include <uvm/uvm_extern.h>
-endif
include <machine/trap.h>
include <machine/pmap.h>
@@ -73,16 +71,10 @@ endif
define SRUN SRUN
# values for page tables
-ifdef PMAP_NEW
define PDSLOT_KERN PDSLOT_KERN
define PDSLOT_PTE PDSLOT_PTE
define NKPTP_MIN NKPTP_MIN
define NKPTP_MAX NKPTP_MAX
-else
-define PTDPTDI PTDPTDI
-define KPTDI KPTDI
-define NKPDE NKPDE
-endif
define APTDPTDI APTDPTDI
# values for virtual memory
@@ -101,13 +93,8 @@ define P_FLAG offsetof(struct proc, p_flag)
define P_SYSTEM P_SYSTEM
# interrupt/fault metering
-ifdef UVM
define V_TRAP offsetof(struct uvmexp, traps)
define V_INTR offsetof(struct uvmexp, intrs)
-else
-define V_TRAP offsetof(struct vmmeter, v_trap)
-define V_INTR offsetof(struct vmmeter, v_intr)
-endif
# pcb fields
define PCB_CR3 offsetof(struct pcb, pcb_cr3)
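
The V_TRAP/V_INTR offsets above feed assembly such as "incl _C_LABEL(uvmexp)+V_TRAP" in the locore.s diff below. A one-line C equivalent, not part of the commit, showing where the counters now live:

/* with UVM the metering fields moved from struct vmmeter to uvmexp */
static __inline void
sketch_count_trap(void)
{
	uvmexp.traps++;		/* formerly cnt.v_trap++ */
}
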
diff --git a/sys/arch/i386/i386/locore.s b/sys/arch/i386/i386/locore.s
index f05505d6b26..41f01f844c0 100644
--- a/sys/arch/i386/i386/locore.s
+++ b/sys/arch/i386/i386/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.54 2001/03/22 23:36:51 niklas Exp $ */
+/* $OpenBSD: locore.s,v 1.55 2001/05/05 23:25:37 art Exp $ */
/* $NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $ */
/*-
@@ -121,17 +121,9 @@
* Within PTmap, the page directory can be found (third indirection).
*/
.globl _PTmap,_PTD,_PTDpde
-#ifdef PMAP_NEW
.set _PTmap,(PDSLOT_PTE << PDSHIFT)
.set _PTD,(_PTmap + PDSLOT_PTE * NBPG)
.set _PTDpde,(_PTD + PDSLOT_PTE * 4) # XXX 4 == sizeof pde
-#else
- .set _PTmap,(PTDPTDI << PDSHIFT)
- .set _PTD,(_PTmap + PTDPTDI * NBPG)
- .set _PTDpde,(_PTD + PTDPTDI * 4) # XXX 4 == sizeof pde
- .globl _Sysmap
- .set _Sysmap,(_PTmap + KPTDI * NBPG)
-#endif
/*
* APTmap, APTD is the alternate recursive pagemap.
@@ -428,11 +420,7 @@ try586: /* Use the `cpuid' instruction. */
#define PROC0PDIR ((0) * NBPG)
#define PROC0STACK ((1) * NBPG)
#define SYSMAP ((1+UPAGES) * NBPG)
-#ifdef PMAP_NEW
#define TABLESIZE ((1+UPAGES) * NBPG) /* + nkpde * NBPG */
-#else
-#define TABLESIZE ((1+UPAGES+NKPDE) * NBPG)
-#endif
/* Clear the BSS. */
movl $RELOC(_edata),%edi
@@ -462,7 +450,6 @@ try586: /* Use the `cpuid' instruction. */
addl $PGOFSET, %esi # page align up
andl $~PGOFSET, %esi
-#ifdef PMAP_NEW
/*
* Calculate the size of the kernel page table directory, and
* how many entries it will have.
@@ -483,11 +470,6 @@ try586: /* Use the `cpuid' instruction. */
addl %esi,%ecx # end of tables
subl %edi,%ecx # size of tables
shrl $2,%ecx
-#else
- /* Clear memory for bootstrap tables. */
- movl %esi, %edi
- movl $((TABLESIZE + 3) >> 2), %ecx # size of tables
-#endif
xorl %eax, %eax
cld
rep
@@ -531,14 +513,10 @@ try586: /* Use the `cpuid' instruction. */
/* Map the data, BSS, and bootstrap tables read-write. */
leal (PG_V|PG_KW)(%edx),%eax
-#ifdef PMAP_NEW
movl RELOC(_nkpde),%ecx
shll $PGSHIFT,%ecx
addl $TABLESIZE,%ecx
addl %esi,%ecx # end of tables
-#else
- leal (TABLESIZE)(%esi),%ecx # end of tables
-#endif
subl %edx,%ecx # subtract end of text
shrl $PGSHIFT,%ecx
fillkpt
@@ -551,46 +529,23 @@ try586: /* Use the `cpuid' instruction. */
/*
* Construct a page table directory.
*/
-#ifdef PMAP_NEW
movl RELOC(_nkpde),%ecx # count of pde s,
leal (PROC0PDIR+0*4)(%esi),%ebx # where temp maps!
leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0
fillkpt
-#else
-/*
- * Install a PDE for temporary double map of kernel text.
- * Maps two pages, in case the kernel is larger than 4M.
- * XXX: should the number of pages to map be decided at run-time?
- */
- leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # calc Sysmap physaddr
- movl %eax,(PROC0PDIR+0*4)(%esi) # map it in
- addl $NBPG, %eax # 2nd Sysmap page
- movl %eax,(PROC0PDIR+1*4)(%esi) # map it too
- /* code below assumes %eax == sysmap physaddr, so we adjust it back */
- subl $NBPG, %eax
-#endif
/*
* Map kernel PDEs: this is the real mapping used
* after the temp mapping outlives its usefulness.
*/
-#ifdef PMAP_NEW
movl RELOC(_nkpde),%ecx # count of pde s,
leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # map them high
leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0
-#else
- movl $NKPDE,%ecx # count of pde's
- leal (PROC0PDIR+KPTDI*4)(%esi),%ebx # map them high
-#endif
fillkpt
/* Install a PDE recursively mapping page directory as a page table! */
leal (PROC0PDIR+PG_V|PG_KW)(%esi),%eax # pte for ptd
-#ifdef PMAP_NEW
movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot
-#else
- movl %eax,(PROC0PDIR+PTDPTDI*4)(%esi) # phys addr from above
-#endif
/* Save phys. addr of PTD, for libkvm. */
movl %esi,RELOC(_PTDpaddr)
@@ -608,27 +563,18 @@ try586: /* Use the `cpuid' instruction. */
begin:
/* Now running relocated at KERNBASE. Remove double mapping. */
-#ifdef PMAP_NEW
movl _nkpde,%ecx # for this many pde s,
leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
addl $(KERNBASE), %ebx # now use relocated address
1: movl $0,(%ebx)
addl $4,%ebx # next pde
loop 1b
-#else
- movl $0,(PROC0PDIR+0*4)(%esi)
- movl $0,(PROC0PDIR+1*4)(%esi)
-#endif
/* Relocate atdevbase. */
-#ifdef PMAP_NEW
movl _nkpde,%edx
shll $PGSHIFT,%edx
addl $(TABLESIZE+KERNBASE),%edx
addl %esi,%edx
-#else
- leal (TABLESIZE+KERNBASE)(%esi),%edx
-#endif
movl %edx,_atdevbase
/* Set up bootstrap stack. */
@@ -638,14 +584,10 @@ begin:
movl %esi,PCB_CR3(%eax) # pcb->pcb_cr3
xorl %ebp,%ebp # mark end of frames
-#ifdef PMAP_NEW
movl _nkpde,%eax
shll $PGSHIFT,%eax
addl $TABLESIZE,%eax
addl %esi,%eax # skip past stack and page tables
-#else
- leal (TABLESIZE)(%esi),%eax # skip past stack and page tables
-#endif
pushl %eax
call _init386 # wire 386 chip for unix operation
addl $4,%esp
@@ -824,7 +766,6 @@ ENTRY(bcopyb)
cld
ret
-#if defined(UVM)
/*
* kcopy(caddr_t from, caddr_t to, size_t len);
* Copy len bytes, abort on fault.
@@ -882,7 +823,6 @@ ENTRY(kcopy)
popl %esi
xorl %eax,%eax
ret
-#endif
/*
* bcopyw(caddr_t from, caddr_t to, size_t len);
@@ -1668,11 +1608,7 @@ ENTRY(longjmp)
* actually to shrink the 0-127 range of priorities into the 32 available
* queues.
*/
-#ifdef UVM
.globl _C_LABEL(whichqs),_C_LABEL(qs),_C_LABEL(uvmexp),_C_LABEL(panic)
-#else
- .globl _whichqs,_qs,_cnt,_panic
-#endif
/*
* setrunqueue(struct proc *p);
@@ -1967,12 +1903,8 @@ switch_return:
* Switch to proc0's saved context and deallocate the address space and kernel
* stack for p. Then jump into cpu_switch(), as if we were in proc0 all along.
*/
-#if defined(UVM)
.globl _C_LABEL(proc0),_C_LABEL(uvmspace_free),_C_LABEL(kernel_map)
.globl _C_LABEL(uvm_km_free),_C_LABEL(tss_free)
-#else
- .globl _proc0,_vmspace_free,_kernel_map,_kmem_free,_tss_free
-#endif
ENTRY(switch_exit)
movl 4(%esp),%edi # old process
movl $_proc0,%ebx
@@ -2148,11 +2080,7 @@ IDTVEC(fpu)
INTRENTRY
pushl _cpl # if_ppl in intrframe
pushl %esp # push address of intrframe
-#if defined(UVM)
incl _C_LABEL(uvmexp)+V_TRAP
-#else
- incl _cnt+V_TRAP
-#endif
call _npxintr
addl $8,%esp # pop address and if_ppl
INTRFASTEXIT
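
The recurring nkpde arithmetic above is the heart of the locore.s change: the kernel PDE count is now computed at boot rather than fixed at the compile-time NKPDE. A C restatement of the bootstrap table sizing, not part of the commit:

/*
 * TABLESIZE covers the proc0 page directory and kernel stack; with
 * PMAP_NEW one page-table page per kernel PDE is added at run time.
 */
static __inline size_t
sketch_table_bytes(int nkpde)
{
	return ((1 + UPAGES) * NBPG + nkpde * NBPG);
}
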
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 30ff3a17f89..2d06dc4c7d3 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.158 2001/05/05 22:33:45 art Exp $ */
+/* $OpenBSD: machdep.c,v 1.159 2001/05/05 23:25:37 art Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -123,9 +123,7 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include <sys/sysctl.h>
@@ -198,15 +196,9 @@ extern struct proc *npxproc;
*/
#define CHUNKSZ (3 * 1024 * 1024)
#define ISADMA_LIMIT (16 * 1024 * 1024) /* XXX wrong place */
-#ifdef UVM
#define ALLOC_PGS(sz, limit, pgs) \
uvm_pglistalloc((sz), 0, (limit), PAGE_SIZE, 0, &(pgs), 1, 0)
#define FREE_PGS(pgs) uvm_pglistfree(&(pgs))
-#else
-#define ALLOC_PGS(sz, limit, pgs) \
- vm_page_alloc_memory((sz), 0, (limit), PAGE_SIZE, 0, &(pgs), 1, 0)
-#define FREE_PGS(pgs) vm_page_free_memory(&(pgs))
-#endif
/* the following is used externally (sysctl_hw) */
char machine[] = "i386"; /* cpu "architecture" */
@@ -255,13 +247,9 @@ int i386_fpu_fdivbug;
bootarg_t *bootargp;
vm_offset_t avail_end;
-#if defined(UVM)
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
-#else
-vm_map_t buffer_map;
-#endif
int kbd_reset;
@@ -385,11 +373,7 @@ cpu_startup()
* and then give everything true virtual addresses.
*/
sz = (int)allocsys((caddr_t)0);
-#if defined(UVM)
if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
-#else
- if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
-#endif
panic("startup: no room for tables");
if (allocsys(v) - v != sz)
panic("startup: table size inconsistency");
@@ -404,24 +388,14 @@ cpu_startup()
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
-#if defined(UVM)
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
-#else
- exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 16*NCARGS,
- TRUE);
-#endif
/*
* Allocate a submap for physio
*/
-#if defined(UVM)
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, 0, FALSE, NULL);
-#else
- phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE,
- TRUE);
-#endif
/*
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size
@@ -430,26 +404,16 @@ cpu_startup()
mclrefcnt = (char *)malloc(NMBCLUSTERS+PAGE_SIZE/MCLBYTES, M_MBUF,
M_NOWAIT);
bzero(mclrefcnt, NMBCLUSTERS+PAGE_SIZE/MCLBYTES);
-#if defined(UVM)
mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
-#else
- mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
- VM_MBUF_SIZE, FALSE);
-#endif
/*
* Initialize timeouts
*/
timeout_init();
-#if defined(UVM)
printf("avail mem = %lu (%uK)\n", ptoa(uvmexp.free),
ptoa(uvmexp.free)/1024);
-#else
- printf("avail mem = %lu (%uK)\n", ptoa(cnt.v_free_count),
- ptoa(cnt.v_free_count)/1024);
-#endif
printf("using %d buffers containing %u bytes (%uK) of memory\n",
nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024);
@@ -574,9 +538,6 @@ allocsys(v)
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
-#if !defined(UVM)
- valloc(swbuf, struct buf, nswbuf);
-#endif
valloc(buf, struct buf, nbuf);
return v;
}
@@ -592,21 +553,12 @@ setup_buffers(maxaddr)
vm_page_t pg;
size = MAXBSIZE * nbuf;
-#if defined(UVM)
if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
panic("cpu_startup: cannot allocate VM for buffers");
addr = (vaddr_t)buffers;
-#else
- buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
- maxaddr, size, TRUE);
- addr = (vm_offset_t)buffers;
- if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
- &addr, size, FALSE) != KERN_SUCCESS)
- panic("startup: cannot allocate buffers");
-#endif
base = bufpages / nbuf;
residual = bufpages % nbuf;
@@ -2035,9 +1987,6 @@ extern int IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
#if defined(I586_CPU)
extern int IDTVEC(f00f_redirect);
-#ifndef PMAP_NEW
-pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
-#endif
int cpu_f00f_bug = 0;
@@ -2050,11 +1999,7 @@ fix_f00f()
void *p;
/* Allocate two new pages */
-#if defined(UVM)
va = uvm_km_zalloc(kernel_map, NBPG*2);
-#else
- va = kmem_alloc(kernel_map, NBPG*2);
-#endif
p = (void *)(va + NBPG - 7*sizeof(*idt));
/* Copy over old IDT */
@@ -2066,11 +2011,7 @@ fix_f00f()
SEL_KPL, GCODE_SEL);
/* Map first page RO */
-#ifdef PMAP_NEW
pte = PTE_BASE + i386_btop(va);
-#else
- pte = pmap_pte(pmap_kernel(), va);
-#endif
*pte &= ~PG_RW;
/* Reload idtr */
@@ -2257,9 +2198,7 @@ init386(first_avail)
#endif
for (i = 0; i < ndumpmem; i++) {
int32_t a, e;
-#ifdef UVM
int32_t lim;
-#endif
a = dumpmem[i].start;
e = dumpmem[i].end;
@@ -2269,7 +2208,6 @@ init386(first_avail)
e = atop(avail_end);
if (a < e) {
-#ifdef UVM
if (a < atop(16 * 1024 * 1024)) {
lim = MIN(atop(16 * 1024 * 1024), e);
#ifdef DEBUG
@@ -2291,9 +2229,6 @@ init386(first_avail)
uvm_page_physload(a, e, a, e,
VM_FREELIST_DEFAULT);
}
-#else
- vm_page_physload(a, e, a, e);
-#endif
}
}
#ifdef DEBUG
@@ -2726,9 +2661,7 @@ bus_mem_add_mapping(bpa, size, cacheable, bshp)
{
u_long pa, endpa;
vm_offset_t va;
-#ifdef PMAP_NEW
pt_entry_t *pte;
-#endif
pa = i386_trunc_page(bpa);
endpa = i386_round_page(bpa + size);
@@ -2738,11 +2671,7 @@ bus_mem_add_mapping(bpa, size, cacheable, bshp)
panic("bus_mem_add_mapping: overflow");
#endif
-#if defined(UVM)
va = uvm_km_valloc(kernel_map, endpa - pa);
-#else
- va = kmem_alloc_pageable(kernel_map, endpa - pa);
-#endif
if (va == 0)
return (ENOMEM);
@@ -2759,19 +2688,12 @@ bus_mem_add_mapping(bpa, size, cacheable, bshp)
* on those machines.
*/
if (cpu_class != CPUCLASS_386) {
-#ifdef PMAP_NEW
pte = kvtopte(va);
if (cacheable)
*pte &= ~PG_N;
else
*pte |= PG_N;
pmap_update_pg(va);
-#else
- if (!cacheable)
- pmap_changebit(pa, PG_N, ~0);
- else
- pmap_changebit(pa, 0, ~PG_N);
-#endif
}
}
@@ -2816,11 +2738,7 @@ bus_space_unmap(t, bsh, size)
/*
* Free the kernel virtual mapping.
*/
-#if defined(UVM)
uvm_km_free(kernel_map, va, endva - va);
-#else
- kmem_free(kernel_map, va, endva - va);
-#endif
break;
default:
@@ -3137,11 +3055,7 @@ _bus_dmamem_free(t, segs, nsegs)
}
}
-#if defined(UVM)
uvm_pglistfree(&mlist);
-#else
- vm_page_free_memory(&mlist);
-#endif
}
/*
@@ -3162,11 +3076,7 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
int curseg;
size = round_page(size);
-#if defined(UVM)
va = uvm_km_valloc(kmem_map, size);
-#else
- va = kmem_alloc_pageable(kmem_map, size);
-#endif
if (va == 0)
return (ENOMEM);
@@ -3204,11 +3114,7 @@ _bus_dmamem_unmap(t, kva, size)
#endif
size = round_page(size);
-#if defined(UVM)
uvm_km_free(kmem_map, (vm_offset_t)kva, size);
-#else
- kmem_free(kmem_map, (vm_offset_t)kva, size);
-#endif
}
/*
@@ -3277,13 +3183,8 @@ _bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
* Allocate pages from the VM system.
*/
TAILQ_INIT(&mlist);
-#if defined(UVM)
error = uvm_pglistalloc(size, low, high,
alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
-#else
- error = vm_page_alloc_memory(size, low, high,
- alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
-#endif
if (error)
return (error);
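
The hunk above is the DMA allocator's half of the conversion: uvm_pglistalloc() and uvm_pglistfree() (see _bus_dmamem_free() earlier) replace vm_page_alloc_memory() and vm_page_free_memory(). A fragment of the call as used here, not from the commit; size, low, high, alignment, boundary, nsegs and flags are the surrounding function's parameters:

struct pglist mlist;
int error;

TAILQ_INIT(&mlist);
error = uvm_pglistalloc(size, low, high, alignment, boundary,
    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);	/* last arg: waitok */
if (error)
	return (error);
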
diff --git a/sys/arch/i386/i386/mem.c b/sys/arch/i386/i386/mem.c
index c744360dba3..7c1c9aaa619 100644
--- a/sys/arch/i386/i386/mem.c
+++ b/sys/arch/i386/i386/mem.c
@@ -1,5 +1,5 @@
/* $NetBSD: mem.c,v 1.31 1996/05/03 19:42:19 christos Exp $ */
-/* $OpenBSD: mem.c,v 1.16 2001/05/05 20:56:38 art Exp $ */
+/* $OpenBSD: mem.c,v 1.17 2001/05/05 23:25:39 art Exp $ */
/*
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1982, 1986, 1990, 1993
@@ -59,9 +59,7 @@
#include <vm/vm.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include "mtrr.h"
@@ -189,15 +187,9 @@ mmrw(dev, uio, flags)
case 1:
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
-#if defined(UVM)
if (!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
-#else
- if (!kernacc((caddr_t)v, c,
- uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
- return (EFAULT);
-#endif
error = uiomove((caddr_t)v, c, uio);
continue;
@@ -258,11 +250,7 @@ mmmmap(dev, off, prot)
/* minor device 1 is kernel memory */
case 1:
/* XXX - writability, executability checks? */
-#if defined(UVM)
if (!uvm_kernacc((caddr_t)off, NBPG, B_READ))
-#else
- if (!kernacc((caddr_t)off, NBPG, B_READ))
-#endif
return -1;
return i386_btop(vtophys(off));
#ifdef APERTURE
diff --git a/sys/arch/i386/i386/pmap.old.c b/sys/arch/i386/i386/pmap.old.c
deleted file mode 100644
index 48ce1fc730b..00000000000
--- a/sys/arch/i386/i386/pmap.old.c
+++ /dev/null
@@ -1,1903 +0,0 @@
-/* $OpenBSD: pmap.old.c,v 1.38 2001/05/05 21:26:36 art Exp $ */
-/* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
-
-/*
- * Copyright (c) 1993, 1994, 1995 Charles M. Hannum. All rights reserved.
- * Copyright (c) 1991 Regents of the University of California.
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department and William Jolitz of UUNET Technologies Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)pmap.c 7.7 (Berkeley) 5/12/91
- */
-
-/*
- * Derived originally from an old hp300 version by Mike Hibler. The version
- * by William Jolitz has been heavily modified to allow non-contiguous
- * mapping of physical memory by Wolfgang Solfrank, and to fix several bugs
- * and greatly speed it up by Charles Hannum.
- *
- * A recursive map [a pde which points to the page directory] is used to map
- * the page tables using the pagetables themselves. This is done to reduce
- * the impact on kernel virtual memory for lots of sparse address space, and
- * to reduce the cost of memory to each process.
- */
-
-/*
- * Manages physical address maps.
- *
- * In addition to hardware address maps, this
- * module is called upon to provide software-use-only
- * maps which may or may not be stored in the same
- * form as hardware maps. These pseudo-maps are
- * used to store intermediate results from copy
- * operations to and from address spaces.
- *
- * Since the information managed by this module is
- * also stored by the logical address mapping module,
- * this module may throw away valid virtual-to-physical
- * mappings at almost any time. However, invalidations
- * of virtual-to-physical mappings must be done as
- * requested.
- *
- * In order to cope with hardware architectures which
- * make virtual-to-physical map invalidates expensive,
- * this module may delay invalidate or reduced protection
- * operations until such time as they are actually
- * necessary. This module is given full information as
- * to which processors are currently using which maps,
- * and to when physical maps must be made correct.
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/proc.h>
-#include <sys/malloc.h>
-#include <sys/user.h>
-
-#include <vm/vm.h>
-#include <vm/vm_kern.h>
-#include <vm/vm_page.h>
-
-#if defined(UVM)
-#include <uvm/uvm.h>
-#endif
-
-#include <machine/cpu.h>
-
-#include <dev/isa/isareg.h>
-#include <stand/boot/bootarg.h>
-#include <i386/isa/isa_machdep.h>
-
-#include "isa.h"
-#include "isadma.h"
-
-/*
- * Allocate various and sundry SYSMAPs used in the days of old VM
- * and not yet converted. XXX.
- */
-#define BSDVM_COMPAT 1
-
-#ifdef DEBUG
-struct {
- int kernel; /* entering kernel mapping */
- int user; /* entering user mapping */
- int ptpneeded; /* needed to allocate a PT page */
- int pwchange; /* no mapping change, just wiring or protection */
- int wchange; /* no mapping change, just wiring */
- int mchange; /* was mapped but mapping to different page */
- int managed; /* a managed page */
- int firstpv; /* first mapping for this PA */
- int secondpv; /* second mapping for this PA */
- int ci; /* cache inhibited */
- int unmanaged; /* not a managed page */
- int flushes; /* cache flushes */
-} enter_stats;
-struct {
- int calls;
- int removes;
- int pvfirst;
- int pvsearch;
- int ptinvalid;
- int uflushes;
- int sflushes;
-} remove_stats;
-
-int pmapdebug = 0 /* 0xffff */;
-#define PDB_FOLLOW 0x0001
-#define PDB_INIT 0x0002
-#define PDB_ENTER 0x0004
-#define PDB_REMOVE 0x0008
-#define PDB_CREATE 0x0010
-#define PDB_PTPAGE 0x0020
-#define PDB_CACHE 0x0040
-#define PDB_BITS 0x0080
-#define PDB_COLLECT 0x0100
-#define PDB_PROTECT 0x0200
-#define PDB_PDRTAB 0x0400
-#define PDB_PARANOIA 0x2000
-#define PDB_WIRING 0x4000
-#define PDB_PVDUMP 0x8000
-#endif
-
-/*
- * Get PDEs and PTEs for user/kernel address space
- */
-#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PDSHIFT)&1023]))
-
-/*
- * Empty PTEs and PDEs are always 0, but checking only the valid bit allows
- * the compiler to generate `testb' rather than `testl'.
- */
-#define pmap_pde_v(pde) (*(pde) & PG_V)
-#define pmap_pte_pa(pte) (*(pte) & PG_FRAME)
-#define pmap_pte_w(pte) (*(pte) & PG_W)
-#define pmap_pte_m(pte) (*(pte) & PG_M)
-#define pmap_pte_u(pte) (*(pte) & PG_U)
-#define pmap_pte_v(pte) (*(pte) & PG_V)
-#define pmap_pte_set_w(pte, v) ((v) ? (*(pte) |= PG_W) : (*(pte) &= ~PG_W))
-#define pmap_pte_set_prot(pte, v) ((*(pte) &= ~PG_PROT), (*(pte) |= (v)))
-
-/*
- * Given a map and a machine independent protection code,
- * convert to a vax protection code.
- */
-pt_entry_t protection_codes[8];
-
-struct pmap kernel_pmap_store;
-
-vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
-vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
-int npages;
-
-boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
-TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
-int pv_nfree;
-
-pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
-struct pv_entry * pmap_alloc_pv __P((void));
-void pmap_free_pv __P((struct pv_entry *));
-void i386_protection_init __P((void));
-void pmap_collect_pv __P((void));
-__inline void pmap_remove_pv __P((pmap_t, vm_offset_t, struct pv_entry *));
-__inline void pmap_enter_pv __P((pmap_t, vm_offset_t, struct pv_entry *));
-void pmap_remove_all __P((vm_offset_t));
-void pads __P((pmap_t pm));
-void pmap_dump_pvlist __P((vm_offset_t phys, char *m));
-void pmap_pvdump __P((vm_offset_t pa));
-
-#if BSDVM_COMPAT
-#include <sys/msgbuf.h>
-
-/*
- * All those kernel PT submaps that BSD is so fond of
- */
-pt_entry_t *CMAP1, *CMAP2, *XXX_mmap;
-caddr_t CADDR1, CADDR2, vmmap;
-pt_entry_t *msgbufmap, *bootargmap;
-#endif /* BSDVM_COMPAT */
-
-/*
- * Bootstrap the system enough to run with virtual memory.
- * Map the kernel's code and data, and allocate the system page table.
- *
- * On the I386 this is called after mapping has already been enabled
- * and just syncs the pmap module with what has already been done.
- * [We can't call it easily with mapping off since the kernel is not
- * mapped with PA == VA, hence we would have to relocate every address
- * from the linked base (virtual) address to the actual (physical)
- * address starting relative to 0]
- */
-
-void
-pmap_bootstrap(virtual_start)
- vm_offset_t virtual_start;
-{
-#if BSDVM_COMPAT
- vm_offset_t va;
- pt_entry_t *pte;
-#endif
-
- /* Register the page size with the vm system */
-#if defined(UVM)
- uvm_setpagesize();
-#else
- vm_set_page_size();
-#endif
-
- virtual_avail = virtual_start;
- virtual_end = VM_MAX_KERNEL_ADDRESS;
-
- /*
- * Initialize protection array.
- */
- i386_protection_init();
-
-#ifdef notdef
- /*
- * Create Kernel page directory table and page maps.
- * [ currently done in locore. i have wild and crazy ideas -wfj ]
- */
- bzero(firstaddr, (1+NKPDE)*NBPG);
- pmap_kernel()->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
- pmap_kernel()->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
-
- firstaddr += NBPG;
- for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
- x < i386_btod(VM_MIN_KERNEL_ADDRESS) + NKPDE; x++) {
- pd_entry_t *pde;
- pde = pmap_kernel()->pm_pdir + x;
- *pde = (firstaddr + x*NBPG) | PG_V | PG_KW;
- }
-#else
- pmap_kernel()->pm_pdir =
- (pd_entry_t *)(proc0.p_addr->u_pcb.pcb_cr3 + KERNBASE);
-#endif
-
- simple_lock_init(&pmap_kernel()->pm_lock);
- pmap_kernel()->pm_count = 1;
-
-#if BSDVM_COMPAT
- /*
- * Allocate all the submaps we need
- */
-#define SYSMAP(c, p, v, n) \
- v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);
-
- va = virtual_avail;
- pte = pmap_pte(pmap_kernel(), va);
-
- SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
- SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
- SYSMAP(caddr_t ,XXX_mmap ,vmmap ,1 )
- SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,btoc(MSGBUFSIZE))
- SYSMAP(bootarg_t * ,bootargmap ,bootargp ,btoc(bootargc))
- virtual_avail = va;
-#endif
-
- /*
- * Reserve pmap space for mapping physical pages during dump.
- */
- virtual_avail = reserve_dumppages(virtual_avail);
-
- /* flawed, no mappings?? */
- if (ctob(physmem) > 31*1024*1024 && MAXKPDE != NKPDE) {
- vm_offset_t p;
- int i;
-
- p = virtual_avail;
- virtual_avail += (MAXKPDE-NKPDE+1) * NBPG;
- bzero((void *)p, (MAXKPDE-NKPDE+1) * NBPG);
- p = round_page(p);
- for (i = NKPDE; i < MAXKPDE; i++, p += NBPG)
- PTD[KPTDI+i] = (pd_entry_t)p |
- PG_V | PG_KW;
- }
-}
-
-void
-pmap_virtual_space(startp, endp)
- vm_offset_t *startp;
- vm_offset_t *endp;
-{
- *startp = virtual_avail;
- *endp = virtual_end;
-}
-
-/*
- * Initialize the pmap module.
- * Called by vm_init, to initialize any structures that the pmap
- * system needs to map virtual memory.
- */
-void
-pmap_init()
-{
- vm_offset_t addr;
- vm_size_t s;
- int lcv;
-
- if (PAGE_SIZE != NBPG)
- panic("pmap_init: CLSIZE != 1");
-
- npages = 0;
- for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
- npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
- s = (vm_size_t) (sizeof(struct pv_entry) * npages + npages);
- s = round_page(s);
-#if defined(UVM)
- addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
- if (addr == NULL)
- panic("pmap_init");
-#else
- addr = (vm_offset_t) kmem_alloc(kernel_map, s);
-#endif
-
- /* allocate pv_entry stuff first */
- for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
- vm_physmem[lcv].pmseg.pvent = (struct pv_entry *) addr;
- addr = (vm_offset_t)(vm_physmem[lcv].pmseg.pvent +
- (vm_physmem[lcv].end - vm_physmem[lcv].start));
- }
- /* allocate attrs next */
- for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
- vm_physmem[lcv].pmseg.attrs = (char *) addr;
- addr = (vm_offset_t)(vm_physmem[lcv].pmseg.attrs +
- (vm_physmem[lcv].end - vm_physmem[lcv].start));
- }
- TAILQ_INIT(&pv_page_freelist);
-
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT)
- printf("pmap_init: %lx bytes (%x pgs)\n",
- s, npages);
-#endif
-
- /*
- * Now it is safe to enable pv_entry recording.
- */
- pmap_initialized = TRUE;
-}
-
-struct pv_entry *
-pmap_alloc_pv()
-{
- struct pv_page *pvp;
- struct pv_entry *pv;
- int i;
-
- if (pv_nfree == 0) {
-#if defined(UVM)
- /* NOTE: can't lock kernel_map here */
- MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
-#else
- pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
-#endif
- if (pvp == 0)
- panic("pmap_alloc_pv: kmem_alloc() failed");
- pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
- for (i = NPVPPG - 2; i; i--, pv++)
- pv->pv_next = pv + 1;
- pv->pv_next = 0;
- pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
- TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- pv = &pvp->pvp_pv[0];
- } else {
- --pv_nfree;
- pvp = pv_page_freelist.tqh_first;
- if (--pvp->pvp_pgi.pgi_nfree == 0) {
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- }
- pv = pvp->pvp_pgi.pgi_freelist;
-#ifdef DIAGNOSTIC
- if (pv == 0)
- panic("pmap_alloc_pv: pgi_nfree inconsistent");
-#endif
- pvp->pvp_pgi.pgi_freelist = pv->pv_next;
- }
- return pv;
-}
-
-void
-pmap_free_pv(pv)
- struct pv_entry *pv;
-{
- register struct pv_page *pvp;
-
- pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
- switch (++pvp->pvp_pgi.pgi_nfree) {
- case 1:
- TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- default:
- pv->pv_next = pvp->pvp_pgi.pgi_freelist;
- pvp->pvp_pgi.pgi_freelist = pv;
- ++pv_nfree;
- break;
- case NPVPPG:
- pv_nfree -= NPVPPG - 1;
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
-#if defined(UVM)
- FREE((vaddr_t) pvp, M_VMPVENT);
-#else
- kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#endif
- break;
- }
-}
-
-void
-pmap_collect_pv()
-{
- struct pv_page_list pv_page_collectlist;
- struct pv_page *pvp, *npvp;
- struct pv_entry *ph, *ppv, *pv, *npv;
- int s;
- int bank, off;
-
- TAILQ_INIT(&pv_page_collectlist);
-
- for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
- if (pv_nfree < NPVPPG)
- break;
- npvp = pvp->pvp_pgi.pgi_list.tqe_next;
- if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp, pvp_pgi.pgi_list);
- pv_nfree -= pvp->pvp_pgi.pgi_nfree;
- pvp->pvp_pgi.pgi_nfree = -1;
- }
- }
-
- if (pv_page_collectlist.tqh_first == 0)
- return;
-
- if ((bank = vm_physseg_find(atop(0), &off)) == -1) {
- printf("INVALID PA!");
- return;
- }
-
- for (ph = &vm_physmem[bank].pmseg.pvent[off]; ph; ph = ph->pv_next) {
- if (ph->pv_pmap == 0)
- continue;
- s = splimp();
- for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
- pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
- if (pvp->pvp_pgi.pgi_nfree == -1) {
- pvp = pv_page_freelist.tqh_first;
- if (--pvp->pvp_pgi.pgi_nfree == 0) {
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- }
- npv = pvp->pvp_pgi.pgi_freelist;
-#ifdef DIAGNOSTIC
- if (npv == 0)
- panic("pmap_collect_pv: pgi_nfree inconsistent");
-#endif
- pvp->pvp_pgi.pgi_freelist = npv->pv_next;
- *npv = *pv;
- ppv->pv_next = npv;
- ppv = npv;
- } else
- ppv = pv;
- }
- splx(s);
- }
-
- for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
- npvp = pvp->pvp_pgi.pgi_list.tqe_next;
-#if defined(UVM)
- FREE((vaddr_t) pvp, M_VMPVENT);
-#else
- kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#endif
- }
-}
-
-__inline void
-pmap_enter_pv(pmap, va, pv)
- register pmap_t pmap;
- vm_offset_t va;
- struct pv_entry *pv;
-{
- register struct pv_entry *npv;
- int s;
-
- if (!pmap_initialized)
- return;
-
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("pmap_enter_pv: pv %x: %x/%x/%x\n",
- pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
-#endif
- s = splimp();
-
- if (pv->pv_pmap == NULL) {
- /*
- * No entries yet, use header as the first entry
- */
-#ifdef DEBUG
- enter_stats.firstpv++;
-#endif
- pv->pv_va = va;
- pv->pv_pmap = pmap;
- pv->pv_next = NULL;
- } else {
- /*
- * There is at least one other VA mapping this page.
- * Place this entry after the header.
- */
-#ifdef DEBUG
- for (npv = pv; npv; npv = npv->pv_next)
- if (pmap == npv->pv_pmap && va == npv->pv_va)
- panic("pmap_enter_pv: already in pv_tab");
-#endif
- npv = pmap_alloc_pv();
- npv->pv_va = va;
- npv->pv_pmap = pmap;
- npv->pv_next = pv->pv_next;
- pv->pv_next = npv;
-#ifdef DEBUG
- if (!npv->pv_next)
- enter_stats.secondpv++;
-#endif
- }
- splx(s);
-}
-
-__inline void
-pmap_remove_pv(pmap, va, pv)
- register pmap_t pmap;
- vm_offset_t va;
- struct pv_entry *pv;
-{
- register struct pv_entry *npv;
- int s;
-
- /*
- * Remove from the PV table (raise IPL since we
- * may be called at interrupt time).
- */
- s = splimp();
-
- /*
- * If it is the first entry on the list, it is actually
- * in the header and we must copy the following entry up
- * to the header. Otherwise we must search the list for
- * the entry. In either case we free the now unused entry.
- */
- if (pmap == pv->pv_pmap && va == pv->pv_va) {
- npv = pv->pv_next;
- if (npv) {
- *pv = *npv;
- pmap_free_pv(npv);
- } else
- pv->pv_pmap = NULL;
- } else {
- for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
- if (pmap == npv->pv_pmap && va == npv->pv_va)
- break;
- }
- if (npv) {
- pv->pv_next = npv->pv_next;
- pmap_free_pv(npv);
- }
- }
- splx(s);
-}
-
-/*
- * Used to map a range of physical addresses into kernel
- * virtual address space.
- *
- * For now, VM is already on, we only need to map the
- * specified memory.
- */
-vm_offset_t
-pmap_map(va, spa, epa, prot)
- vm_offset_t va, spa, epa;
- int prot;
-{
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_map(%x, %x, %x, %x)\n", va, spa, epa, prot);
-#endif
-
- while (spa < epa) {
- pmap_enter(pmap_kernel(), va, spa, prot, FALSE, 0);
- va += NBPG;
- spa += NBPG;
- }
- return va;
-}
-
-/*
- * Create and return a physical map.
- *
- * If the size specified for the map
- * is zero, the map is an actual physical
- * map, and may be referenced by the
- * hardware.
- *
- * If the size specified is non-zero,
- * the map will be used in software only, and
- * is bounded by that size.
- *
- * [ just allocate a ptd and mark it uninitialized -- should we track
- * with a table which process has which ptd? -wfj ]
- */
-pmap_t
-pmap_create(size)
- vm_size_t size;
-{
- register pmap_t pmap;
-
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
- printf("pmap_create(%x)\n", size);
-#endif
-
- /*
- * Software use map does not need a pmap
- */
- if (size)
- return NULL;
-
- pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
- bzero(pmap, sizeof(*pmap));
- pmap_pinit(pmap);
- return pmap;
-}
-
-/*
- * Initialize a preallocated and zeroed pmap structure,
- * such as one in a vmspace structure.
- */
-void
-pmap_pinit(pmap)
- register struct pmap *pmap;
-{
-
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
- printf("pmap_pinit(%x)\n", pmap);
-#endif
-
- /*
- * No need to allocate page table space yet but we do need a
- * valid page directory table.
- */
-#if defined(UVM)
- pmap->pm_pdir = (pd_entry_t *) uvm_km_zalloc(kernel_map, NBPG);
-#else
- pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
-#endif
-
-#ifdef DIAGNOSTIC
- if (pmap->pm_pdir == NULL)
- panic("pmap_pinit: alloc failed");
-#endif
- /* wire in kernel global address entries */
- bcopy(&PTD[KPTDI], &pmap->pm_pdir[KPTDI], MAXKPDE *
- sizeof(pd_entry_t));
-
- /* install self-referential address mapping entry */
- pmap->pm_pdir[PTDPTDI] = pmap_extract(pmap_kernel(),
- (vm_offset_t)pmap->pm_pdir) | PG_V | PG_KW;
-
- pmap->pm_count = 1;
- simple_lock_init(&pmap->pm_lock);
-}
-
-/*
- * Retire the given physical map from service.
- * Should only be called if the map contains
- * no valid mappings.
- */
-void
-pmap_destroy(pmap)
- register pmap_t pmap;
-{
- int count;
-
- if (pmap == NULL)
- return;
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_destroy(%x)\n", pmap);
-#endif
-
- simple_lock(&pmap->pm_lock);
- count = --pmap->pm_count;
- simple_unlock(&pmap->pm_lock);
- if (count == 0) {
- pmap_release(pmap);
- free((caddr_t)pmap, M_VMPMAP);
- }
-}
-
-/*
- * Release any resources held by the given physical map.
- * Called when a pmap initialized by pmap_pinit is being released.
- * Should only be called if the map contains no valid mappings.
- */
-void
-pmap_release(pmap)
- register struct pmap *pmap;
-{
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_release(%x)\n", pmap);
-#endif
-
-#ifdef DIAGNOSTICx
- /* sometimes 1, sometimes 0; could rearrange pmap_destroy */
- if (pmap->pm_count != 1)
- panic("pmap_release count");
-#endif
-
-#if defined(UVM)
- uvm_km_free(kernel_map, (vaddr_t)pmap->pm_pdir, NBPG);
-#else
- kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
-#endif
-}
-
-/*
- * Add a reference to the specified pmap.
- */
-void
-pmap_reference(pmap)
- pmap_t pmap;
-{
-
- if (pmap == NULL)
- return;
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_reference(%x)", pmap);
-#endif
-
- simple_lock(&pmap->pm_lock);
- pmap->pm_count++;
- simple_unlock(&pmap->pm_lock);
-}
-
-void
-pmap_activate(p)
- struct proc *p;
-{
- struct pcb *pcb = &p->p_addr->u_pcb;
- pmap_t pmap = p->p_vmspace->vm_map.pmap;
-
- pcb->pcb_cr3 = pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_pdir);
- if (p == curproc)
- lcr3(pcb->pcb_cr3);
-}
-
-void
-pmap_deactivate(p)
- struct proc *p;
-{
-}
-
-/*
- * Remove the given range of addresses from the specified map.
- *
- * It is assumed that the start and end are properly
- * rounded to the page size.
- */
-void
-pmap_remove(pmap, sva, eva)
- struct pmap *pmap;
- register vm_offset_t sva, eva;
-{
- register pt_entry_t *pte;
- vm_offset_t pa;
- int bank, off;
- int flush = 0;
-
- sva &= PG_FRAME;
- eva &= PG_FRAME;
-
- /*
- * We need to acquire a pointer to a page table page before entering
- * the following loop.
- */
- while (sva < eva) {
- pte = pmap_pte(pmap, sva);
- if (pte)
- break;
- sva = (sva & PD_MASK) + NBPD;
- }
-
- while (sva < eva) {
- /* only check once in a while */
- if ((sva & PT_MASK) == 0) {
- if (!pmap_pde_v(pmap_pde(pmap, sva))) {
- /* We can race ahead here, to the next pde. */
- sva += NBPD;
- pte += i386_btop(NBPD);
- continue;
- }
- }
-
- pte = pmap_pte(pmap, sva);
- if (pte == NULL) {
- /* We can race ahead here, to the next pde. */
- sva = (sva & PD_MASK) + NBPD;
- continue;
- }
-
- if (!pmap_pte_v(pte)) {
-#ifdef __GNUC__
- /*
- * Scan ahead in a tight loop for the next used PTE in
- * this page. We don't scan the whole region here
- * because we don't want to zero-fill unused page table
- * pages.
- */
- int n, m;
-
- n = min(eva - sva, NBPD - (sva & PT_MASK)) >> PGSHIFT;
- __asm __volatile(
- "cld\n\trepe\n\tscasl\n\tje 1f\n\tincl %1\n\t1:"
- : "=D" (pte), "=c" (m)
- : "0" (pte), "1" (n), "a" (0));
- sva += (n - m) << PGSHIFT;
- if (!m)
- continue;
- /* Overshot. */
- --pte;
-#else
- goto next;
-#endif
- }
-
- flush = 1;
-
- /*
- * Update statistics
- */
- if (pmap_pte_w(pte))
- pmap->pm_stats.wired_count--;
- pmap->pm_stats.resident_count--;
-
- pa = pmap_pte_pa(pte);
-
- /*
- * Invalidate the PTEs.
- * XXX: should cluster them up and invalidate as many
- * as possible at once.
- */
-#ifdef DEBUG
- if (pmapdebug & PDB_REMOVE)
- printf("remove: inv pte at %x(%x) ", pte, *pte);
-#endif
-
-#ifdef needednotdone
-reduce wiring count on page table pages as references drop
-#endif
-
- if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
- vm_physmem[bank].pmseg.attrs[off] |=
- *pte & (PG_M | PG_U);
- pmap_remove_pv(pmap, sva,
- &vm_physmem[bank].pmseg.pvent[off]);
- }
-
- *pte = 0;
-
-#ifndef __GNUC__
- next:
-#endif
- sva += NBPG;
- pte++;
- }
-
- if (flush)
- pmap_update();
-}
-
-/*
- * Routine: pmap_remove_all
- * Function:
- * Removes this physical page from
- * all physical maps in which it resides.
- * Reflects back modify bits to the pager.
- */
-void
-pmap_remove_all(pa)
- vm_offset_t pa;
-{
- struct pv_entry *ph, *pv, *npv;
- register pmap_t pmap;
- register pt_entry_t *pte;
- int bank, off;
- int s;
-
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
- printf("pmap_remove_all(%x)", pa);
- /*pmap_pvdump(pa);*/
-#endif
-
- bank = vm_physseg_find(atop(pa), &off);
- if (bank == -1)
- return;
-
- pv = ph = &vm_physmem[bank].pmseg.pvent[off];
- s = splimp();
-
- if (ph->pv_pmap == NULL) {
- splx(s);
- return;
- }
-
- while (pv) {
- pmap = pv->pv_pmap;
- pte = pmap_pte(pmap, pv->pv_va);
-
-#ifdef DEBUG
- if (!pte || !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
- panic("pmap_remove_all: bad mapping");
-#endif
-
- /*
- * Update statistics
- */
- if (pmap_pte_w(pte))
- pmap->pm_stats.wired_count--;
- pmap->pm_stats.resident_count--;
-
- /*
- * Invalidate the PTEs.
- * XXX: should cluster them up and invalidate as many
- * as possible at once.
- */
-#ifdef DEBUG
- if (pmapdebug & PDB_REMOVE)
- printf("remove: inv pte at %x(%x) ", pte, *pte);
-#endif
-
-#ifdef needednotdone
-reduce wiring count on page table pages as references drop
-#endif
-
- /*
- * Update saved attributes for managed page
- */
- vm_physmem[bank].pmseg.attrs[off] |= *pte & (PG_M | PG_U);
- *pte = 0;
-
- npv = pv->pv_next;
- if (pv == ph)
- ph->pv_pmap = NULL;
- else
- pmap_free_pv(pv);
- pv = npv;
- }
- splx(s);
-
- pmap_update();
-}
-
-/*
- * Set the physical protection on the
- * specified range of this map as requested.
- */
-void
-pmap_protect(pmap, sva, eva, prot)
- register pmap_t pmap;
- vm_offset_t sva, eva;
- vm_prot_t prot;
-{
- register pt_entry_t *pte;
- register int i386prot;
- int flush = 0;
-
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
- printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
-#endif
-
- if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
- pmap_remove(pmap, sva, eva);
- return;
- }
-
- if (prot & VM_PROT_WRITE)
- return;
-
- sva &= PG_FRAME;
- eva &= PG_FRAME;
-
- /*
- * We need to acquire a pointer to a page table page before entering
- * the following loop.
- */
- while (sva < eva) {
- pte = pmap_pte(pmap, sva);
- if (pte)
- break;
- sva = (sva & PD_MASK) + NBPD;
- }
-
- while (sva < eva) {
- /* only check once in a while */
- if ((sva & PT_MASK) == 0) {
- if (!pmap_pde_v(pmap_pde(pmap, sva))) {
- /* We can race ahead here, to the next pde. */
- sva += NBPD;
- pte += i386_btop(NBPD);
- continue;
- }
- }
-
- if (!pmap_pte_v(pte)) {
-#ifdef __GNUC__
- /*
- * Scan ahead in a tight loop for the next used PTE in
- * this page. We don't scan the whole region here
- * because we don't want to zero-fill unused page table
- * pages.
- */
- int n, m;
-
- n = min(eva - sva, NBPD - (sva & PT_MASK)) >> PGSHIFT;
- __asm __volatile(
- "cld\n\trepe\n\tscasl\n\tje 1f\n\tincl %1\n\t1:"
- : "=D" (pte), "=c" (m)
- : "0" (pte), "1" (n), "a" (0));
- sva += (n - m) << PGSHIFT;
- if (!m)
- continue;
- /* Overshot. */
- --pte;
-#else
- goto next;
-#endif
- }
-
- flush = 1;
-
- i386prot = protection_codes[prot];
- if (sva < VM_MAXUSER_ADDRESS) /* see also pmap_enter() */
- i386prot |= PG_u;
- else if (sva < VM_MAX_ADDRESS)
- i386prot |= PG_u | PG_RW;
- pmap_pte_set_prot(pte, i386prot);
-
-#ifndef __GNUC__
- next:
-#endif
- sva += NBPG;
- pte++;
- }
-
- if (flush)
- pmap_update();
-}
-
-/*
- * Insert the given physical page (p) at
- * the specified virtual address (v) in the
- * target physical map with the protection requested.
- *
- * If specified, the page will be wired down, meaning
- * that the related pte can not be reclaimed.
- *
- * NB: This is the only routine which MAY NOT lazy-evaluate
- * or lose information. That is, this routine must actually
- * insert this page into the given map NOW.
- */
-void
-pmap_enter(pmap, va, pa, prot, wired, access_type)
- register pmap_t pmap;
- vm_offset_t va;
- register vm_offset_t pa;
- vm_prot_t prot;
- boolean_t wired;
- vm_prot_t access_type;
-{
- register pt_entry_t *pte;
- register pt_entry_t npte;
- int bank, off;
- int flush = 0;
- boolean_t cacheable;
-
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter(%x, %x, %x, %x, %x)", pmap, va, pa, prot,
- wired);
-#endif
-
- if (pmap == NULL)
- return;
-
- if (va >= VM_MAX_KERNEL_ADDRESS)
- panic("pmap_enter: too big");
- /* also, should not muck with PTD va! */
-
-#ifdef DEBUG
- if (pmap == pmap_kernel())
- enter_stats.kernel++;
- else
- enter_stats.user++;
-#endif
-
- pte = pmap_pte(pmap, va);
- if (!pte) {
- /*
- * Page Directory table entry not valid, we need a new PT page
- *
- * we want to vm_fault in a new zero-filled PT page for our
- * use. in order to do this, we want to call vm_fault()
- * with the VA of where we want to put the PTE. but in
- * order to call vm_fault() we need to know which vm_map
- * we are faulting in. in the m68k pmap's this is easy
- * since all PT pages live in one global vm_map ("pt_map")
- * and we have a lot of virtual space we can use for the
- * pt_map (since the kernel doesn't have to share its 4GB
- * address space with processes). but in the i386 port
- * the kernel must live in the top part of the virtual
- * address space and PT pages live in their process' vm_map
- * rather than a global one. the problem is that we have
- * no way of knowing which vm_map is the correct one to
- * fault on.
- *
- * XXX: see NetBSD PR#1834 and Mycroft's posting to
- * tech-kern on 7 Jan 1996.
- *
- * rather than always calling panic, we try and make an
- * educated guess as to which vm_map to use by using curproc.
- * this is a workaround and may not fully solve the problem?
- */
- struct vm_map *vmap;
- int rv;
- vm_offset_t v;
-
- if (curproc == NULL || curproc->p_vmspace == NULL ||
- pmap != curproc->p_vmspace->vm_map.pmap)
- panic("ptdi %x", pmap->pm_pdir[PTDPTDI]);
-
- /* our guess about the vm_map was good! fault it in. */
-
- vmap = &curproc->p_vmspace->vm_map;
- v = trunc_page((vaddr_t)vtopte(va));
-#ifdef DEBUG
- printf("faulting in a pt page map %x va %x\n", vmap, v);
-#endif
-#if defined(UVM)
- rv = uvm_fault(vmap, v, 0, VM_PROT_READ|VM_PROT_WRITE);
-#else
- rv = vm_fault(vmap, v, VM_PROT_READ|VM_PROT_WRITE, FALSE);
-#endif
- if (rv != KERN_SUCCESS)
- panic("ptdi2 %x", pmap->pm_pdir[PTDPTDI]);
-#if defined(UVM)
- /*
- * XXX It is possible to get here from uvm_fault with vmap
- * locked. uvm_map_pageable requires it to be unlocked, so
- * try to record the state of the lock, unlock it, and then
- * after the call, reacquire the original lock.
- * THIS IS A GROSS HACK!
- */
- {
- int ls = lockstatus(&vmap->lock);
-
- if (ls)
- lockmgr(&vmap->lock, LK_RELEASE, (void *)0,
- curproc);
- uvm_map_pageable(vmap, v, round_page(v+1), FALSE);
- if (ls)
- lockmgr(&vmap->lock, ls, (void *)0, curproc);
- }
-#else
- vm_map_pageable(vmap, v, round_page(v+1), FALSE);
-#endif
- pte = pmap_pte(pmap, va);
- if (!pte)
- panic("ptdi3 %x", pmap->pm_pdir[PTDPTDI]);
- }
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: pte %x, *pte %x ", pte, *pte);
-#endif
-
- if (pmap_pte_v(pte)) {
- register vm_offset_t opa;
-
- /*
- * Check for wiring change and adjust statistics.
- */
- if ((wired && !pmap_pte_w(pte)) ||
- (!wired && pmap_pte_w(pte))) {
- /*
- * We don't worry about wiring PT pages as they remain
- * resident as long as there are valid mappings in them.
- * Hence, if a user page is wired, the PT page will be also.
- */
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: wiring change -> %x ", wired);
-#endif
- if (wired)
- pmap->pm_stats.wired_count++;
- else
- pmap->pm_stats.wired_count--;
-#ifdef DEBUG
- enter_stats.wchange++;
-#endif
- }
-
- flush = 1;
- opa = pmap_pte_pa(pte);
-
- /*
- * Mapping has not changed, must be protection or wiring change.
- */
- if (opa == pa) {
-#ifdef DEBUG
- enter_stats.pwchange++;
-#endif
- goto validate;
- }
-
- /*
- * Mapping has changed, invalidate old range and fall through to
- * handle validating new mapping.
- */
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: removing old mapping %x pa %x ", va, opa);
-#endif
- if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
- vm_physmem[bank].pmseg.attrs[off] |=
- *pte & (PG_M | PG_U);
- pmap_remove_pv(pmap, va,
- &vm_physmem[bank].pmseg.pvent[off]);
- }
-#ifdef DEBUG
- enter_stats.mchange++;
-#endif
- } else {
- /*
- * Increment counters
- */
- pmap->pm_stats.resident_count++;
- if (wired)
- pmap->pm_stats.wired_count++;
- }
-
- /*
- * Enter on the PV list if part of our managed memory
- */
- if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
-#ifdef DEBUG
- enter_stats.managed++;
-#endif
- pmap_enter_pv(pmap, va, &vm_physmem[bank].pmseg.pvent[off]);
- cacheable = TRUE;
- } else if (pmap_initialized) {
-#ifdef DEBUG
- enter_stats.unmanaged++;
-#endif
- /*
- * Assumption: if it is not part of our managed memory
- * then it must be device memory which may be volatile.
- */
- cacheable = FALSE;
- }
-
-validate:
- /*
- * Now validate mapping with desired protection/wiring.
- * Assume uniform modified and referenced status for all
- * I386 pages in a MACH page.
- */
- npte = (pa & PG_FRAME) | protection_codes[prot] | PG_V;
- if (wired)
- npte |= PG_W;
-
- if (va < VM_MAXUSER_ADDRESS) /* i.e. below USRSTACK */
- npte |= PG_u;
- else if (va < VM_MAX_ADDRESS)
- /*
- * Page tables need to be user RW, for some reason, and the
- * user area must be writable too. Anything above
- * VM_MAXUSER_ADDRESS is protected from user access by
- * the user data and code segment descriptors, so this is OK.
- */
- npte |= PG_u | PG_RW;
-
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: new pte value %x ", npte);
-#endif
-
- *pte = npte;
- if (flush)
- pmap_update();
-}
-
-/*
- * pmap_page_protect:
- *
- * Lower the permission for all mappings to a given page.
- */
-void
-pmap_page_protect(phys, prot)
- vm_offset_t phys;
- vm_prot_t prot;
-{
-
- switch (prot) {
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
- pmap_copy_on_write(phys);
- break;
- case VM_PROT_ALL:
- break;
- default:
- pmap_remove_all(phys);
- break;
- }
-}
-
-/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
- */
-void
-pmap_change_wiring(pmap, va, wired)
- register pmap_t pmap;
- vm_offset_t va;
- boolean_t wired;
-{
- register pt_entry_t *pte;
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
-#endif
-
- pte = pmap_pte(pmap, va);
- if (!pte)
- return;
-
-#ifdef DEBUG
- /*
- * Page not valid. Should this ever happen?
- * Just continue and change wiring anyway.
- */
- if (!pmap_pte_v(pte)) {
- if (pmapdebug & PDB_PARANOIA)
- printf("pmap_change_wiring: invalid PTE for %x ", va);
- }
-#endif
-
- if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
- if (wired)
- pmap->pm_stats.wired_count++;
- else
- pmap->pm_stats.wired_count--;
- pmap_pte_set_w(pte, wired);
- }
-}
-
-/*
- * Routine: pmap_pte
- * Function:
- * Extract the page table entry associated
- * with the given map/virtual_address pair.
- */
-pt_entry_t *
-pmap_pte(pmap, va)
- register pmap_t pmap;
- vm_offset_t va;
-{
- pt_entry_t *ptp;
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_pte(%x, %x) ->\n", pmap, va);
-#endif
-
- if (!pmap || !pmap_pde_v(pmap_pde(pmap, va)))
- return NULL;
-
- if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde & PG_FRAME) ||
- pmap == pmap_kernel())
- /* current address space or kernel */
- ptp = PTmap;
- else {
- /* alternate address space */
- if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) != (APTDpde & PG_FRAME)) {
- APTDpde = pmap->pm_pdir[PTDPTDI];
- pmap_update();
- }
- ptp = APTmap;
- }
-
- return ptp + i386_btop(va);
-}
-
-/*
- * Routine: pmap_extract
- * Function:
- * Extract the physical page address associated
- * with the given map/virtual_address pair.
- */
-vm_offset_t
-pmap_extract(pmap, va)
- register pmap_t pmap;
- vm_offset_t va;
-{
- register pt_entry_t *pte;
- register vm_offset_t pa;
-
-#ifdef DEBUGx
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_extract(%x, %x) -> ", pmap, va);
-#endif
-
- pte = pmap_pte(pmap, va);
- if (!pte)
- return NULL;
- if (!pmap_pte_v(pte))
- return NULL;
-
- pa = pmap_pte_pa(pte);
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("%x\n", pa);
-#endif
- return pa | (va & ~PG_FRAME);
-}
-
-/*
- * Copy the range specified by src_addr/len
- * from the source map to the range dst_addr/len
- * in the destination map.
- *
- * This routine is only advisory and need not do anything.
- */
-void
-pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
- pmap_t dst_pmap, src_pmap;
- vm_offset_t dst_addr, src_addr;
- vm_size_t len;
-{
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_copy(%x, %x, %x, %x, %x)",
- dst_pmap, src_pmap, dst_addr, len, src_addr);
-#endif
-}
-
-/*
- * Routine: pmap_collect
- * Function:
- * Garbage collects the physical map system for
- * pages which are no longer used.
- * Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but
- * others may be collected.
- * Usage:
- * Called by the pageout daemon when pages are scarce.
- * [ needs to be written -wfj ] XXXX
- */
-void
-pmap_collect(pmap)
- pmap_t pmap;
-{
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_collect(%x) ", pmap);
-#endif
-
- if (pmap != pmap_kernel())
- return;
-
-}
-
-#if DEBUG
-void
-pmap_dump_pvlist(phys, m)
- vm_offset_t phys;
- char *m;
-{
- register struct pv_entry *pv;
- int bank, off;
-
- if (!(pmapdebug & PDB_PARANOIA))
- return;
-
- if (!pmap_initialized)
- return;
- printf("%s %08x:", m, phys);
- bank = vm_physseg_find(atop(phys), &off);
- pv = &vm_physmem[bank].pmseg.pvent[off];
- if (pv->pv_pmap == NULL) {
- printf(" no mappings\n");
- return;
- }
- for (; pv; pv = pv->pv_next)
- printf(" pmap %08x va %08x", pv->pv_pmap, pv->pv_va);
- printf("\n");
-}
-#else
-#define pmap_dump_pvlist(a,b)
-#endif
-
-/*
- * pmap_zero_page zeros the specified page by mapping it into
- * virtual memory and using bzero to clear its contents.
- */
-void
-pmap_zero_page(phys)
- register vm_offset_t phys;
-{
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_zero_page(%x)", phys);
-#endif
-
- pmap_dump_pvlist(phys, "pmap_zero_page: phys");
- *CMAP2 = (phys & PG_FRAME) | PG_V | PG_KW /*| PG_N*/;
- pmap_update();
- bzero(CADDR2, NBPG);
-}
-
-/*
- * pmap_copy_page copies the specified page by mapping
- * it into virtual memory and using bcopy to copy its
- * contents.
- */
-void
-pmap_copy_page(src, dst)
- register vm_offset_t src, dst;
-{
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_copy_page(%x, %x)", src, dst);
-#endif
-
- pmap_dump_pvlist(src, "pmap_copy_page: src");
- pmap_dump_pvlist(dst, "pmap_copy_page: dst");
- *CMAP1 = (src & PG_FRAME) | PG_V | PG_KR;
- *CMAP2 = (dst & PG_FRAME) | PG_V | PG_KW /*| PG_N*/;
- pmap_update();
- bcopy(CADDR1, CADDR2, NBPG);
-}
-
-/*
- * Routine: pmap_pageable
- * Function:
- * Make the specified pages (by pmap, offset)
- * pageable (or not) as requested.
- *
- * A page which is not pageable may not take
- * a fault; therefore, its page table entry
- * must remain valid for the duration.
- *
- * This routine is merely advisory; pmap_enter
- * will specify that these pages are to be wired
- * down (or not) as appropriate.
- */
-
-void
-pmap_pageable(pmap, sva, eva, pageable)
- pmap_t pmap;
- vm_offset_t sva, eva;
- boolean_t pageable;
-{
-
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_pageable(%x, %x, %x, %x)",
- pmap, sva, eva, pageable);
-#endif
-
- /*
- * If we are making a PT page pageable then all valid
- * mappings must be gone from that page. Hence it should
- * be all zeros and there is no need to clean it.
- * Assumption:
- * - PT pages have only one pv_table entry
- * - PT pages are the only single-page allocations
- * between the user stack and kernel va's
- * See also pmap_enter & pmap_protect for rehashes of this...
- */
-
- if (pageable &&
- pmap == pmap_kernel() &&
- sva >= VM_MAXUSER_ADDRESS && eva <= VM_MAX_ADDRESS &&
- eva - sva == NBPG) {
- register vm_offset_t pa;
- register pt_entry_t *pte;
-#ifdef DIAGNOSTIC
- int bank, off;
- register struct pv_entry *pv;
-#endif
-
-#ifdef DEBUG
- if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
- printf("pmap_pageable(%x, %x, %x, %x)",
- pmap, sva, eva, pageable);
-#endif
-
- pte = pmap_pte(pmap, sva);
- if (!pte)
- return;
- if (!pmap_pte_v(pte))
- return;
-
- pa = pmap_pte_pa(pte);
-
-#ifdef DIAGNOSTIC
- if ((*pte & (PG_u | PG_RW)) != (PG_u | PG_RW))
- printf("pmap_pageable: unexpected pte=%x va %x\n",
- *pte, sva);
- if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
- return;
- pv = &vm_physmem[bank].pmseg.pvent[off];
- if (pv->pv_va != sva || pv->pv_next) {
- printf("pmap_pageable: bad PT page va %x next %x\n",
- pv->pv_va, pv->pv_next);
- return;
- }
-#endif
-
- /*
- * Mark it unmodified to avoid pageout
- */
- pmap_clear_modify(pa);
-
-#ifdef needsomethinglikethis
- if (pmapdebug & PDB_PTPAGE)
- printf("pmap_pageable: PT page %x(%x) unmodified\n",
- sva, *pmap_pte(pmap, sva));
- if (pmapdebug & PDB_WIRING)
- pmap_check_wiring("pageable", sva);
-#endif
- }
-}
-
-/*
- * Miscellaneous support routines follow
- */
-void
-i386_protection_init()
-{
-
- protection_codes[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE] = 0;
- protection_codes[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] =
- protection_codes[VM_PROT_NONE | VM_PROT_READ | VM_PROT_NONE] =
- protection_codes[VM_PROT_NONE | VM_PROT_READ | VM_PROT_EXECUTE] = PG_RO;
- protection_codes[VM_PROT_WRITE | VM_PROT_NONE | VM_PROT_NONE] =
- protection_codes[VM_PROT_WRITE | VM_PROT_NONE | VM_PROT_EXECUTE] =
- protection_codes[VM_PROT_WRITE | VM_PROT_READ | VM_PROT_NONE] =
- protection_codes[VM_PROT_WRITE | VM_PROT_READ | VM_PROT_EXECUTE] = PG_RW;
-}
-
-boolean_t
-pmap_testbit(pa, setbits)
- register vm_offset_t pa;
- int setbits;
-{
- register struct pv_entry *pv;
- register pt_entry_t *pte;
- int s;
- int bank, off;
-
- if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
- return FALSE;
- pv = &vm_physmem[bank].pmseg.pvent[off];
- s = splimp();
-
- /*
- * Check saved info first
- */
- if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
- splx(s);
- return TRUE;
- }
-
- /*
- * Not found, check current mappings returning
- * immediately if found.
- */
- if (pv->pv_pmap != NULL) {
- for (; pv; pv = pv->pv_next) {
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
- if (*pte & setbits) {
- splx(s);
- return TRUE;
- }
- }
- }
- splx(s);
- return FALSE;
-}
-
-/*
- * Modify pte bits for all ptes corresponding to the given physical address.
- * We use `maskbits' rather than `clearbits' because we're always passing
- * constants and the latter would require an extra inversion at run-time.
- */
-void
-pmap_changebit(pa, setbits, maskbits)
- register vm_offset_t pa;
- int setbits, maskbits;
-{
- register struct pv_entry *pv;
- register pt_entry_t *pte;
- vm_offset_t va;
- int s;
- int bank, off;
-
-#ifdef DEBUG
- if (pmapdebug & PDB_BITS)
- printf("pmap_changebit(%x, %x, %x)",
- pa, setbits, ~maskbits);
-#endif
-
- if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
- return;
- pv = &vm_physmem[bank].pmseg.pvent[off];
- s = splimp();
-
- /*
- * Clear saved attributes (modify, reference)
- */
- if (~maskbits)
- vm_physmem[bank].pmseg.attrs[off] &= maskbits;
-
- /*
- * Loop over all current mappings setting/clearing as appropriate
- * If setting RO do we need to clear the VAC?
- */
- if (pv->pv_pmap != NULL) {
- for (; pv; pv = pv->pv_next) {
- va = pv->pv_va;
-
- /*
- * XXX don't write protect pager mappings
- */
- if ((PG_RO && setbits == PG_RO) ||
- (PG_RW && maskbits == ~PG_RW)) {
-#if defined(UVM)
- if (va >= uvm.pager_sva && va < uvm.pager_eva)
- continue;
-#else
- extern vm_offset_t pager_sva, pager_eva;
-
- if (va >= pager_sva && va < pager_eva)
- continue;
-#endif
- }
-
- pte = pmap_pte(pv->pv_pmap, va);
- *pte = (*pte & maskbits) | setbits;
- }
- pmap_update();
- }
- splx(s);
-}
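[Editorial note: to make the maskbits convention above concrete, callers pass precomputed constant masks so no inversion happens at run time. A hypothetical pair of calls in the style this routine expects (a sketch of the convention, not verbatim call sites from this file):

	pmap_changebit(pa, 0, ~PG_M);		/* clear the modified bit */
	pmap_changebit(pa, PG_RO, ~PG_RW);	/* write-protect: set RO, drop RW */
]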
-
-void
-pmap_prefault(map, v, l)
- vm_map_t map;
- vm_offset_t v;
- vm_size_t l;
-{
- vm_offset_t pv, pv2;
-
- for (pv = v; pv < v + l ; pv += ~PD_MASK + 1) {
- if (!pmap_pde_v(pmap_pde(map->pmap, pv))) {
- pv2 = trunc_page((vaddr_t)vtopte(pv));
-#if defined(UVM)
- uvm_fault(map, pv2, 0, VM_PROT_READ);
-#else
- vm_fault(map, pv2, VM_PROT_READ, FALSE);
-#endif
- }
- pv &= PD_MASK;
- }
-}
-
-#ifdef DEBUG
-void
-pmap_pvdump(pa)
- vm_offset_t pa;
-{
- register struct pv_entry *pv;
- int bank, off;
-
- printf("pa %x", pa);
- if ((bank = vm_physseg_find(atop(pa), &off)) == -1) {
- printf("INVALID PA!");
- } else {
- for (pv = &vm_physmem[bank].pmseg.pvent[off] ; pv ;
- pv = pv->pv_next) {
- printf(" -> pmap %p, va %lx", pv->pv_pmap, pv->pv_va);
- pads(pv->pv_pmap);
- }
- }
- printf(" ");
-}
-
-#ifdef notyet
-void
-pmap_check_wiring(str, va)
- char *str;
- vm_offset_t va;
-{
- vm_map_entry_t entry;
- register int count, *pte;
-
- va = trunc_page(va);
- if (!pmap_pde_v(pmap_pde(pmap_kernel(), va)) ||
- !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
- return;
-
- if (!vm_map_lookup_entry(pt_map, va, &entry)) {
- printf("wired_check: entry for %x not found\n", va);
- return;
- }
- count = 0;
- for (pte = (int *)va; pte < (int *)(va + NBPG); pte++)
- if (*pte)
- count++;
- if (entry->wired_count != count)
- printf("*%s*: %x: w%d/a%d\n",
- str, va, entry->wired_count, count);
-}
-#endif
-
-/* print address space of pmap*/
-void
-pads(pm)
- pmap_t pm;
-{
- unsigned va, i, j;
- register pt_entry_t *pte;
-
- if (pm == pmap_kernel())
- return;
- for (i = 0; i < 1024; i++)
- if (pmap_pde_v(&pm->pm_pdir[i]))
- for (j = 0; j < 1024 ; j++) {
- va = (i << PDSHIFT) | (j << PGSHIFT);
- if (pm == pmap_kernel() &&
- va < VM_MIN_KERNEL_ADDRESS)
- continue;
- if (pm != pmap_kernel() &&
- va > VM_MAX_ADDRESS)
- continue;
- pte = pmap_pte(pm, va);
- if (pmap_pte_v(pte))
- printf("%x:%x ", va, *pte);
- }
-}
-#endif
diff --git a/sys/arch/i386/i386/rbus_machdep.c b/sys/arch/i386/i386/rbus_machdep.c
index 8f3d791867e..96a1e7ad42d 100644
--- a/sys/arch/i386/i386/rbus_machdep.c
+++ b/sys/arch/i386/i386/rbus_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rbus_machdep.c,v 1.8 2001/05/01 03:15:43 mickey Exp $ */
+/* $OpenBSD: rbus_machdep.c,v 1.9 2001/05/05 23:25:40 art Exp $ */
/* $NetBSD: rbus_machdep.c,v 1.2 1999/10/15 06:43:06 haya Exp $ */
/*
@@ -110,11 +110,7 @@ _bus_space_unmap(t, bsh, size, adrp)
/*
* Free the kernel virtual mapping.
*/
-#if defined(UVM)
uvm_km_free(kernel_map, va, endva - va);
-#else
- kmem_free(kernel_map, va, endva - va);
-#endif
}
} else {
panic("_i386_memio_unmap: bad bus space tag");
diff --git a/sys/arch/i386/i386/sys_machdep.c b/sys/arch/i386/i386/sys_machdep.c
index 5d72a697af6..109823edb24 100644
--- a/sys/arch/i386/i386/sys_machdep.c
+++ b/sys/arch/i386/i386/sys_machdep.c
@@ -59,9 +59,7 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
@@ -94,21 +92,12 @@ i386_user_cleanup(pcb)
struct pcb *pcb;
{
-#ifdef PMAP_NEW
ldt_free(pcb->pcb_pmap);
-#else
- ldt_free(pcb);
-#endif
pcb->pcb_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
if (pcb == curpcb)
lldt(pcb->pcb_ldt_sel);
-#if defined(UVM)
uvm_km_free(kernel_map, (vaddr_t)pcb->pcb_ldt,
(pcb->pcb_ldt_len * sizeof(union descriptor)));
-#else
- kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
- (pcb->pcb_ldt_len * sizeof(union descriptor)));
-#endif
pcb->pcb_ldt = 0;
}
@@ -165,13 +154,8 @@ i386_set_ldt(p, args, retval)
{
int error, i, n;
struct pcb *pcb = &p->p_addr->u_pcb;
-#ifdef PMAP_NEW
pmap_t pmap = p->p_vmspace->vm_map.pmap;
-#endif
int fsslot, gsslot;
-#ifndef PMAP_NEW
- int s;
-#endif
struct i386_set_ldt_args ua;
union descriptor desc;
@@ -193,49 +177,24 @@ i386_set_ldt(p, args, retval)
*/
/* allocate user ldt */
-#ifdef PMAP_NEW
if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
-#else
- if (pcb->pcb_ldt == 0 || (ua.start + ua.num) > pcb->pcb_ldt_len) {
-#endif
size_t old_len, new_len;
union descriptor *old_ldt, *new_ldt;
-#ifdef PMAP_NEW
if (pmap->pm_flags & PMF_USER_LDT) {
old_len = pmap->pm_ldt_len * sizeof(union descriptor);
old_ldt = pmap->pm_ldt;
-#else
- if (pcb->pcb_flags & PCB_USER_LDT) {
- old_len = pcb->pcb_ldt_len * sizeof(union descriptor);
- old_ldt = pcb->pcb_ldt;
-#endif
} else {
old_len = NLDT * sizeof(union descriptor);
old_ldt = ldt;
-#ifdef PMAP_NEW
pmap->pm_ldt_len = 512;
-#else
- pcb->pcb_ldt_len = 512;
-#endif
}
-#ifdef PMAP_NEW
while ((ua.start + ua.num) > pmap->pm_ldt_len)
pmap->pm_ldt_len *= 2;
new_len = pmap->pm_ldt_len * sizeof(union descriptor);
-#else
- while ((ua.start + ua.num) > pcb->pcb_ldt_len)
- pcb->pcb_ldt_len *= 2;
- new_len = pcb->pcb_ldt_len * sizeof(union descriptor);
-#endif
-#if defined(UVM)
new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, new_len);
-#else
- new_ldt = (union descriptor *)kmem_alloc(kernel_map, new_len);
-#endif
bcopy(old_ldt, new_ldt, old_len);
bzero((caddr_t)new_ldt + old_len, new_len - old_len);
-#ifdef PMAP_NEW
pmap->pm_ldt = new_ldt;
if (pmap->pm_flags & PCB_USER_LDT)
@@ -244,15 +203,6 @@ i386_set_ldt(p, args, retval)
pmap->pm_flags |= PCB_USER_LDT;
ldt_alloc(pmap, new_ldt, new_len);
pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
-#else
- pcb->pcb_ldt = new_ldt;
-
- if (pcb->pcb_flags & PCB_USER_LDT)
- ldt_free(pcb);
- else
- pcb->pcb_flags |= PCB_USER_LDT;
- ldt_alloc(pcb, new_ldt, new_len);
-#endif
if (pcb == curpcb)
lldt(pcb->pcb_ldt_sel);
@@ -263,11 +213,7 @@ i386_set_ldt(p, args, retval)
*/
if (old_ldt != ldt)
-#if defined(UVM)
uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len);
-#else
- kmem_free(kernel_map, (vaddr_t)old_ldt, old_len);
-#endif
#ifdef LDT_DEBUG
printf("i386_set_ldt(%d): new_ldt=%p\n", p->p_pid, new_ldt);
#endif
@@ -342,28 +288,17 @@ i386_set_ldt(p, args, retval)
}
}
-#ifndef PMAP_NEW
- s = splhigh();
-#endif
-
/* Now actually replace the descriptors. */
for (i = 0, n = ua.start; i < ua.num; i++, n++) {
if ((error = copyin(&ua.desc[i], &desc, sizeof(desc))) != 0)
goto out;
-#ifdef PMAP_NEW
pmap->pm_ldt[n] = desc;
-#else
- pcb->pcb_ldt[n] = desc;
-#endif
}
*retval = ua.start;
out:
-#ifndef PMAP_NEW
- splx(s);
-#endif
return (error);
}
#endif /* USER_LDT */
diff --git a/sys/arch/i386/i386/trap.c b/sys/arch/i386/i386/trap.c
index d0d12d0759e..2b2dd6501a5 100644
--- a/sys/arch/i386/i386/trap.c
+++ b/sys/arch/i386/i386/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.39 2001/05/05 21:26:37 art Exp $ */
+/* $OpenBSD: trap.c,v 1.40 2001/05/05 23:25:42 art Exp $ */
/* $NetBSD: trap.c,v 1.95 1996/05/05 06:50:02 mycroft Exp $ */
/*-
@@ -61,9 +61,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
@@ -200,11 +198,7 @@ trap(frame)
vm_prot_t vftype, ftype;
union sigval sv;
-#if defined(UVM)
uvmexp.traps++;
-#else
- cnt.v_trap++;
-#endif
/* SIGSEGV and SIGBUS need this */
if (frame.tf_err & PGEX_W) {
@@ -371,11 +365,7 @@ trap(frame)
goto out;
case T_ASTFLT|T_USER: /* Allow process switch */
-#if defined(UVM)
uvmexp.softs++;
-#else
- cnt.v_soft++;
-#endif
if (p->p_flag & P_OWEUPC) {
p->p_flag &= ~P_OWEUPC;
ADDUPROF(p);
@@ -444,9 +434,6 @@ trap(frame)
int rv;
extern vm_map_t kernel_map;
unsigned nss;
-#ifndef PMAP_NEW
- unsigned v;
-#endif
if (vm == NULL)
goto we_re_toast;
@@ -490,32 +477,7 @@ trap(frame)
}
}
-#ifndef PMAP_NEW
- /* check if page table is mapped, if not, fault it first */
- if ((PTD[pdei(va)] & PG_V) == 0) {
- v = trunc_page((vaddr_t)vtopte(va));
-#if defined(UVM)
- rv = uvm_fault(map, v, 0, ftype);
-#else
- rv = vm_fault(map, v, ftype, FALSE);
-#endif
- if (rv != KERN_SUCCESS)
- goto nogo;
- /* check if page table fault, increment wiring */
-#if defined(UVM)
- uvm_map_pageable(map, v, round_page(v+1), FALSE);
-#else
- vm_map_pageable(map, v, round_page(v+1), FALSE);
-#endif
- } else
- v = 0;
-#endif
-
-#if defined(UVM)
rv = uvm_fault(map, va, 0, ftype);
-#else
- rv = vm_fault(map, va, ftype, FALSE);
-#endif
if (rv == KERN_SUCCESS) {
if (nss > vm->vm_ssize)
vm->vm_ssize = nss;
@@ -524,19 +486,11 @@ trap(frame)
goto out;
}
-#ifndef PMAP_NEW
- nogo:
-#endif
if (type == T_PAGEFLT) {
if (pcb->pcb_onfault != 0)
goto copyfault;
-#if defined(UVM)
printf("uvm_fault(%p, 0x%lx, 0, %d) -> %x\n",
map, va, ftype, rv);
-#else
- printf("vm_fault(%p, %lx, %x, 0) -> %x\n",
- map, va, ftype, rv);
-#endif
goto we_re_toast;
}
sv.sival_int = rcr2();
@@ -620,15 +574,9 @@ trapwrite(addr)
nss = 0;
}
-#if defined(UVM)
if (uvm_fault(&vm->vm_map, va, 0, VM_PROT_READ | VM_PROT_WRITE)
!= KERN_SUCCESS)
return 1;
-#else
- if (vm_fault(&vm->vm_map, va, VM_PROT_READ | VM_PROT_WRITE, FALSE)
- != KERN_SUCCESS)
- return 1;
-#endif
if (nss > vm->vm_ssize)
vm->vm_ssize = nss;
@@ -654,11 +602,7 @@ syscall(frame)
register_t code, args[8], rval[2];
u_quad_t sticks;
-#if defined(UVM)
uvmexp.syscalls++;
-#else
- cnt.v_syscall++;
-#endif
#ifdef DIAGNOSTIC
if (!USERMODE(frame.tf_cs, frame.tf_eflags))
panic("syscall");
diff --git a/sys/arch/i386/i386/vm_machdep.c b/sys/arch/i386/i386/vm_machdep.c
index bcbda38c1f3..93e99ba3d9f 100644
--- a/sys/arch/i386/i386/vm_machdep.c
+++ b/sys/arch/i386/i386/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.24 2001/05/05 21:26:37 art Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.25 2001/05/05 23:25:43 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.61 1996/05/03 19:42:35 christos Exp $ */
/*-
@@ -61,9 +61,7 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include <machine/cpu.h>
#include <machine/gdt.h>
@@ -114,46 +112,21 @@ cpu_fork(p1, p2, stack, stacksize)
/* Sync curpcb (which is presumably p1's PCB) and copy it to p2. */
savectx(curpcb);
*pcb = p1->p_addr->u_pcb;
-#ifndef PMAP_NEW
- pmap_activate(p2);
-#endif
/*
* Preset these so that gdt_compact() doesn't get confused if called
* during the allocations below.
*/
pcb->pcb_tss_sel = GSEL(GNULL_SEL, SEL_KPL);
-#ifndef PMAP_NEW
- pcb->pcb_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
-#else
/*
 * Activate the address space. Note this will refresh pcb_ldt_sel.
*/
pmap_activate(p2);
-#endif
/* Fix up the TSS. */
pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
pcb->pcb_tss.tss_esp0 = (int)p2->p_addr + USPACE - 16;
tss_alloc(pcb);
-#if defined(USER_LDT) && !defined(PMAP_NEW)
- /* Copy the LDT, if necessary. */
- if (pcb->pcb_flags & PCB_USER_LDT) {
- size_t len;
- union descriptor *new_ldt;
-
- len = pcb->pcb_ldt_len * sizeof(union descriptor);
-#if defined(UVM)
- new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, len);
-#else
- new_ldt = (union descriptor *)kmem_alloc(kernel_map, len);
-#endif
- bcopy(pcb->pcb_ldt, new_ldt, len);
- pcb->pcb_ldt = new_ldt;
- ldt_alloc(pcb, new_ldt, len);
- }
-#endif
-
/*
* Copy the trapframe, and arrange for the child to return directly
* through rei(). Note the inline version of cpu_set_kpc().
@@ -221,11 +194,7 @@ cpu_exit(p)
npxproc = 0;
#endif
-#if defined(UVM)
uvmexp.swtch++;
-#else
- cnt.v_swtch++;
-#endif
switch_exit(p);
}
@@ -236,17 +205,6 @@ cpu_wait(p)
struct pcb *pcb;
pcb = &p->p_addr->u_pcb;
-#ifndef PMAP_NEW
-#ifdef USER_LDT
- if (pcb->pcb_flags & PCB_USER_LDT)
- i386_user_cleanup(pcb);
-#endif
-#else
- /*
- * No need to do user LDT cleanup here; it's handled in
- * pmap_destroy().
- */
-#endif
tss_free(pcb);
}
@@ -334,9 +292,7 @@ pagemove(from, to, size)
size_t size;
{
pt_entry_t *fpte, *tpte;
-#ifdef PMAP_NEW
pt_entry_t ofpte, otpte;
-#endif
#ifdef DIAGNOSTIC
if ((size & PAGE_MASK) != 0)
@@ -345,13 +301,10 @@ pagemove(from, to, size)
fpte = kvtopte(from);
tpte = kvtopte(to);
while (size > 0) {
-#ifdef PMAP_NEW
ofpte = *fpte;
otpte = *tpte;
-#endif
*tpte++ = *fpte;
*fpte++ = 0;
-#ifdef PMAP_NEW
#if defined(I386_CPU)
if (cpu_class != CPUCLASS_386)
#endif
@@ -361,20 +314,15 @@ pagemove(from, to, size)
if (ofpte & PG_V)
pmap_update_pg((vm_offset_t) from);
}
-#endif
from += NBPG;
to += NBPG;
size -= NBPG;
}
-#ifdef PMAP_NEW
#if defined(I386_CPU)
if (cpu_class != CPUCLASS_386)
tlbflush();
#endif
-#else
- pmap_update();
-#endif
}
/*
@@ -418,25 +366,15 @@ vmapbuf(bp, len)
vm_size_t len;
{
vm_offset_t faddr, taddr, off;
-#ifdef PMAP_NEW
paddr_t fpa;
-#else
- pt_entry_t *fpte, *tpte;
- pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
-#endif
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
off = (vm_offset_t)bp->b_data - faddr;
len = round_page(off + len);
-#if defined(UVM)
taddr= uvm_km_valloc_wait(phys_map, len);
-#else
- taddr = kmem_alloc_wait(phys_map, len);
-#endif
bp->b_data = (caddr_t)(taddr + off);
-#ifdef PMAP_NEW
/*
* The region is locked, so we expect that pmap_pte() will return
* non-NULL.
@@ -459,19 +397,6 @@ vmapbuf(bp, len)
taddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
-#else
- /*
- * The region is locked, so we expect that pmap_pte() will return
- * non-NULL.
- */
- fpte = pmap_pte(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map), faddr);
- tpte = pmap_pte(vm_map_pmap(phys_map), taddr);
- do {
- *tpte++ = *fpte++;
- len -= PAGE_SIZE;
- } while (len);
-#endif
-
}
/*
@@ -490,11 +415,7 @@ vunmapbuf(bp, len)
addr = trunc_page((vaddr_t)bp->b_data);
off = (vm_offset_t)bp->b_data - addr;
len = round_page(off + len);
-#if defined(UVM)
uvm_km_free_wakeup(phys_map, addr, len);
-#else
- kmem_free_wakeup(phys_map, addr, len);
-#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
}
diff --git a/sys/arch/i386/include/gdt.h b/sys/arch/i386/include/gdt.h
index 8d6144ba8b1..f3e3d7f90d0 100644
--- a/sys/arch/i386/include/gdt.h
+++ b/sys/arch/i386/include/gdt.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: gdt.h,v 1.7 2001/04/30 13:17:37 art Exp $ */
+/* $OpenBSD: gdt.h,v 1.8 2001/05/05 23:25:45 art Exp $ */
/* $NetBSD: gdt.h,v 1.3 1996/02/27 22:32:11 jtc Exp $ */
/*-
@@ -41,11 +41,6 @@
void gdt_init __P((void));
void tss_alloc __P((struct pcb *));
void tss_free __P((struct pcb *));
-#ifdef PMAP_NEW
void ldt_alloc __P((struct pmap *, union descriptor *, size_t));
void ldt_free __P((struct pmap *));
-#else
-void ldt_alloc __P((struct pcb *, union descriptor *, size_t));
-void ldt_free __P((struct pcb *));
-#endif
#endif
diff --git a/sys/arch/i386/include/param.h b/sys/arch/i386/include/param.h
index 98f5a4017ed..227e3ed4831 100644
--- a/sys/arch/i386/include/param.h
+++ b/sys/arch/i386/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.15 2001/05/05 20:56:39 art Exp $ */
+/* $OpenBSD: param.h,v 1.16 2001/05/05 23:25:46 art Exp $ */
/* $NetBSD: param.h,v 1.29 1996/03/04 05:04:26 cgd Exp $ */
/*-
@@ -95,13 +95,7 @@
#define MSGBUFSIZE 2*NBPG /* default message buffer size */
#endif
-#if !defined(UVM) && defined(PMAP_NEW)
-#error PMAP_NEW is not compatible with old VM
-#elif defined(UVM) && !defined(PMAP_NEW) && !defined(PMAP_OLD)
#define PMAP_NEW
-#elif defined(PMAP_NEW) && defined(PMAP_OLD)
-#error Both PMAP_NEW and PMAP_OLD cannot be defined concurrently
-#endif
/*
* Constants related to network buffer management.
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index de770cb59a6..50b1c0dd352 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,5 +1,509 @@
-#ifdef PMAP_NEW
-#include <machine/pmap.new.h>
-#else
-#include <machine/pmap.old.h>
+/* $OpenBSD: pmap.h,v 1.14 2001/05/05 23:25:47 art Exp $ */
+/* $NetBSD: pmap.h,v 1.43 2000/02/11 07:00:13 thorpej Exp $ */
+
+/*
+ *
+ * Copyright (c) 1997 Charles D. Cranor and Washington University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgment:
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * pmap.h: see pmap.c for the history of this pmap module.
+ */
+
+#ifndef _I386_PMAP_H_
+#define _I386_PMAP_H_
+
+#if defined(_KERNEL) && !defined(_LKM) && defined(__NetBSD__)
+#include "opt_user_ldt.h"
#endif
+
+#include <machine/cpufunc.h>
+#include <machine/pte.h>
+#include <machine/segments.h>
+#include <vm/pglist.h>
+#include <uvm/uvm_object.h>
+
+/*
+ * see pte.h for a description of i386 MMU terminology and hardware
+ * interface.
+ *
+ * a pmap describes a process's 4GB virtual address space. this
+ * virtual address space can be broken up into 1024 4MB regions which
+ * are described by PDEs in the PDP. the PDEs are defined as follows:
+ *
+ * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
+ * (the following assumes that KERNBASE is 0xc0000000)
+ *
+ * PDE#s VA range usage
+ * 0->767 0x0 -> 0xbfc00000 user address space, note that the
+ * max user address is 0xbfbfe000
+ * the final two pages in the last 4MB
+ * used to be reserved for the UAREA
+ * but now are no longer used
+ * 767 0xbfc00000-> recursive mapping of PDP (used for
+ * 0xc0000000 linear mapping of PTPs)
+ * 768->1023 0xc0000000-> kernel address space (constant
+ * 0xffc00000 across all pmap's/processes)
+ * 1023 0xffc00000-> "alternate" recursive PDP mapping
+ * <end> (for other pmaps)
+ *
+ *
+ * note: a recursive PDP mapping provides a way to map all the PTEs for
+ * a 4GB address space into a linear chunk of virtual memory. in other
+ * words, the PTE for page 0 is the first int mapped into the 4MB recursive
+ * area. the PTE for page 1 is the second int. the very last int in the
+ * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
+ * address).
+ *
+ * all pmaps' PDs must have the same values in slots 768->1023 so that
+ * the kernel is always mapped in every process. these values are loaded
+ * into the PD at pmap creation time.
+ *
+ * at any one time only one pmap can be active on a processor. this is
+ * the pmap whose PDP is pointed to by processor register %cr3. this pmap
+ * will have all its PTEs mapped into memory at the recursive mapping
+ * point (slot #767 as shown above). when the pmap code wants to find the
+ * PTE for a virtual address, all it has to do is the following:
+ *
+ * address of PTE = (767 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
+ * = 0xbfc00000 + (VA / 4096) * 4
+ *
+ * what happens if the pmap layer is asked to perform an operation
+ * on a pmap that is not the one which is currently active? in that
+ * case we take the PA of the PDP of non-active pmap and put it in
+ * slot 1023 of the active pmap. this causes the non-active pmap's
+ * PTEs to get mapped in the final 4MB of the 4GB address space
+ * (e.g. starting at 0xffc00000).
+ *
+ * the following figure shows the effects of the recursive PDP mapping:
+ *
+ * PDP (%cr3)
+ * +----+
+ * | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
+ * | |
+ * | |
+ * | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
+ * | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
+ * | |
+ * |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
+ * +----+
+ *
+ * note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
+ * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
+ *
+ * starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
+ * PTP:
+ *
+ * PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
+ * +----+
+ * | 0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
+ * | |
+ * | |
+ * | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
+ * | 768| -> maps contents of first kernel PTP
+ * | |
+ * |1023|
+ * +----+
+ *
+ * note that mapping of the PDP at PTP#767's VA (0xbfeff000) is
+ * defined as "PDP_BASE".... within that mapping there are two
+ * defines:
+ * "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
+ * which points back to itself.
+ * "APDP_PDE" (0xbfeffffc) is the VA of the PDE in the PDP which
+ * establishes the recursive mapping of the alternate pmap.
+ * to set the alternate PDP, one just has to put the correct
+ * PA info in *APDP_PDE.
+ *
+ * note that in the APTE_BASE space, the APDP appears at VA
+ * "APDP_BASE" (0xfffff000).
+ */
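[Editorial note: the address arithmetic above can be checked in isolation. A minimal stand-alone sketch in plain user-space C; KERNBASE 0xc0000000, 4KB pages, 4MB PDEs and 4-byte PTEs are assumed constants mirroring the comment, not includes of the real kernel headers:

	#include <stdio.h>

	#define KERNBASE	0xc0000000u
	#define NBPG		0x1000u			/* 4KB page */
	#define NBPD		0x400000u		/* 4MB mapped per PDE */
	#define PDSLOT_PTE	(KERNBASE / NBPD - 1)	/* 767: recursive slot */
	#define PTE_BASE	(PDSLOT_PTE * NBPD)	/* 0xbfc00000 */

	int
	main(void)
	{
		unsigned va = 0xc0123456u;

		/* address of PTE = (767 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t) */
		unsigned pte_va = PTE_BASE + (va / NBPG) * 4;

		printf("PTE for VA 0x%08x lives at VA 0x%08x\n", va, pte_va);
		return (0);
	}

For VA 0xc0123456 this prints 0xbff0048c: page number 0xc0123 scaled by the PTE size and offset into the recursive window.]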
+
+/*
+ * the following defines identify the slots used as described above.
+ */
+
+#define PDSLOT_PTE ((KERNBASE/NBPD)-1) /* 767: for recursive PDP map */
+#define PDSLOT_KERN (KERNBASE/NBPD) /* 768: start of kernel space */
+#define PDSLOT_APTE ((unsigned)1023) /* 1023: alternative recursive slot */
+
+/*
+ * the following defines give the virtual addresses of various MMU
+ * data structures:
+ * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
+ * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PTD
+ * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
+ */
+
+#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
+#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
+#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
+#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
+#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
+#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
+
+/*
+ * XXXCDC: tmp xlate from old names:
+ * PTDPTDI -> PDSLOT_PTE
+ * KPTDI -> PDSLOT_KERN
+ * APTDPTDI -> PDSLOT_APTE
+ */
+
+/*
+ * the following define determines how many PTPs should be set up for the
+ * kernel by locore.s at boot time. this should be large enough to
+ * get the VM system running. once the VM system is running, the
+ * pmap module can add more PTPs to the kernel area on demand.
+ */
+
+#ifndef NKPTP
+#define NKPTP 4 /* 16MB to start */
+#endif
+#define NKPTP_MIN 4 /* smallest value we allow */
+#define NKPTP_MAX (1024 - (KERNBASE/NBPD) - 1)
+ /* largest value (-1 for APTP space) */
+
+/*
+ * various address macros
+ *
+ * vtopte: return a pointer to the PTE mapping a VA
+ * kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
+ * ptetov: given a pointer to a PTE, return the VA that it maps
+ * vtophys: translate a VA to the PA mapped to it
+ *
+ * plus alternative versions of the above
+ */
+
+#define vtopte(VA) (PTE_BASE + i386_btop(VA))
+#define kvtopte(VA) vtopte(VA)
+#define ptetov(PT) (i386_ptob(PT - PTE_BASE))
+#define vtophys(VA) ((*vtopte(VA) & PG_FRAME) | \
+ ((unsigned)(VA) & ~PG_FRAME))
+#define avtopte(VA) (APTE_BASE + i386_btop(VA))
+#define ptetoav(PT) (i386_ptob(PT - APTE_BASE))
+#define avtophys(VA) ((*avtopte(VA) & PG_FRAME) | \
+ ((unsigned)(VA) & ~PG_FRAME))
+
+/*
+ * pdei/ptei: generate index into PDP/PTP from a VA
+ */
+#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
+#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
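[Editorial note: for example, with the usual i386 values assumed here (PD_MASK 0xffc00000, PDSHIFT 22, PT_MASK 0x003ff000, PGSHIFT 12), VA 0xc0123456 decomposes as:

	pdei(0xc0123456) == (0xc0123456 & 0xffc00000) >> 22 == 768
	ptei(0xc0123456) == (0xc0123456 & 0x003ff000) >> 12 == 0x123 (291)

so that VA's PTE is entry 291 of the PTP named by kernel PDE #768.]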
+
+/*
+ * PTP macros:
+ * a PTP's index is the PD index of the PDE that points to it
+ * a PTP's offset is the byte-offset in the PTE space that this PTP is at
+ * a PTP's VA is the first VA mapped by that PTP
+ *
+ * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
+ * NBPD == number of bytes a PTP can map (4MB)
+ */
+
+#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
+#define ptp_o2i(O) ((O) / NBPG) /* offset => index */
+#define ptp_i2v(I) ((I) * NBPD) /* index => VA */
+#define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
+
+/*
+ * PG_AVAIL usage: we make use of the ignored bits of the PTE
+ */
+
+#define PG_W PG_AVAIL1 /* "wired" mapping */
+#define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
+/* PG_AVAIL3 not used */
+
+#ifdef _KERNEL
+/*
+ * pmap data structures: see pmap.c for details of locking.
+ */
+
+struct pmap;
+typedef struct pmap *pmap_t;
+
+/*
+ * we maintain a list of all non-kernel pmaps
+ */
+
+LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
+
+/*
+ * the pmap structure
+ *
+ * note that the pm_obj contains the simple_lock, the reference count,
+ * page list, and number of PTPs within the pmap.
+ */
+
+struct pmap {
+ struct uvm_object pm_obj; /* object (lck by object lock) */
+#define pm_lock pm_obj.vmobjlock
+ LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
+ pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */
+ u_int32_t pm_pdirpa; /* PA of PD (read-only after create) */
+ struct vm_page *pm_ptphint; /* pointer to a PTP in our pmap */
+ struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
+
+ int pm_flags; /* see below */
+
+ union descriptor *pm_ldt; /* user-set LDT */
+ int pm_ldt_len; /* number of LDT entries */
+ int pm_ldt_sel; /* LDT selector */
+};
+
+/* pm_flags */
+#define PMF_USER_LDT 0x01 /* pmap has user-set LDT */
+
+/*
+ * for each managed physical page we maintain a list of <PMAP,VA>'s
+ * which it is mapped at. the list is headed by a pv_head structure.
+ * there is one pv_head per managed phys page (allocated at boot time).
+ * the pv_head structure points to a list of pv_entry structures (each
+ * describes one mapping).
+ */
+
+struct pv_entry;
+
+struct pv_head {
+ simple_lock_data_t pvh_lock; /* locks every pv on this list */
+ struct pv_entry *pvh_list; /* head of list (locked by pvh_lock) */
+};
+
+struct pv_entry { /* locked by its list's pvh_lock */
+ struct pv_entry *pv_next; /* next entry */
+ struct pmap *pv_pmap; /* the pmap */
+ vaddr_t pv_va; /* the virtual address */
+ struct vm_page *pv_ptp; /* the vm_page of the PTP */
+};
+
+/*
+ * pv_entrys are dynamically allocated in chunks from a single page.
+ * we keep track of how many pv_entrys are in use for each page and
+ * we can free pv_entry pages if needed. there is one lock for the
+ * entire allocation system.
+ */
+
+struct pv_page_info {
+ TAILQ_ENTRY(pv_page) pvpi_list;
+ struct pv_entry *pvpi_pvfree;
+ int pvpi_nfree;
+};
+
+/*
+ * number of pv_entry's in a pv_page
+ * (note: won't work on systems where NBPG isn't a constant)
+ */
+
+#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
+ sizeof(struct pv_entry))
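[Editorial note: worked out for i386, an illustrative computation assuming 32-bit pointers, i.e. sizeof(struct pv_page_info) == 16 and sizeof(struct pv_entry) == 16:

	PVE_PER_PVPAGE = (4096 - 16) / 16 = 255

so each 4KB pv_page carries 255 pv_entry slots after its header.]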
+
+/*
+ * a pv_page: where pv_entrys are allocated from
+ */
+
+struct pv_page {
+ struct pv_page_info pvinfo;
+ struct pv_entry pvents[PVE_PER_PVPAGE];
+};
+
+/*
+ * pmap_remove_record: a record of VAs that have been unmapped, used to
+ * flush TLB. if we have more than PMAP_RR_MAX then we stop recording.
+ */
+
+#define PMAP_RR_MAX 16 /* max of 16 pages (64K) */
+
+struct pmap_remove_record {
+ int prr_npages;
+ vaddr_t prr_vas[PMAP_RR_MAX];
+};
+
+/*
+ * pmap_transfer_location: used to pass the current location in the
+ * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
+ * a pmap_copy].
+ */
+
+struct pmap_transfer_location {
+ vaddr_t addr; /* the address (page-aligned) */
+ pt_entry_t *pte; /* the PTE that maps address */
+ struct vm_page *ptp; /* the PTP that the PTE lives in */
+};
+
+/*
+ * global kernel variables
+ */
+
+extern pd_entry_t PTD[];
+
+/* PTDpaddr: is the physical address of the kernel's PDP */
+extern u_long PTDpaddr;
+
+extern struct pmap kernel_pmap_store; /* kernel pmap */
+extern int nkpde; /* current # of PDEs for kernel */
+extern int pmap_pg_g; /* do we support PG_G? */
+
+/*
+ * macros
+ */
+
+#define pmap_kernel() (&kernel_pmap_store)
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_update() tlbflush()
+
+#define pmap_clear_modify(pg) pmap_change_attrs(pg, 0, PG_M)
+#define pmap_clear_reference(pg) pmap_change_attrs(pg, 0, PG_U)
+#define pmap_copy(DP,SP,D,L,S) pmap_transfer(DP,SP,D,L,S, FALSE)
+#define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
+#define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
+#define pmap_move(DP,SP,D,L,S) pmap_transfer(DP,SP,D,L,S, TRUE)
+#define pmap_phys_address(ppn) i386_ptob(ppn)
+#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
+
+
+/*
+ * prototypes
+ */
+
+void pmap_activate __P((struct proc *));
+void pmap_bootstrap __P((vaddr_t));
+boolean_t pmap_change_attrs __P((struct vm_page *, int, int));
+void pmap_deactivate __P((struct proc *));
+static void pmap_page_protect __P((struct vm_page *, vm_prot_t));
+void pmap_page_remove __P((struct vm_page *));
+static void pmap_protect __P((struct pmap *, vaddr_t,
+ vaddr_t, vm_prot_t));
+void pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
+boolean_t pmap_test_attrs __P((struct vm_page *, int));
+void pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
+ vsize_t, vaddr_t, boolean_t));
+static void pmap_update_pg __P((vaddr_t));
+static void pmap_update_2pg __P((vaddr_t,vaddr_t));
+void pmap_write_protect __P((struct pmap *, vaddr_t,
+ vaddr_t, vm_prot_t));
+
+vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */
+
+#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
+
+/*
+ * inline functions
+ */
+
+/*
+ * pmap_update_pg: flush one page from the TLB (or flush the whole thing
+ * if hardware doesn't support one-page flushing)
+ */
+
+__inline static void
+pmap_update_pg(va)
+ vaddr_t va;
+{
+#if defined(I386_CPU)
+ if (cpu_class == CPUCLASS_386)
+ pmap_update();
+ else
+#endif
+ invlpg((u_int) va);
+}
+
+/*
+ * pmap_update_2pg: flush two pages from the TLB
+ */
+
+__inline static void
+pmap_update_2pg(va, vb)
+ vaddr_t va, vb;
+{
+#if defined(I386_CPU)
+ if (cpu_class == CPUCLASS_386)
+ pmap_update();
+ else
+#endif
+ {
+ invlpg((u_int) va);
+ invlpg((u_int) vb);
+ }
+}
+
+/*
+ * pmap_page_protect: change the protection of all recorded mappings
+ * of a managed page
+ *
+ * => this function is a frontend for pmap_page_remove/pmap_change_attrs
+ * => we only have to worry about making the page more protected.
+ * unprotecting a page is done on-demand at fault time.
+ */
+
+__inline static void
+pmap_page_protect(pg, prot)
+ struct vm_page *pg;
+ vm_prot_t prot;
+{
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ (void) pmap_change_attrs(pg, PG_RO, PG_RW);
+ } else {
+ pmap_page_remove(pg);
+ }
+ }
+}
+
+/*
+ * pmap_protect: change the protection of pages in a pmap
+ *
+ * => this function is a frontend for pmap_remove/pmap_write_protect
+ * => we only have to worry about making the page more protected.
+ * unprotecting a page is done on-demand at fault time.
+ */
+
+__inline static void
+pmap_protect(pmap, sva, eva, prot)
+ struct pmap *pmap;
+ vaddr_t sva, eva;
+ vm_prot_t prot;
+{
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ pmap_write_protect(pmap, sva, eva, prot);
+ } else {
+ pmap_remove(pmap, sva, eva);
+ }
+ }
+}
+
+vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
+
+#if defined(USER_LDT)
+void pmap_ldt_cleanup __P((struct proc *));
+#define PMAP_FORK
+#endif /* USER_LDT */
+
+#endif /* _KERNEL */
+#endif /* _I386_PMAP_H_ */
diff --git a/sys/arch/i386/include/pmap.new.h b/sys/arch/i386/include/pmap.new.h
deleted file mode 100644
index 712d418dd33..00000000000
--- a/sys/arch/i386/include/pmap.new.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/* $OpenBSD: pmap.new.h,v 1.1 2001/03/22 23:36:52 niklas Exp $ */
-/* $NetBSD: pmap.h,v 1.43 2000/02/11 07:00:13 thorpej Exp $ */
-
-/*
- *
- * Copyright (c) 1997 Charles D. Cranor and Washington University.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgment:
- * This product includes software developed by Charles D. Cranor and
- * Washington University.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * pmap.h: see pmap.c for the history of this pmap module.
- */
-
-#ifndef _I386_PMAP_H_
-#define _I386_PMAP_H_
-
-#if defined(_KERNEL) && !defined(_LKM) && defined(__NetBSD__)
-#include "opt_user_ldt.h"
-#endif
-
-#include <machine/cpufunc.h>
-#include <machine/pte.h>
-#include <machine/segments.h>
-#include <vm/pglist.h>
-#include <uvm/uvm_object.h>
-
-/*
- * see pte.h for a description of i386 MMU terminology and hardware
- * interface.
- *
- * a pmap describes a process's 4GB virtual address space. this
- * virtual address space can be broken up into 1024 4MB regions which
- * are described by PDEs in the PDP. the PDEs are defined as follows:
- *
- * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
- * (the following assumes that KERNBASE is 0xc0000000)
- *
- * PDE#s VA range usage
- * 0->767 0x0 -> 0xbfc00000 user address space, note that the
- * max user address is 0xbfbfe000
- * the final two pages in the last 4MB
- * used to be reserved for the UAREA
- * but now are no longer used
- * 767 0xbfc00000-> recursive mapping of PDP (used for
- * 0xc0000000 linear mapping of PTPs)
- * 768->1023 0xc0000000-> kernel address space (constant
- * 0xffc00000 across all pmap's/processes)
- * 1023 0xffc00000-> "alternate" recursive PDP mapping
- * <end> (for other pmaps)
- *
- *
- * note: a recursive PDP mapping provides a way to map all the PTEs for
- * a 4GB address space into a linear chunk of virtual memory. in other
- * words, the PTE for page 0 is the first int mapped into the 4MB recursive
- * area. the PTE for page 1 is the second int. the very last int in the
- * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
- * address).
- *
- * all pmaps' PDs must have the same values in slots 768->1023 so that
- * the kernel is always mapped in every process. these values are loaded
- * into the PD at pmap creation time.
- *
- * at any one time only one pmap can be active on a processor. this is
- * the pmap whose PDP is pointed to by processor register %cr3. this pmap
- * will have all its PTEs mapped into memory at the recursive mapping
- * point (slot #767 as shown above). when the pmap code wants to find the
- * PTE for a virtual address, all it has to do is the following:
- *
- * address of PTE = (767 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
- * = 0xbfc00000 + (VA / 4096) * 4
- *
- * what happens if the pmap layer is asked to perform an operation
- * on a pmap that is not the one which is currently active? in that
- * case we take the PA of the PDP of non-active pmap and put it in
- * slot 1023 of the active pmap. this causes the non-active pmap's
- * PTEs to get mapped in the final 4MB of the 4GB address space
- * (e.g. starting at 0xffc00000).
- *
- * the following figure shows the effects of the recursive PDP mapping:
- *
- * PDP (%cr3)
- * +----+
- * | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
- * | |
- * | |
- * | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
- * | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
- * | |
- * |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
- * +----+
- *
- * note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
- * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
- *
- * starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
- * PTP:
- *
- * PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
- * +----+
- * | 0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
- * | |
- * | |
- * | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
- * | 768| -> maps contents of first kernel PTP
- * | |
- * |1023|
- * +----+
- *
- * note that mapping of the PDP at PTP#767's VA (0xbfeff000) is
- * defined as "PDP_BASE".... within that mapping there are two
- * defines:
- * "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
- * which points back to itself.
- * "APDP_PDE" (0xbfeffffc) is the VA of the PDE in the PDP which
- * establishes the recursive mapping of the alternate pmap.
- * to set the alternate PDP, one just has to put the correct
- * PA info in *APDP_PDE.
- *
- * note that in the APTE_BASE space, the APDP appears at VA
- * "APDP_BASE" (0xfffff000).
- */
-
-/*
- * the following defines identify the slots used as described above.
- */
-
-#define PDSLOT_PTE ((KERNBASE/NBPD)-1) /* 767: for recursive PDP map */
-#define PDSLOT_KERN (KERNBASE/NBPD) /* 768: start of kernel space */
-#define PDSLOT_APTE ((unsigned)1023) /* 1023: alternative recursive slot */
-
-/*
- * the following defines give the virtual addresses of various MMU
- * data structures:
- * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
- * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PTD
- * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
- */
-
-#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
-#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
-#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
-#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
-#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
-#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
-
-/*
- * XXXCDC: tmp xlate from old names:
- * PTDPTDI -> PDSLOT_PTE
- * KPTDI -> PDSLOT_KERN
- * APTDPTDI -> PDSLOT_APTE
- */
-
-/*
- * the following define determines how many PTPs should be set up for the
- * kernel by locore.s at boot time. this should be large enough to
- * get the VM system running. once the VM system is running, the
- * pmap module can add more PTPs to the kernel area on demand.
- */
-
-#ifndef NKPTP
-#define NKPTP 4 /* 16MB to start */
-#endif
-#define NKPTP_MIN 4 /* smallest value we allow */
-#define NKPTP_MAX (1024 - (KERNBASE/NBPD) - 1)
- /* largest value (-1 for APTP space) */
-
-/*
- * various address macros
- *
- * vtopte: return a pointer to the PTE mapping a VA
- * kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
- * ptetov: given a pointer to a PTE, return the VA that it maps
- * vtophys: translate a VA to the PA mapped to it
- *
- * plus alternative versions of the above
- */
-
-#define vtopte(VA) (PTE_BASE + i386_btop(VA))
-#define kvtopte(VA) vtopte(VA)
-#define ptetov(PT) (i386_ptob(PT - PTE_BASE))
-#define vtophys(VA) ((*vtopte(VA) & PG_FRAME) | \
- ((unsigned)(VA) & ~PG_FRAME))
-#define avtopte(VA) (APTE_BASE + i386_btop(VA))
-#define ptetoav(PT) (i386_ptob(PT - APTE_BASE))
-#define avtophys(VA) ((*avtopte(VA) & PG_FRAME) | \
- ((unsigned)(VA) & ~PG_FRAME))
-
-/*
- * pdei/ptei: generate index into PDP/PTP from a VA
- */
-#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
-#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
-
-/*
- * PTP macros:
- * a PTP's index is the PD index of the PDE that points to it
- * a PTP's offset is the byte-offset in the PTE space that this PTP is at
- * a PTP's VA is the first VA mapped by that PTP
- *
- * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
- * NBPD == number of bytes a PTP can map (4MB)
- */
-
-#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
-#define ptp_o2i(O) ((O) / NBPG) /* offset => index */
-#define ptp_i2v(I) ((I) * NBPD) /* index => VA */
-#define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
-
-/*
- * PG_AVAIL usage: we make use of the ignored bits of the PTE
- */
-
-#define PG_W PG_AVAIL1 /* "wired" mapping */
-#define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
-/* PG_AVAIL3 not used */
-
-#ifdef _KERNEL
-/*
- * pmap data structures: see pmap.c for details of locking.
- */
-
-struct pmap;
-typedef struct pmap *pmap_t;
-
-/*
- * we maintain a list of all non-kernel pmaps
- */
-
-LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
-
-/*
- * the pmap structure
- *
- * note that the pm_obj contains the simple_lock, the reference count,
- * page list, and number of PTPs within the pmap.
- */
-
-struct pmap {
- struct uvm_object pm_obj; /* object (lck by object lock) */
-#define pm_lock pm_obj.vmobjlock
- LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
- pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */
- u_int32_t pm_pdirpa; /* PA of PD (read-only after create) */
- struct vm_page *pm_ptphint; /* pointer to a PTP in our pmap */
- struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
-
- int pm_flags; /* see below */
-
- union descriptor *pm_ldt; /* user-set LDT */
- int pm_ldt_len; /* number of LDT entries */
- int pm_ldt_sel; /* LDT selector */
-};
-
-/* pm_flags */
-#define PMF_USER_LDT 0x01 /* pmap has user-set LDT */
-
-/*
- * for each managed physical page we maintain a list of <PMAP,VA>'s
- * which it is mapped at. the list is headed by a pv_head structure.
- * there is one pv_head per managed phys page (allocated at boot time).
- * the pv_head structure points to a list of pv_entry structures (each
- * describes one mapping).
- */
-
-struct pv_entry;
-
-struct pv_head {
- simple_lock_data_t pvh_lock; /* locks every pv on this list */
- struct pv_entry *pvh_list; /* head of list (locked by pvh_lock) */
-};
-
-struct pv_entry { /* locked by its list's pvh_lock */
- struct pv_entry *pv_next; /* next entry */
- struct pmap *pv_pmap; /* the pmap */
- vaddr_t pv_va; /* the virtual address */
- struct vm_page *pv_ptp; /* the vm_page of the PTP */
-};
-
-/*
- * pv_entrys are dynamically allocated in chunks from a single page.
- * we keep track of how many pv_entrys are in use for each page and
- * we can free pv_entry pages if needed. there is one lock for the
- * entire allocation system.
- */
-
-struct pv_page_info {
- TAILQ_ENTRY(pv_page) pvpi_list;
- struct pv_entry *pvpi_pvfree;
- int pvpi_nfree;
-};
-
-/*
- * number of pv_entry's in a pv_page
- * (note: won't work on systems where NBPG isn't a constant)
- */
-
-#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
- sizeof(struct pv_entry))
-
-/*
- * a pv_page: where pv_entrys are allocated from
- */
-
-struct pv_page {
- struct pv_page_info pvinfo;
- struct pv_entry pvents[PVE_PER_PVPAGE];
-};
-
-/*
- * pmap_remove_record: a record of VAs that have been unmapped, used to
- * flush TLB. if we have more than PMAP_RR_MAX then we stop recording.
- */
-
-#define PMAP_RR_MAX 16 /* max of 16 pages (64K) */
-
-struct pmap_remove_record {
- int prr_npages;
- vaddr_t prr_vas[PMAP_RR_MAX];
-};
-
-/*
- * pmap_transfer_location: used to pass the current location in the
- * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
- * a pmap_copy].
- */
-
-struct pmap_transfer_location {
- vaddr_t addr; /* the address (page-aligned) */
- pt_entry_t *pte; /* the PTE that maps address */
- struct vm_page *ptp; /* the PTP that the PTE lives in */
-};
-
-/*
- * global kernel variables
- */
-
-extern pd_entry_t PTD[];
-
-/* PTDpaddr: is the physical address of the kernel's PDP */
-extern u_long PTDpaddr;
-
-extern struct pmap kernel_pmap_store; /* kernel pmap */
-extern int nkpde; /* current # of PDEs for kernel */
-extern int pmap_pg_g; /* do we support PG_G? */
-
-/*
- * macros
- */
-
-#define pmap_kernel() (&kernel_pmap_store)
-#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
-#define pmap_update() tlbflush()
-
-#define pmap_clear_modify(pg) pmap_change_attrs(pg, 0, PG_M)
-#define pmap_clear_reference(pg) pmap_change_attrs(pg, 0, PG_U)
-#define pmap_copy(DP,SP,D,L,S) pmap_transfer(DP,SP,D,L,S, FALSE)
-#define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
-#define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
-#define pmap_move(DP,SP,D,L,S) pmap_transfer(DP,SP,D,L,S, TRUE)
-#define pmap_phys_address(ppn) i386_ptob(ppn)
-#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
-
-
-/*
- * prototypes
- */
-
-void pmap_activate __P((struct proc *));
-void pmap_bootstrap __P((vaddr_t));
-boolean_t pmap_change_attrs __P((struct vm_page *, int, int));
-void pmap_deactivate __P((struct proc *));
-static void pmap_page_protect __P((struct vm_page *, vm_prot_t));
-void pmap_page_remove __P((struct vm_page *));
-static void pmap_protect __P((struct pmap *, vaddr_t,
- vaddr_t, vm_prot_t));
-void pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
-boolean_t pmap_test_attrs __P((struct vm_page *, int));
-void pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
- vsize_t, vaddr_t, boolean_t));
-static void pmap_update_pg __P((vaddr_t));
-static void pmap_update_2pg __P((vaddr_t,vaddr_t));
-void pmap_write_protect __P((struct pmap *, vaddr_t,
- vaddr_t, vm_prot_t));
-
-vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */
-
-#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
-
-/*
- * inline functions
- */
-
-/*
- * pmap_update_pg: flush one page from the TLB (or flush the whole thing
- * if hardware doesn't support one-page flushing)
- */
-
-__inline static void
-pmap_update_pg(va)
- vaddr_t va;
-{
-#if defined(I386_CPU)
- if (cpu_class == CPUCLASS_386)
- pmap_update();
- else
-#endif
- invlpg((u_int) va);
-}
-
-/*
- * pmap_update_2pg: flush two pages from the TLB
- */
-
-__inline static void
-pmap_update_2pg(va, vb)
- vaddr_t va, vb;
-{
-#if defined(I386_CPU)
- if (cpu_class == CPUCLASS_386)
- pmap_update();
- else
-#endif
- {
- invlpg((u_int) va);
- invlpg((u_int) vb);
- }
-}
-
-/*
- * pmap_page_protect: change the protection of all recorded mappings
- * of a managed page
- *
- * => this function is a frontend for pmap_page_remove/pmap_change_attrs
- * => we only have to worry about making the page more protected.
- * unprotecting a page is done on-demand at fault time.
- */
-
-__inline static void
-pmap_page_protect(pg, prot)
- struct vm_page *pg;
- vm_prot_t prot;
-{
- if ((prot & VM_PROT_WRITE) == 0) {
- if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
- (void) pmap_change_attrs(pg, PG_RO, PG_RW);
- } else {
- pmap_page_remove(pg);
- }
- }
-}
-
-/*
- * pmap_protect: change the protection of pages in a pmap
- *
- * => this function is a frontend for pmap_remove/pmap_write_protect
- * => we only have to worry about making the page more protected.
- * unprotecting a page is done on-demand at fault time.
- */
-
-__inline static void
-pmap_protect(pmap, sva, eva, prot)
- struct pmap *pmap;
- vaddr_t sva, eva;
- vm_prot_t prot;
-{
- if ((prot & VM_PROT_WRITE) == 0) {
- if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
- pmap_write_protect(pmap, sva, eva, prot);
- } else {
- pmap_remove(pmap, sva, eva);
- }
- }
-}
-
-vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
-
-#if defined(USER_LDT)
-void pmap_ldt_cleanup __P((struct proc *));
-#define PMAP_FORK
-#endif /* USER_LDT */
-
-#endif /* _KERNEL */
-#endif /* _I386_PMAP_H_ */
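
The pv_entry chunk allocator deleted above packs one pv_page_info header plus
PVE_PER_PVPAGE entry slots into a single page. The standalone sketch below is
an illustration only: the struct and field names are borrowed from the header,
but the malloc() stand-in for the kernel page allocator, the placeholder TAILQ
linkage, and the main() driver are assumptions.

#include <stdio.h>
#include <stdlib.h>

#define NBPG 4096                        /* assumed i386 page size */

struct pv_entry_s {                      /* simplified pv_entry */
        struct pv_entry_s *pv_next;
        void *pv_pmap;
        unsigned long pv_va;
        void *pv_ptp;
};

struct pv_page_info_s {
        void *pvpi_list_placeholder[2];  /* stands in for TAILQ_ENTRY */
        struct pv_entry_s *pvpi_pvfree;
        int pvpi_nfree;
};

#define PVE_PER_PVPAGE \
        ((NBPG - sizeof(struct pv_page_info_s)) / sizeof(struct pv_entry_s))

struct pv_page_s {
        struct pv_page_info_s pvinfo;
        struct pv_entry_s pvents[PVE_PER_PVPAGE];
};

int
main(void)
{
        /* one "page" from which pv_entrys are handed out */
        struct pv_page_s *pvp = malloc(sizeof(*pvp));
        size_t i;

        /* thread every slot onto the page-local free list */
        pvp->pvinfo.pvpi_pvfree = NULL;
        pvp->pvinfo.pvpi_nfree = 0;
        for (i = 0; i < PVE_PER_PVPAGE; i++) {
                pvp->pvents[i].pv_next = pvp->pvinfo.pvpi_pvfree;
                pvp->pvinfo.pvpi_pvfree = &pvp->pvents[i];
                pvp->pvinfo.pvpi_nfree++;
        }
        printf("%zu pv_entrys per %d-byte page\n",
            (size_t)PVE_PER_PVPAGE, NBPG);
        free(pvp);
        return 0;
}

On the original 32-bit layout (16-byte pv_page_info, 16-byte pv_entry,
assuming 4-byte pointers and no padding) the formula works out to
(4096 - 16) / 16 = 255 slots per page; the same arithmetic with pmap.old.h's
12-byte pv_entry gives (4096 - 16) / 12 = 340, the hard-coded NPVPPG below.
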
diff --git a/sys/arch/i386/include/pmap.old.h b/sys/arch/i386/include/pmap.old.h
deleted file mode 100644
index 2965a1258fb..00000000000
--- a/sys/arch/i386/include/pmap.old.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/* $OpenBSD: pmap.old.h,v 1.12 1999/09/20 02:47:43 deraadt Exp $ */
-/* $NetBSD: pmap.h,v 1.23 1996/05/03 19:26:30 christos Exp $ */
-
-/*
- * Copyright (c) 1995 Charles M. Hannum. All rights reserved.
- * Copyright (c) 1991 Regents of the University of California.
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department and William Jolitz of UUNET Technologies Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)pmap.h 7.4 (Berkeley) 5/12/91
- */
-
-/*
- * Derived from hp300 version by Mike Hibler, this version by William
- * Jolitz uses a recursive map [a pde points to the page directory] to
- * map the page tables using the pagetables themselves. This is done to
- * reduce the impact on kernel virtual memory for lots of sparse address
- * space, and to reduce the cost of memory to each process.
- *
- * from hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
- */
-
-#ifndef _I386_PMAP_H_
-#define _I386_PMAP_H_
-
-#include <machine/cpufunc.h>
-#include <machine/pte.h>
-
-/*
- * 386 page table entry and page table directory
- * W.Jolitz, 8/89
- */
-
-/*
- * One page directory, shared between
- * kernel and user modes.
- */
-#define KPTDI (KERNBASE>>22) /* start of kernel virtual pde's */
-#define PTDPTDI (KPTDI-1) /* ptd entry that points to ptd! */
-#define APTDPTDI 0x3ff /* start of alternate page directory */
-#define MAXKPDE (APTDPTDI-KPTDI)
-#ifndef NKPDE /* permit config file override */
-#define NKPDE 127 /* # to static alloc */
-#endif
-
-/*
- * Address of current and alternate address space page table maps
- * and directories.
- */
-#ifdef _KERNEL
-extern pt_entry_t PTmap[], APTmap[], Upte;
-extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;
-extern pt_entry_t *Sysmap;
-
-extern int PTDpaddr; /* physical address of kernel PTD */
-
-void pmap_bootstrap __P((vm_offset_t start));
-boolean_t pmap_testbit __P((vm_offset_t, int));
-void pmap_changebit __P((vm_offset_t, int, int));
-void pmap_prefault __P((vm_map_t, vm_offset_t, vm_size_t));
-#endif
-
-/*
- * virtual address to page table entry and
- * to physical address. Likewise for alternate address space.
- * Note: these work recursively, thus vtopte of a pte will give
- * the corresponding pde that in turn maps it.
- */
-#define vtopte(va) (PTmap + i386_btop(va))
-#define kvtopte(va) vtopte(va)
-#define ptetov(pt) (i386_ptob(pt - PTmap))
-#define vtophys(va) \
- ((*vtopte(va) & PG_FRAME) | ((unsigned)(va) & ~PG_FRAME))
-
-#define avtopte(va) (APTmap + i386_btop(va))
-#define ptetoav(pt) (i386_ptob(pt - APTmap))
-#define avtophys(va) \
- ((*avtopte(va) & PG_FRAME) | ((unsigned)(va) & ~PG_FRAME))
-
-/*
- * macros to generate page directory/table indices
- */
-#define pdei(va) (((va) & PD_MASK) >> PDSHIFT)
-#define ptei(va) (((va) & PT_MASK) >> PGSHIFT)
-
-/*
- * Pmap stuff
- */
-typedef struct pmap {
- pd_entry_t *pm_pdir; /* KVA of page directory */
- boolean_t pm_pdchanged; /* pdir changed */
- short pm_dref; /* page directory ref count */
- short pm_count; /* pmap reference count */
- simple_lock_data_t pm_lock; /* lock on pmap */
- struct pmap_statistics pm_stats; /* pmap statistics */
- long pm_ptpages; /* more stats: PT pages */
-} *pmap_t;
-
-/*
- * For each vm_page_t, there is a list of all currently valid virtual
- * mappings of that page. An entry is a pv_entry, the list is pv_table.
- */
-struct pv_entry {
- struct pv_entry *pv_next; /* next pv_entry */
- pmap_t pv_pmap; /* pmap where mapping lies */
- vm_offset_t pv_va; /* virtual address for mapping */
-};
-
-struct pv_page;
-
-struct pv_page_info {
- TAILQ_ENTRY(pv_page) pgi_list;
- struct pv_entry *pgi_freelist;
- int pgi_nfree;
-};
-
-/*
- * This is basically:
- * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
- */
-#define NPVPPG 340
-
-struct pv_page {
- struct pv_page_info pvp_pgi;
- struct pv_entry pvp_pv[NPVPPG];
-};
-
-#ifdef _KERNEL
-extern struct pmap kernel_pmap_store;
-
-#define pmap_kernel() (&kernel_pmap_store)
-#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
-#define pmap_update() tlbflush()
-
-vm_offset_t reserve_dumppages __P((vm_offset_t));
-
-static __inline void
-pmap_clear_modify(vm_offset_t pa)
-{
- pmap_changebit(pa, 0, ~PG_M);
-}
-
-static __inline void
-pmap_clear_reference(vm_offset_t pa)
-{
- pmap_changebit(pa, 0, ~PG_U);
-}
-
-static __inline void
-pmap_copy_on_write(vm_offset_t pa)
-{
- pmap_changebit(pa, PG_RO, ~PG_RW);
-}
-
-static __inline boolean_t
-pmap_is_modified(vm_offset_t pa)
-{
- return pmap_testbit(pa, PG_M);
-}
-
-static __inline boolean_t
-pmap_is_referenced(vm_offset_t pa)
-{
- return pmap_testbit(pa, PG_U);
-}
-
-static __inline vm_offset_t
-pmap_phys_address(int ppn)
-{
- return i386_ptob(ppn);
-}
-
-void pmap_activate __P((struct proc *));
-void pmap_deactivate __P((struct proc *));
-vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
-
-#endif /* _KERNEL */
-
-#endif /* _I386_PMAP_H_ */
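
pmap.old.h's recursive-map comment is the key to the vtopte()/avtopte()
macros: because one PDE slot points back at the page directory itself, all
page tables appear as a flat array of PTEs at a fixed virtual address. The
sketch below replays that address arithmetic; the self-reference slot 0x33f
is a made-up example, not the real KERNBASE-derived PTDPTDI.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PGSHIFT 12
#define PDSHIFT 22
#define PD_MASK 0xffc00000u
#define PT_MASK 0x003ff000u
#define EX_PTDPTDI 0x33fu                /* hypothetical self-ref slot */

#define pdei(va) (((va) & PD_MASK) >> PDSHIFT)
#define ptei(va) (((va) & PT_MASK) >> PGSHIFT)

/* VA of the PTE that maps va, given the self-referencing PDE */
static uint32_t
vtopte_va(uint32_t va)
{
        uint32_t ptmap = EX_PTDPTDI << PDSHIFT;  /* base of PTmap window */

        return ptmap + (va >> PGSHIFT) * sizeof(uint32_t);
}

int
main(void)
{
        uint32_t va = 0xd0201000u;
        uint32_t pte_va = vtopte_va(va);

        /* the PTE's own address decomposes as expected: going through
         * the self-ref slot turns pdei(va) into a page-table index */
        assert(pdei(pte_va) == EX_PTDPTDI);
        assert(ptei(pte_va) == pdei(va));
        printf("PTE for 0x%08x sits at 0x%08x\n",
            (unsigned)va, (unsigned)pte_va);
        return 0;
}

Applying vtopte() to the result walks the same slot a second time and yields
the PDE, which is exactly the "work recursively" note in the deleted header.
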
diff --git a/sys/arch/i386/include/pte.h b/sys/arch/i386/include/pte.h
index 711716cf5ff..b23165108f7 100644
--- a/sys/arch/i386/include/pte.h
+++ b/sys/arch/i386/include/pte.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pte.h,v 1.4 2001/01/26 23:05:29 mickey Exp $ */
+/* $OpenBSD: pte.h,v 1.5 2001/05/05 23:25:49 art Exp $ */
/* $NetBSD: pte.h,v 1.11 1998/02/06 21:58:05 thorpej Exp $ */
/*
@@ -171,9 +171,6 @@ typedef u_int32_t pt_entry_t; /* PTE */
#define PG_PS 0x00000080 /* 4MB page size */
#define PG_G 0x00000100 /* global, don't TLB flush */
#define PG_AVAIL1 0x00000200 /* ignored by hardware */
-#ifndef PMAP_NEW
-#define PG_W PG_AVAIL1 /* page is wired */
-#endif
#define PG_AVAIL2 0x00000400 /* ignored by hardware */
#define PG_AVAIL3 0x00000800 /* ignored by hardware */
#define PG_FRAME 0xfffff000 /* page frame mask */
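
With the PMAP_NEW conditional gone, pte.h keeps only the hardware view of the
ignored bits and pmap.h assigns their software meanings (PG_W, PG_PVLIST).
Below is a tiny sketch of how those masks combine in one PTE; the constants
are copied from the headers in this diff, while the sample PTE value and the
main() driver are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define PG_V      0x00000001u   /* valid */
#define PG_AVAIL1 0x00000200u   /* ignored by hardware */
#define PG_AVAIL2 0x00000400u   /* ignored by hardware */
#define PG_FRAME  0xfffff000u   /* page frame mask */

#define PG_W      PG_AVAIL1     /* "wired" mapping (from pmap.h) */
#define PG_PVLIST PG_AVAIL2     /* mapping has pvlist entry (from pmap.h) */

int
main(void)
{
        uint32_t pte = 0x00123000u | PG_V | PG_W;   /* sample entry */

        printf("valid=%d wired=%d on-pvlist=%d frame=0x%08x\n",
            !!(pte & PG_V), !!(pte & PG_W), !!(pte & PG_PVLIST),
            (unsigned)(pte & PG_FRAME));
        return 0;
}
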
diff --git a/sys/arch/i386/include/vmparam.h b/sys/arch/i386/include/vmparam.h
index 762aa954d50..8752163f618 100644
--- a/sys/arch/i386/include/vmparam.h
+++ b/sys/arch/i386/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.17 2001/05/05 20:56:39 art Exp $ */
+/* $OpenBSD: vmparam.h,v 1.18 2001/05/05 23:25:49 art Exp $ */
/* $NetBSD: vmparam.h,v 1.15 1994/10/27 04:16:34 cgd Exp $ */
/*-
@@ -131,10 +131,8 @@
*/
/* XXX Compatibility */
-#ifdef PMAP_NEW
#define APTDPTDI PDSLOT_APTE
#define PTDPTDI PDSLOT_PTE
-#endif
/* user/kernel map constants */
#define VM_MIN_ADDRESS ((vm_offset_t)0)
@@ -161,17 +159,9 @@
/*
* pmap specific data stored in the vm_physmem[] array
*/
-#if defined(PMAP_NEW)
struct pmap_physseg {
struct pv_head *pvhead; /* pv_head array */
char *attrs; /* attrs array */
};
-#else
-struct pmap_physseg {
- struct pv_entry *pvent; /* pv_entry array */
- char *attrs; /* attrs array */
-};
-#endif
-
#endif /* _MACHINE_VM_PARAM_H_ */
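
The surviving pmap_physseg carries two parallel per-page arrays: one pv_head
and one attribute byte for every page in the segment. A minimal userland
sketch of that layout follows; the seg_pvhead() helper and the calloc()-based
setup are illustrative assumptions, not kernel interfaces.

#include <stdio.h>
#include <stdlib.h>

struct pv_head_s {
        struct pv_entry_s *pvh_list;    /* simplified: no simple_lock */
};

struct pv_entry_s { struct pv_entry_s *pv_next; };

struct pmap_physseg_s {
        struct pv_head_s *pvhead;       /* pv_head array */
        char *attrs;                    /* attrs array */
};

/* return the pv_head tracking page 'idx' within the segment */
static struct pv_head_s *
seg_pvhead(struct pmap_physseg_s *seg, size_t idx)
{
        return &seg->pvhead[idx];
}

int
main(void)
{
        size_t npages = 8;
        struct pmap_physseg_s seg;

        seg.pvhead = calloc(npages, sizeof(*seg.pvhead));
        seg.attrs = calloc(npages, 1);
        printf("page 3 pv list empty: %d\n",
            seg_pvhead(&seg, 3)->pvh_list == NULL);
        free(seg.pvhead);
        free(seg.attrs);
        return 0;
}
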
diff --git a/sys/arch/i386/isa/npx.c b/sys/arch/i386/isa/npx.c
index 2a267fa63c2..73f7833686f 100644
--- a/sys/arch/i386/isa/npx.c
+++ b/sys/arch/i386/isa/npx.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: npx.c,v 1.18 2001/04/09 07:14:16 tholo Exp $ */
+/* $OpenBSD: npx.c,v 1.19 2001/05/05 23:25:51 art Exp $ */
/* $NetBSD: npx.c,v 1.57 1996/05/12 23:12:24 mycroft Exp $ */
#if 0
@@ -54,10 +54,8 @@
#include <sys/ioctl.h>
#include <sys/device.h>
-#if defined(UVM)
#include <vm/vm.h>
#include <uvm/uvm_extern.h>
-#endif
#include <machine/cpu.h>
#include <machine/intr.h>
@@ -392,11 +390,7 @@ npxintr(arg)
int code;
union sigval sv;
-#if defined(UVM)
uvmexp.traps++;
-#else
- cnt.v_trap++;
-#endif
iprintf(("Intr"));
if (p == 0 || npx_type == NPX_NONE) {
diff --git a/sys/arch/i386/isa/vector.s b/sys/arch/i386/isa/vector.s
index 0305ea09e32..21a403b2a8d 100644
--- a/sys/arch/i386/isa/vector.s
+++ b/sys/arch/i386/isa/vector.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: vector.s,v 1.10 1999/02/26 04:22:03 art Exp $ */
+/* $OpenBSD: vector.s,v 1.11 2001/05/05 23:25:52 art Exp $ */
/* $NetBSD: vector.s,v 1.32 1996/01/07 21:29:47 mycroft Exp $ */
/*
@@ -35,11 +35,7 @@
#define ICU_HARDWARE_MASK
-#if defined(UVM)
#define MY_COUNT _C_LABEL(uvmexp)
-#else
-#define MY_COUNT _cnt
-#endif
/*
* These macros are fairly self explanatory. If ICU_SPECIAL_MASK_MODE is