Diffstat (limited to 'sys/arch/i386/include')
-rw-r--r--   sys/arch/i386/include/_types.h             |    6
-rw-r--r--   sys/arch/i386/include/atomic.h             |    9
-rw-r--r--   sys/arch/i386/include/bus.h                |   12
-rw-r--r--   sys/arch/i386/include/cpu.h                |    6
-rw-r--r--   sys/arch/i386/include/loadfile_machdep.h   |    4
-rw-r--r--   sys/arch/i386/include/param.h              |   10
-rw-r--r--   sys/arch/i386/include/pmap.h               |  363
-rw-r--r--   sys/arch/i386/include/pte.h                |  107
-rw-r--r--   sys/arch/i386/include/tss.h                |    4
-rw-r--r--   sys/arch/i386/include/vmparam.h            |   15
10 files changed, 200 insertions, 336 deletions
diff --git a/sys/arch/i386/include/_types.h b/sys/arch/i386/include/_types.h
index 8d54ca43d8c..f731aefd89f 100644
--- a/sys/arch/i386/include/_types.h
+++ b/sys/arch/i386/include/_types.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: _types.h,v 1.2 2006/01/13 17:50:06 millert Exp $ */
+/* $OpenBSD: _types.h,v 1.3 2006/04/27 15:37:53 mickey Exp $ */
/*-
* Copyright (c) 1990, 1993
@@ -86,9 +86,9 @@ typedef __int32_t __register_t;
/* VM system types */
typedef unsigned long __vaddr_t;
-typedef unsigned long __paddr_t;
typedef unsigned long __vsize_t;
-typedef unsigned long __psize_t;
+typedef unsigned long long __paddr_t;
+typedef unsigned long long __psize_t;
/* Standard system types */
typedef int __clock_t;
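The _types.h hunk above is the core of the PAE work: virtual addresses stay 32-bit, but physical addresses and sizes widen to 64 bits so page frames above 4GB can be described. A minimal standalone sketch (illustrative only, not part of the tree) of why the old 32-bit paddr_t could not represent such a frame:

	#include <stdio.h>

	typedef unsigned long		vaddr_t;	/* still 32 bits on i386 */
	typedef unsigned long long	paddr_t;	/* now 64 bits, as in _types.h above */

	int
	main(void)
	{
		paddr_t pa = 0x123456000ULL;		/* a physical page frame above 4GB */
		unsigned long truncated = (unsigned long)pa; /* what a 32-bit paddr_t would keep */

		printf("paddr_t is %zu bytes, vaddr_t is %zu bytes\n",
		    sizeof(paddr_t), sizeof(vaddr_t));
		printf("pa = 0x%llx, truncated to 32 bits = 0x%lx\n", pa, truncated);
		return 0;
	}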
diff --git a/sys/arch/i386/include/atomic.h b/sys/arch/i386/include/atomic.h
index e3be6b68b1b..a06878e87b0 100644
--- a/sys/arch/i386/include/atomic.h
+++ b/sys/arch/i386/include/atomic.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: atomic.h,v 1.2 2004/06/13 21:49:16 niklas Exp $ */
+/* $OpenBSD: atomic.h,v 1.3 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: atomic.h,v 1.1.2.2 2000/02/21 18:54:07 sommerfeld Exp $ */
/*-
@@ -44,6 +44,13 @@
#ifndef _LOCORE
+static __inline u_int64_t
+i386_atomic_testset_uq (volatile u_int64_t *ptr, u_int64_t val) {
+ __asm__ volatile ("\n1:\tlock; cmpxchg8b (%1); jnz 1b" : "+A" (val) :
+ "r" (ptr), "b" ((u_int32_t)val), "c" ((u_int32_t)(val >> 32)));
+ return val;
+}
+
static __inline u_int32_t
i386_atomic_testset_ul (volatile u_int32_t *ptr, unsigned long val) {
__asm__ volatile ("xchgl %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
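The new i386_atomic_testset_uq() is an atomic 64-bit exchange: cmpxchg8b compares EDX:EAX against the memory operand, reloads EDX:EAX with the current contents on a mismatch (hence the retry loop), and stores ECX:EBX on a match, so when the loop exits EDX:EAX holds the previous value and the new value is in place. This matters for PAE, where PTEs are 8 bytes and a single 32-bit xchgl can no longer replace one atomically. A hedged userland sketch of the same idiom (i386-only GCC inline assembly, hypothetical names):

	#include <stdio.h>
	#include <stdint.h>

	/* same idiom as i386_atomic_testset_uq(); i386-only ("A" = edx:eax pair) */
	static inline uint64_t
	atomic_swap_64(volatile uint64_t *ptr, uint64_t val)
	{
		/*
		 * On mismatch cmpxchg8b loads the current *ptr into edx:eax and
		 * we retry; on match it stores ecx:ebx (the new value).  Either
		 * way edx:eax ends up holding the previous contents, returned here.
		 */
		__asm__ volatile ("\n1:\tlock; cmpxchg8b (%1); jnz 1b" : "+A" (val) :
		    "r" (ptr), "b" ((uint32_t)val), "c" ((uint32_t)(val >> 32)));
		return val;
	}

	int
	main(void)
	{
		volatile uint64_t pte = 0x0000000123456067ULL;	/* a made-up 64-bit PTE */
		uint64_t old = atomic_swap_64(&pte, 0);		/* e.g. tearing down a mapping */

		printf("old PTE 0x%llx, PTE now 0x%llx\n",
		    (unsigned long long)old, (unsigned long long)pte);
		return 0;
	}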
diff --git a/sys/arch/i386/include/bus.h b/sys/arch/i386/include/bus.h
index 0b26d524f49..9900c76d4f0 100644
--- a/sys/arch/i386/include/bus.h
+++ b/sys/arch/i386/include/bus.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus.h,v 1.38 2006/04/27 15:17:16 mickey Exp $ */
+/* $OpenBSD: bus.h,v 1.39 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: bus.h,v 1.6 1996/11/10 03:19:25 thorpej Exp $ */
/*-
@@ -741,7 +741,7 @@ void bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh,
#define BUS_DMA_COHERENT 0x004 /* hint: map memory DMA coherent */
#define BUS_DMA_BUS1 0x010 /* placeholders for bus functions... */
#define BUS_DMA_BUS2 0x020
-#define BUS_DMA_BUS3 0x040
+#define BUS_DMA_64BIT 0x040 /* large memory high segment is ok */
#define BUS_DMA_24BIT 0x080 /* isadma map */
#define BUS_DMA_STREAMING 0x100 /* hint: sequential, unidirectional */
#define BUS_DMA_READ 0x200 /* mapping is device -> memory only */
@@ -771,7 +771,10 @@ typedef struct i386_bus_dmamap *bus_dmamap_t;
*/
struct i386_bus_dma_segment {
bus_addr_t ds_addr; /* DMA address */
+ paddr_t ds_addr2; /* replacement store */
bus_size_t ds_len; /* length of transfer */
+ vaddr_t ds_va; /* mapped loaded data */
+ vaddr_t ds_va2; /* mapped replacement data */
};
typedef struct i386_bus_dma_segment bus_dma_segment_t;
@@ -863,6 +866,11 @@ struct i386_bus_dmamap {
void *_dm_cookie; /* cookie for bus-specific functions */
+ struct vm_page **_dm_pages; /* replacement pages */
+ vaddr_t _dm_pgva; /* those above -- mapped */
+ int _dm_npages; /* number of pages allocated */
+ int _dm_nused; /* number of pages replaced */
+
/*
* PUBLIC MEMBERS: these are used by machine-independent code.
*/
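The bus_dma changes pair the new BUS_DMA_64BIT flag with per-segment and per-map bookkeeping (ds_addr2, ds_va, ds_va2, _dm_pages, _dm_pgva) that lets the i386 back end substitute low "replacement" pages when a loaded buffer lands above what a device can address. A hedged sketch of how a 64-bit-capable device might opt out of that bouncing at map-creation time; bus_dmamap_create(9) is the standard interface, while the helper name and parameters here are made up for illustration:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <machine/bus.h>

	/*
	 * Illustrative only: a device whose descriptors carry full 64-bit DMA
	 * addresses passes BUS_DMA_64BIT so segments above 4GB are handed to
	 * it directly; without the flag they would be bounced through the
	 * replacement pages tracked by _dm_pages/_dm_pgva.
	 */
	int
	example_create_dmamap(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
	{
		return (bus_dmamap_create(dmat, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_64BIT, mapp));
	}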
diff --git a/sys/arch/i386/include/cpu.h b/sys/arch/i386/include/cpu.h
index bf7327f06a4..568a2ef2de5 100644
--- a/sys/arch/i386/include/cpu.h
+++ b/sys/arch/i386/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.74 2006/01/12 22:39:21 weingart Exp $ */
+/* $OpenBSD: cpu.h,v 1.75 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: cpu.h,v 1.35 1996/05/05 19:29:26 christos Exp $ */
/*-
@@ -418,6 +418,10 @@ int kvtop(caddr_t);
void vm86_gpfault(struct proc *, int);
#endif /* VM86 */
+#ifndef SMALL_KERNEL
+int cpu_paenable(void *);
+#endif /* !SMALL_KERNEL */
+
#ifdef GENERIC
/* swapgeneric.c */
void setconf(void);
diff --git a/sys/arch/i386/include/loadfile_machdep.h b/sys/arch/i386/include/loadfile_machdep.h
index a121e81d4ef..5903231fc58 100644
--- a/sys/arch/i386/include/loadfile_machdep.h
+++ b/sys/arch/i386/include/loadfile_machdep.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: loadfile_machdep.h,v 1.1 2003/04/17 03:42:14 drahn Exp $ */
+/* $OpenBSD: loadfile_machdep.h,v 1.2 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: loadfile_machdep.h,v 1.1 1999/04/29 03:17:12 tsubai Exp $ */
/*-
@@ -43,7 +43,7 @@
#define LOAD_KERNEL (LOAD_ALL & ~LOAD_TEXTA)
#define COUNT_KERNEL (COUNT_ALL & ~COUNT_TEXTA)
-#define LOADADDR(a) ((((u_long)(a)) + offset)&0xfffffff)
+#define LOADADDR(a) (((u_long)(a) + (u_long)offset)&0xfffffff)
#define ALIGNENTRY(a) ((u_long)(a))
#define READ(f, b, c) read((f), (void *)LOADADDR(b), (c))
#define BCOPY(s, d, c) memcpy((void *)LOADADDR(d), (void *)(s), (c))
diff --git a/sys/arch/i386/include/param.h b/sys/arch/i386/include/param.h
index 439a9859587..433644f782e 100644
--- a/sys/arch/i386/include/param.h
+++ b/sys/arch/i386/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.35 2006/03/19 01:47:23 martin Exp $ */
+/* $OpenBSD: param.h,v 1.36 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: param.h,v 1.29 1996/03/04 05:04:26 cgd Exp $ */
/*-
@@ -75,8 +75,6 @@
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (PAGE_SIZE - 1)
-#define NPTEPG (NBPG/(sizeof (pt_entry_t)))
-
/*
* Start of kernel virtual space. Remember to alter the memory and
* page table layout description in pmap.h when changing this.
@@ -131,9 +129,3 @@
/* bytes to disk blocks */
#define dbtob(x) ((x) << DEV_BSHIFT)
#define btodb(x) ((x) >> DEV_BSHIFT)
-
-/*
- * Mach derived conversion macros
- */
-#define i386_round_pdr(x) ((((unsigned)(x)) + PDOFSET) & ~PDOFSET)
-#define i386_trunc_pdr(x) ((unsigned)(x) & ~PDOFSET)
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index 4a350f4201f..b1e1c2ec4b8 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.41 2006/01/12 22:39:21 weingart Exp $ */
+/* $OpenBSD: pmap.h,v 1.42 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $ */
/*
@@ -47,123 +47,11 @@
#include <uvm/uvm_object.h>
/*
- * See pte.h for a description of i386 MMU terminology and hardware
- * interface.
- *
- * A pmap describes a process' 4GB virtual address space. This
- * virtual address space can be broken up into 1024 4MB regions which
- * are described by PDEs in the PDP. The PDEs are defined as follows:
- *
- * Ranges are inclusive -> exclusive, just like vm_map_entry start/end.
- * The following assumes that KERNBASE is 0xd0000000.
- *
- * PDE#s VA range Usage
- * 0->831 0x0 -> 0xcfc00000 user address space, note that the
- * max user address is 0xcfbfe000
- * the final two pages in the last 4MB
- * used to be reserved for the UAREA
- * but now are no longer used.
- * 831 0xcfc00000-> recursive mapping of PDP (used for
- * 0xd0000000 linear mapping of PTPs).
- * 832->1023 0xd0000000-> kernel address space (constant
- * 0xffc00000 across all pmaps/processes).
- * 1023 0xffc00000-> "alternate" recursive PDP mapping
- * <end> (for other pmaps).
- *
- *
- * Note: A recursive PDP mapping provides a way to map all the PTEs for
- * a 4GB address space into a linear chunk of virtual memory. In other
- * words, the PTE for page 0 is the first int mapped into the 4MB recursive
- * area. The PTE for page 1 is the second int. The very last int in the
- * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
- * address).
- *
- * All pmaps' PDs must have the same values in slots 832->1023 so that
- * the kernel is always mapped in every process. These values are loaded
- * into the PD at pmap creation time.
- *
- * At any one time only one pmap can be active on a processor. This is
- * the pmap whose PDP is pointed to by processor register %cr3. This pmap
- * will have all its PTEs mapped into memory at the recursive mapping
- * point (slot #831 as show above). When the pmap code wants to find the
- * PTE for a virtual address, all it has to do is the following:
- *
- * Address of PTE = (831 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
- * = 0xcfc00000 + (VA / 4096) * 4
- *
- * What happens if the pmap layer is asked to perform an operation
- * on a pmap that is not the one which is currently active? In that
- * case we take the PA of the PDP of non-active pmap and put it in
- * slot 1023 of the active pmap. This causes the non-active pmap's
- * PTEs to get mapped in the final 4MB of the 4GB address space
- * (e.g. starting at 0xffc00000).
- *
- * The following figure shows the effects of the recursive PDP mapping:
- *
- * PDP (%cr3)
- * +----+
- * | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
- * | |
- * | |
- * | 831| -> points back to PDP (%cr3) mapping VA 0xcfc00000 -> 0xd0000000
- * | 832| -> first kernel PTP (maps 0xd0000000 -> 0xe0400000)
- * | |
- * |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
- * +----+
- *
- * Note that the PDE#831 VA (0xcfc00000) is defined as "PTE_BASE".
- * Note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE".
- *
- * Starting at VA 0xcfc00000 the current active PDP (%cr3) acts as a
- * PTP:
- *
- * PTP#831 == PDP(%cr3) => maps VA 0xcfc00000 -> 0xd0000000
- * +----+
- * | 0| -> maps the contents of PTP#0 at VA 0xcfc00000->0xcfc01000
- * | |
- * | |
- * | 831| -> maps the contents of PTP#831 (the PDP) at VA 0xcff3f000
- * | 832| -> maps the contents of first kernel PTP
- * | |
- * |1023|
- * +----+
- *
- * Note that mapping of the PDP at PTP#831's VA (0xcff3f000) is
- * defined as "PDP_BASE".... within that mapping there are two
- * defines:
- * "PDP_PDE" (0xcff3fcfc) is the VA of the PDE in the PDP
- * which points back to itself.
- * "APDP_PDE" (0xcff3fffc) is the VA of the PDE in the PDP which
- * establishes the recursive mapping of the alternate pmap.
- * To set the alternate PDP, one just has to put the correct
- * PA info in *APDP_PDE.
- *
- * Note that in the APTE_BASE space, the APDP appears at VA
- * "APDP_BASE" (0xfffff000).
+ * The following defines identify the slots used as described in pmap.c.
*/
-
-/*
- * The following defines identify the slots used as described above.
- */
-
-#define PDSLOT_PTE ((KERNBASE/NBPD)-1) /* 831: for recursive PDP map */
-#define PDSLOT_KERN (KERNBASE/NBPD) /* 832: start of kernel space */
-#define PDSLOT_APTE ((unsigned)1023) /* 1023: alternative recursive slot */
-
-/*
- * The following defines give the virtual addresses of various MMU
- * data structures:
- * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
- * PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD
- * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
- */
-
-#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
-#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
-#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
-#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
-#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
-#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
+#define PDSLOT_PTE ((KERNBASE/NBPD)-2) /* 830: for recursive PDP map */
+#define PDSLOT_KERN (KERNBASE/NBPD) /* 832: start of kernel space */
+#define PDSLOT_APTE ((unsigned)1022) /* 1022: alternative recursive slot */
/*
* The following define determines how many PTPs should be set up for the
@@ -171,55 +59,10 @@
* get the VM system running. Once the VM system is running, the
* pmap module can add more PTPs to the kernel area on demand.
*/
-
#ifndef NKPTP
-#define NKPTP 4 /* 16MB to start */
+#define NKPTP 8 /* 16/32MB to start */
#endif
#define NKPTP_MIN 4 /* smallest value we allow */
-#define NKPTP_MAX (1024 - (KERNBASE/NBPD) - 1)
- /* largest value (-1 for APTP space) */
-
-/*
- * various address macros
- *
- * vtopte: return a pointer to the PTE mapping a VA
- * kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
- * ptetov: given a pointer to a PTE, return the VA that it maps
- * vtophys: translate a VA to the PA mapped to it
- *
- * plus alternative versions of the above
- */
-
-#define vtopte(VA) (PTE_BASE + atop(VA))
-#define kvtopte(VA) vtopte(VA)
-#define ptetov(PT) (ptoa(PT - PTE_BASE))
-#define vtophys(VA) ((*vtopte(VA) & PG_FRAME) | \
- ((unsigned)(VA) & ~PG_FRAME))
-#define avtopte(VA) (APTE_BASE + atop(VA))
-#define ptetoav(PT) (ptoa(PT - APTE_BASE))
-#define avtophys(VA) ((*avtopte(VA) & PG_FRAME) | \
- ((unsigned)(VA) & ~PG_FRAME))
-
-/*
- * pdei/ptei: generate index into PDP/PTP from a VA
- */
-#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
-#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
-
-/*
- * PTP macros:
- * A PTP's index is the PD index of the PDE that points to it.
- * A PTP's offset is the byte-offset in the PTE space that this PTP is at.
- * A PTP's VA is the first VA mapped by that PTP.
- *
- * Note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
- * NBPD == number of bytes a PTP can map (4MB)
- */
-
-#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
-#define ptp_o2i(O) ((O) / NBPG) /* offset => index */
-#define ptp_i2v(I) ((I) * NBPD) /* index => VA */
-#define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
/*
* PG_AVAIL usage: we make use of the ignored bits of the PTE
@@ -229,12 +72,6 @@
#define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
#define PG_X PG_AVAIL3 /* executable mapping */
-/*
- * Number of PTE's per cache line. 4 byte pte, 32-byte cache line
- * Used to avoid false sharing of cache lines.
- */
-#define NPTECL 8
-
#ifdef _KERNEL
/*
* pmap data structures: see pmap.c for details of locking.
@@ -257,13 +94,15 @@ LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
*/
struct pmap {
+ paddr_t pm_pdidx[4]; /* PDIEs for PAE mode */
+ paddr_t pm_pdirpa; /* PA of PD (read-only after create) */
+ vaddr_t pm_pdir; /* VA of PD (lck by object lock) */
+ int pm_pdirsize; /* PD size (4k vs 16k on PAE) */
struct uvm_object pm_obj; /* object (lck by object lock) */
#define pm_lock pm_obj.vmobjlock
LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
- pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */
- paddr_t pm_pdirpa; /* PA of PD (read-only after create) */
struct vm_page *pm_ptphint; /* pointer to a PTP in our pmap */
- struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
+ struct pmap_statistics pm_stats;/* pmap stats (lck by object lock) */
vaddr_t pm_hiexec; /* highest executable mapping */
int pm_flags; /* see below */
@@ -333,67 +172,185 @@ struct pv_page {
/*
* global kernel variables
*/
-
-extern pd_entry_t PTD[];
-
-/* PTDpaddr: is the physical address of the kernel's PDP */
-extern u_int32_t PTDpaddr;
-
+extern char PTD[];
extern struct pmap kernel_pmap_store; /* kernel pmap */
-extern int nkpde; /* current # of PDEs for kernel */
-extern int pmap_pg_g; /* do we support PG_G? */
+extern int nkptp_max;
/*
- * Macros
+ * Our dual-pmap design requires us to play pointer-and-seek.
+ * Being nice folks, though, we handle single-pmap kernels specially.
*/
+#define PMAP_EXCLUDE_DECLS /* tells uvm_pmap.h *not* to include decls */
+/*
+ * Dumb macros
+ */
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_update(pm) /* nada */
-#define pmap_clear_modify(pg) pmap_change_attrs(pg, 0, PG_M)
-#define pmap_clear_reference(pg) pmap_change_attrs(pg, 0, PG_U)
-#define pmap_copy(DP,SP,D,L,S)
-#define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
-#define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
-#define pmap_phys_address(ppn) ptoa(ppn)
-#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
-
-#define pmap_proc_iflush(p,va,len) /* nothing */
-#define pmap_unuse_final(p) /* nothing */
+#define pmap_clear_modify(pg) pmap_change_attrs(pg, 0, PG_M)
+#define pmap_clear_reference(pg) pmap_change_attrs(pg, 0, PG_U)
+#define pmap_copy(DP,SP,D,L,S) /* nothing */
+#define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
+#define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
+#define pmap_phys_address(ppn) ptoa(ppn)
+#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
+#define pmap_proc_iflush(p,va,len) /* nothing */
+#define pmap_unuse_final(p) /* nothing */
/*
* Prototypes
*/
-
void pmap_bootstrap(vaddr_t);
-boolean_t pmap_change_attrs(struct vm_page *, int, int);
+void pmap_bootstrap_pae(void);
+void pmap_virtual_space(vaddr_t *, vaddr_t *);
+void pmap_init(void);
+struct pmap * pmap_create(void);
+void pmap_destroy(struct pmap *);
+void pmap_reference(struct pmap *);
+void pmap_fork(struct pmap *, struct pmap *);
+void pmap_collect(struct pmap *);
+void pmap_activate(struct proc *);
+void pmap_deactivate(struct proc *);
+void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t);
+void pmap_kremove(vaddr_t, vsize_t);
+void pmap_zero_page(struct vm_page *);
+void pmap_copy_page(struct vm_page *, struct vm_page *);
+
+struct pv_entry*pmap_alloc_pv(struct pmap *, int);
+void pmap_enter_pv(struct pv_head *, struct pv_entry *,
+ struct pmap *, vaddr_t, struct vm_page *);
+void pmap_free_pv(struct pmap *, struct pv_entry *);
+void pmap_free_pvs(struct pmap *, struct pv_entry *);
+void pmap_free_pv_doit(struct pv_entry *);
+void pmap_free_pvpage(void);
static void pmap_page_protect(struct vm_page *, vm_prot_t);
-void pmap_page_remove(struct vm_page *);
-static void pmap_protect(struct pmap *, vaddr_t,
- vaddr_t, vm_prot_t);
-void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
-boolean_t pmap_test_attrs(struct vm_page *, int);
+static void pmap_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
static void pmap_update_pg(vaddr_t);
-static void pmap_update_2pg(vaddr_t,vaddr_t);
-void pmap_write_protect(struct pmap *, vaddr_t,
- vaddr_t, vm_prot_t);
+static void pmap_update_2pg(vaddr_t, vaddr_t);
int pmap_exec_fixup(struct vm_map *, struct trapframe *,
struct pcb *);
+void pmap_exec_account(struct pmap *, vaddr_t, u_int32_t,
+ u_int32_t);
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
+paddr_t vtophys(vaddr_t va);
-void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, int32_t *);
+void pmap_tlb_shootdown(pmap_t, vaddr_t, u_int32_t, int32_t *);
void pmap_tlb_shootnow(int32_t);
void pmap_do_tlb_shootdown(struct cpu_info *);
+boolean_t pmap_is_curpmap(struct pmap *);
+boolean_t pmap_is_active(struct pmap *, int);
+void pmap_apte_flush(struct pmap *);
+struct pv_entry *pmap_remove_pv(struct pv_head *, struct pmap *, vaddr_t);
+
+#ifdef SMALL_KERNEL
+#define pmap_pte_set_86 pmap_pte_set
+#define pmap_pte_setbits_86 pmap_pte_setbits
+#define pmap_pte_bits_86 pmap_pte_bits
+#define pmap_pte_paddr_86 pmap_pte_paddr
+#define pmap_change_attrs_86 pmap_change_attrs
+#define pmap_enter_86 pmap_enter
+#define pmap_extract_86 pmap_extract
+#define pmap_growkernel_86 pmap_growkernel
+#define pmap_page_remove_86 pmap_page_remove
+#define pmap_remove_86 pmap_remove
+#define pmap_test_attrs_86 pmap_test_attrs
+#define pmap_unwire_86 pmap_unwire
+#define pmap_write_protect_86 pmap_write_protect
+#define pmap_pinit_pd_86 pmap_pinit_pd
+#define pmap_zero_phys_86 pmap_zero_phys
+#define pmap_zero_page_uncached_86 pmap_zero_page_uncached
+#define pmap_copy_page_86 pmap_copy_page
+#define pmap_try_steal_pv_86 pmap_try_steal_pv
+#else
+extern u_int32_t (*pmap_pte_set_p)(vaddr_t, paddr_t, u_int32_t);
+extern u_int32_t (*pmap_pte_setbits_p)(vaddr_t, u_int32_t, u_int32_t);
+extern u_int32_t (*pmap_pte_bits_p)(vaddr_t);
+extern paddr_t (*pmap_pte_paddr_p)(vaddr_t);
+extern boolean_t (*pmap_change_attrs_p)(struct vm_page *, int, int);
+extern int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
+extern boolean_t (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
+extern vaddr_t (*pmap_growkernel_p)(vaddr_t);
+extern void (*pmap_page_remove_p)(struct vm_page *);
+extern void (*pmap_remove_p)(struct pmap *, vaddr_t, vaddr_t);
+extern boolean_t (*pmap_test_attrs_p)(struct vm_page *, int);
+extern void (*pmap_unwire_p)(struct pmap *, vaddr_t);
+extern void (*pmap_write_protect_p)(struct pmap*, vaddr_t, vaddr_t, vm_prot_t);
+extern void (*pmap_pinit_pd_p)(pmap_t);
+extern void (*pmap_zero_phys_p)(paddr_t);
+extern boolean_t (*pmap_zero_page_uncached_p)(paddr_t);
+extern void (*pmap_copy_page_p)(struct vm_page *, struct vm_page *);
+extern boolean_t (*pmap_try_steal_pv_p)(struct pv_head *pvh,
+ struct pv_entry *cpv, struct pv_entry *prevpv);
+
+u_int32_t pmap_pte_set_pae(vaddr_t, paddr_t, u_int32_t);
+u_int32_t pmap_pte_setbits_pae(vaddr_t, u_int32_t, u_int32_t);
+u_int32_t pmap_pte_bits_pae(vaddr_t);
+paddr_t pmap_pte_paddr_pae(vaddr_t);
+boolean_t pmap_try_steal_pv_pae(struct pv_head *pvh, struct pv_entry *cpv,
+ struct pv_entry *prevpv);
+boolean_t pmap_change_attrs_pae(struct vm_page *, int, int);
+int pmap_enter_pae(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
+boolean_t pmap_extract_pae(pmap_t, vaddr_t, paddr_t *);
+vaddr_t pmap_growkernel_pae(vaddr_t);
+void pmap_page_remove_pae(struct vm_page *);
+void pmap_remove_pae(struct pmap *, vaddr_t, vaddr_t);
+boolean_t pmap_test_attrs_pae(struct vm_page *, int);
+void pmap_unwire_pae(struct pmap *, vaddr_t);
+void pmap_write_protect_pae(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
+void pmap_pinit_pd_pae(pmap_t);
+void pmap_zero_phys_pae(paddr_t);
+boolean_t pmap_zero_page_uncached_pae(paddr_t);
+void pmap_copy_page_pae(struct vm_page *, struct vm_page *);
+
+#define pmap_pte_set (*pmap_pte_set_p)
+#define pmap_pte_setbits (*pmap_pte_setbits_p)
+#define pmap_pte_bits (*pmap_pte_bits_p)
+#define pmap_pte_paddr (*pmap_pte_paddr_p)
+#define pmap_change_attrs (*pmap_change_attrs_p)
+#define pmap_enter (*pmap_enter_p)
+#define pmap_extract (*pmap_extract_p)
+#define pmap_growkernel (*pmap_growkernel_p)
+#define pmap_page_remove (*pmap_page_remove_p)
+#define pmap_remove (*pmap_remove_p)
+#define pmap_test_attrs (*pmap_test_attrs_p)
+#define pmap_unwire (*pmap_unwire_p)
+#define pmap_write_protect (*pmap_write_protect_p)
+#define pmap_pinit_pd (*pmap_pinit_pd_p)
+#define pmap_zero_phys (*pmap_zero_phys_p)
+#define pmap_zero_page_uncached (*pmap_zero_page_uncached_p)
+#define pmap_copy_page (*pmap_copy_page_p)
+#define pmap_try_steal_pv (*pmap_try_steal_pv_p)
+#endif
+
+u_int32_t pmap_pte_set_86(vaddr_t, paddr_t, u_int32_t);
+u_int32_t pmap_pte_setbits_86(vaddr_t, u_int32_t, u_int32_t);
+u_int32_t pmap_pte_bits_86(vaddr_t);
+paddr_t pmap_pte_paddr_86(vaddr_t);
+boolean_t pmap_try_steal_pv_86(struct pv_head *pvh, struct pv_entry *cpv,
+ struct pv_entry *prevpv);
+boolean_t pmap_change_attrs_86(struct vm_page *, int, int);
+int pmap_enter_86(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
+boolean_t pmap_extract_86(pmap_t, vaddr_t, paddr_t *);
+vaddr_t pmap_growkernel_86(vaddr_t);
+void pmap_page_remove_86(struct vm_page *);
+void pmap_remove_86(struct pmap *, vaddr_t, vaddr_t);
+boolean_t pmap_test_attrs_86(struct vm_page *, int);
+void pmap_unwire_86(struct pmap *, vaddr_t);
+void pmap_write_protect_86(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
+void pmap_pinit_pd_86(pmap_t);
+void pmap_zero_phys_86(paddr_t);
+boolean_t pmap_zero_page_uncached_86(paddr_t);
+void pmap_copy_page_86(struct vm_page *, struct vm_page *);
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
/*
* Do idle page zero'ing uncached to avoid polluting the cache.
*/
-boolean_t pmap_zero_page_uncached(paddr_t);
#define PMAP_PAGEIDLEZERO(pg) pmap_zero_page_uncached(VM_PAGE_TO_PHYS(pg))
/*
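The pmap.h rework replaces direct calls with a table of function pointers (the *_p externs) so the classic 32-bit ("_86") and PAE ("_pae") implementations can coexist in one kernel, while SMALL_KERNEL builds collapse back to the _86 versions through plain defines; presumably pmap_bootstrap_pae() repoints the hooks once PAE is found usable. A small standalone sketch of that dispatch pattern (hypothetical names, not the actual pmap.c wiring):

	#include <stdio.h>

	typedef unsigned long vaddr_t;

	/* two backends for one operation, in the spirit of pmap_pte_bits_86/_pae */
	static unsigned int pte_bits_86(vaddr_t va)  { (void)va; return 32; }
	static unsigned int pte_bits_pae(vaddr_t va) { (void)va; return 64; }

	/* every caller goes through the pointer, like pmap_pte_bits_p above */
	static unsigned int (*pte_bits_p)(vaddr_t) = pte_bits_86;
	#define pte_bits	(*pte_bits_p)

	/* roughly what a bootstrap routine would do once PAE is known to work */
	static void
	enable_pae_hooks(void)
	{
		pte_bits_p = pte_bits_pae;
	}

	int
	main(void)
	{
		printf("PTE width before: %u bits\n", pte_bits(0));
		enable_pae_hooks();
		printf("PTE width after:  %u bits\n", pte_bits(0));
		return 0;
	}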
diff --git a/sys/arch/i386/include/pte.h b/sys/arch/i386/include/pte.h
index e27c072c19d..73a3bc3e7b0 100644
--- a/sys/arch/i386/include/pte.h
+++ b/sys/arch/i386/include/pte.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pte.h,v 1.7 2004/02/06 00:23:21 deraadt Exp $ */
+/* $OpenBSD: pte.h,v 1.8 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: pte.h,v 1.11 1998/02/06 21:58:05 thorpej Exp $ */
/*
@@ -45,114 +45,11 @@
#define _I386_PTE_H_
/*
- * i386 MMU hardware structure:
- *
- * the i386 MMU is a two-level MMU which maps 4GB of virtual memory.
- * the pagesize is 4K (4096 [0x1000] bytes), although newer pentium
- * processors can support a 4MB pagesize as well.
- *
- * the first level table (segment table?) is called a "page directory"
- * and it contains 1024 page directory entries (PDEs). each PDE is
- * 4 bytes (an int), so a PD fits in a single 4K page. this page is
- * the page directory page (PDP). each PDE in a PDP maps 4MB of space
- * (1024 * 4MB = 4GB). a PDE contains the physical address of the
- * second level table: the page table. or, if 4MB pages are being used,
- * then the PDE contains the PA of the 4MB page being mapped.
- *
- * a page table consists of 1024 page table entries (PTEs). each PTE is
- * 4 bytes (an int), so a page table also fits in a single 4K page. a
- * 4K page being used as a page table is called a page table page (PTP).
- * each PTE in a PTP maps one 4K page (1024 * 4K = 4MB). a PTE contains
- * the physical address of the page it maps and some flag bits (described
- * below).
- *
- * the processor has a special register, "cr3", which points to the
- * the PDP which is currently controlling the mappings of the virtual
- * address space.
- *
- * the following picture shows the translation process for a 4K page:
- *
- * %cr3 register [PA of PDP]
- * |
- * |
- * | bits <31-22> of VA bits <21-12> of VA bits <11-0>
- * | index the PDP (0 - 1023) index the PTP are the page offset
- * | | | |
- * | v | |
- * +--->+----------+ | |
- * | PD Page | PA of v |
- * | |---PTP-------->+------------+ |
- * | 1024 PDE | | page table |--PTE--+ |
- * | entries | | (aka PTP) | | |
- * +----------+ | 1024 PTE | | |
- * | entries | | |
- * +------------+ | |
- * | |
- * bits <31-12> bits <11-0>
- * p h y s i c a l a d d r
- *
- * the i386 caches PTEs in a TLB. it is important to flush out old
- * TLB mappings when making a change to a mappings. writing to the
- * %cr3 will flush the entire TLB. newer processors also have an
- * instruction that will invalidate the mapping of a single page (which
- * is useful if you are changing a single mappings because it preserves
- * all the cached TLB entries).
- *
- * as shows, bits 31-12 of the PTE contain PA of the page being mapped.
- * the rest of the PTE is defined as follows:
- * bit# name use
- * 11 n/a available for OS use, hardware ignores it
- * 10 n/a available for OS use, hardware ignores it
- * 9 n/a available for OS use, hardware ignores it
- * 8 G global bit (see discussion below)
- * 7 PS page size [for PDEs] (0=4k, 1=4M <if supported>)
- * 6 D dirty (modified) page
- * 5 A accessed (referenced) page
- * 4 PCD cache disable
- * 3 PWT prevent write through (cache)
- * 2 U/S user/supervisor bit (0=supervisor only, 1=both u&s)
- * 1 R/W read/write bit (0=read only, 1=read-write)
- * 0 P present (valid)
- *
- * notes:
- * - on the i386 the R/W bit is ignored if processor is in supervisor
- * state (bug!)
- * - PS is only supported on newer processors
- * - PTEs with the G bit are global in the sense that they are not
- * flushed from the TLB when %cr3 is written (to flush, use the
- * "flush single page" instruction). this is only supported on
- * newer processors. this bit can be used to keep the kernel's
- * TLB entries around while context switching. since the kernel
- * is mapped into all processes at the same place it does not make
- * sense to flush these entries when switching from one process'
- * pmap to another.
- */
-
-#if !defined(_LOCORE)
-
-/*
- * here we define the data types for PDEs and PTEs
- */
-
-typedef u_int32_t pd_entry_t; /* PDE */
-typedef u_int32_t pt_entry_t; /* PTE */
-
-#endif
-
-/*
* now we define various for playing with virtual addresses
*/
#define PDSHIFT 22 /* offset of PD index in VA */
#define NBPD (1 << PDSHIFT) /* # bytes mapped by PD (4MB) */
-#define PDOFSET (NBPD-1) /* mask for non-PD part of VA */
-#if 0 /* not used? */
-#define NPTEPD (NBPD / NBPG) /* # of PTEs in a PD */
-#else
-#define PTES_PER_PTP (NBPD / NBPG) /* # of PTEs in a PTP */
-#endif
-#define PD_MASK 0xffc00000 /* page directory address bits */
-#define PT_MASK 0x003ff000 /* page table address bits */
/*
* here we define the bits of the PDE/PTE, as described above:
@@ -173,8 +70,6 @@ typedef u_int32_t pt_entry_t; /* PTE */
#define PG_AVAIL1 0x00000200 /* ignored by hardware */
#define PG_AVAIL2 0x00000400 /* ignored by hardware */
#define PG_AVAIL3 0x00000800 /* ignored by hardware */
-#define PG_FRAME 0xfffff000 /* page frame mask */
-#define PG_LGFRAME 0xffc00000 /* large (4M) page frame mask */
/*
* various short-hand protection codes
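pte.h loses PD_MASK, PT_MASK and PG_FRAME (and pmap.h the pdei()/ptei() macros built on them) because those constants differ between the classic 2-level 32-bit page tables and PAE's wider entries, so each pmap flavor now carries its own. For reference, a hedged worked example of how the removed classic-mode macros split a virtual address (values chosen arbitrarily):

	#include <stdio.h>

	#define PGSHIFT	12			/* 4K pages */
	#define PDSHIFT	22			/* offset of PD index in VA (non-PAE) */
	#define PD_MASK	0xffc00000UL		/* page directory address bits */
	#define PT_MASK	0x003ff000UL		/* page table address bits */

	/* the removed classic-mode index macros, reproduced for illustration */
	#define pdei(va)	(((va) & PD_MASK) >> PDSHIFT)
	#define ptei(va)	(((va) & PT_MASK) >> PGSHIFT)

	int
	main(void)
	{
		unsigned long va = 0xd0234567UL;	/* an arbitrary kernel VA */

		/* bits 31-22 select the PDE, bits 21-12 the PTE, bits 11-0 the offset */
		printf("va 0x%08lx -> PDE %lu, PTE %lu, page offset 0x%03lx\n",
		    va, pdei(va), ptei(va), va & 0xfffUL);
		return 0;
	}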
diff --git a/sys/arch/i386/include/tss.h b/sys/arch/i386/include/tss.h
index 20f6f38f7d5..7590b8ce3a1 100644
--- a/sys/arch/i386/include/tss.h
+++ b/sys/arch/i386/include/tss.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: tss.h,v 1.6 2003/06/02 23:27:47 millert Exp $ */
+/* $OpenBSD: tss.h,v 1.7 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: tss.h,v 1.6 1995/10/11 04:20:28 mycroft Exp $ */
/*-
@@ -50,7 +50,7 @@ struct i386tss {
int __tss_ss1;
int __tss_esp2;
int __tss_ss2;
- int tss_cr3; /* page directory paddr */
+ int tss_cr3; /* page directory [pointer] paddr */
int __tss_eip;
int __tss_eflags;
int __tss_eax;
diff --git a/sys/arch/i386/include/vmparam.h b/sys/arch/i386/include/vmparam.h
index 43edd842463..6174c378725 100644
--- a/sys/arch/i386/include/vmparam.h
+++ b/sys/arch/i386/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.34 2006/03/15 17:56:06 mickey Exp $ */
+/* $OpenBSD: vmparam.h,v 1.35 2006/04/27 15:37:53 mickey Exp $ */
/* $NetBSD: vmparam.h,v 1.15 1994/10/27 04:16:34 cgd Exp $ */
/*-
@@ -91,22 +91,23 @@
/* user/kernel map constants */
#define VM_MIN_ADDRESS ((vaddr_t)0)
-#define VM_MAXUSER_ADDRESS ((vaddr_t)((PDSLOT_PTE<<PDSHIFT) - USPACE))
-#define VM_MAX_ADDRESS ((vaddr_t)((PDSLOT_PTE<<PDSHIFT) + \
- (PDSLOT_PTE<<PGSHIFT)))
+#define VM_MAXUSER_ADDRESS ((vaddr_t)0xcf800000)
+#define VM_MAX_ADDRESS (vm_max_address)
+extern vaddr_t vm_max_address;
#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)KERNBASE)
-#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)(PDSLOT_APTE<<PDSHIFT))
+#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)0xff800000)
/* virtual sizes (bytes) for various kernel submaps */
#define VM_PHYS_SIZE (USRIOSIZE*PAGE_SIZE)
-#define VM_PHYSSEG_MAX 5 /* actually we could have this many segments */
+#define VM_PHYSSEG_MAX 8 /* actually we could have this many segments */
#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
#define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */
-#define VM_NFREELIST 2
+#define VM_NFREELIST 3
#define VM_FREELIST_DEFAULT 0
#define VM_FREELIST_FIRST16 1
+#define VM_FREELIST_ABOVE4G 2
/*
* pmap specific data stored in the vm_physmem[] array