summary refs log tree commit diff
path: root/sys
diff options
context:
space:
mode:
author	Mike Larkin <mlarkin@cvs.openbsd.org>	2015-04-12 21:37:34 +0000
committer	Mike Larkin <mlarkin@cvs.openbsd.org>	2015-04-12 21:37:34 +0000
commit	e17bb4cf9e89e3e5aaf7005cfdea73ebfb02c8ee (patch)
tree	725f7960168e65feaf83db32f299e7e8a46ae9cb /sys
parent	cfe53e2b0d5af77b09bc43d16f0b3acc10d46d3a (diff)
Fix some KNF, spacing, and typo issues. Moving the deck chairs around to
reduce differences between PAE and no-PAE i386 pmaps.
Diffstat (limited to 'sys')
-rw-r--r--	sys/arch/i386/i386/pmap.c	84
-rw-r--r--	sys/arch/i386/i386/pmapae.c	75
-rw-r--r--	sys/arch/i386/include/pmap.h	4
3 files changed, 72 insertions, 91 deletions
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index 31c3aafa0ab..729de4a87e4 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.173 2015/04/12 19:21:32 mlarkin Exp $ */
+/* $OpenBSD: pmap.c,v 1.174 2015/04/12 21:37:33 mlarkin Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -294,7 +294,7 @@
* PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
*/
#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD))
-#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD))
+#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD))
#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
@@ -318,8 +318,7 @@
*
* vtopte: return a pointer to the PTE mapping a VA
*/
-#define vtopte(VA) (PTE_BASE + atop((vaddr_t)VA))
-
+#define vtopte(VA) (PTE_BASE + atop((vaddr_t)VA))
/*
* PTP macros:
@@ -331,25 +330,34 @@
* NBPD == number of bytes a PTP can map (4MB)
*/
-#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
-#define ptp_o2i(O) ((O) / NBPG) /* offset => index */
-#define ptp_i2v(I) ((I) * NBPD) /* index => VA */
-#define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
+#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
+#define ptp_o2i(O) ((O) / NBPG) /* offset => index */
+#define ptp_i2v(I) ((I) * NBPD) /* index => VA */
+#define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
-#define PDE(pm,i) (((pd_entry_t *)(pm)->pm_pdir)[(i)])
+/*
+ * Access PD and PT
+ */
+#define PDE(pm,i) (((pd_entry_t *)(pm)->pm_pdir)[(i)])
/*
* here we define the data types for PDEs and PTEs
*/
-typedef u_int32_t pd_entry_t; /* PDE */
-typedef u_int32_t pt_entry_t; /* PTE */
+typedef u_int32_t pd_entry_t; /* PDE */
+typedef u_int32_t pt_entry_t; /* PTE */
+
+/*
+ * Number of PTEs per cache line. 4 byte pte, 64-byte cache line
+ * Used to avoid false sharing of cache lines.
+ */
+#define NPTECL 16
/*
* global data structures
*/
-/* the kernel's pmap (proc0) */
-struct pmap __attribute__ ((aligned (32))) kernel_pmap_store;
+/* The kernel's pmap (proc0), 32 byte aligned in case we are using PAE */
+struct pmap __attribute__ ((aligned (32))) kernel_pmap_store;
/*
* nkpde is the number of kernel PTPs allocated for the kernel at
@@ -360,9 +368,6 @@ struct pmap __attribute__ ((aligned (32))) kernel_pmap_store;
int nkpde = NKPTP;
int nkptp_max = 1024 - (KERNBASE / NBPD) - 1;
-#ifdef NKPDE
-#error "obsolete NKPDE: use NKPTP"
-#endif
extern int cpu_pae;
@@ -376,7 +381,7 @@ int pmap_pg_g = 0;
/*
* pmap_pg_wc: if our processor supports PAT then we set this
* to be the pte bits for Write Combining. Else we fall back to
- * UC- so mtrrs can override the cacheability;
+ * UC- so mtrrs can override the cacheability
*/
int pmap_pg_wc = PG_UCMINUS;
@@ -388,6 +393,20 @@ uint32_t protection_codes[8]; /* maps MI prot to i386 prot code */
boolean_t pmap_initialized = FALSE; /* pmap_init done yet? */
/*
+ * MULTIPROCESSOR: special VA's/ PTE's are actually allocated inside a
+ * MAXCPUS*NPTECL array of PTE's, to avoid cache line thrashing
+ * due to false sharing.
+ */
+
+#ifdef MULTIPROCESSOR
+#define PTESLEW(pte, id) ((pte)+(id)*NPTECL)
+#define VASLEW(va,id) ((va)+(id)*NPTECL*NBPG)
+#else
+#define PTESLEW(pte, id) (pte)
+#define VASLEW(va,id) (va)
+#endif
+
+/*
* pv management structures.
*/
struct pool pmap_pv_pool;
@@ -428,26 +447,6 @@ struct pmap_head pmaps;
struct pool pmap_pmap_pool;
/*
- * Number of PTE's per cache line. 4 byte pte, 64-byte cache line
- * Used to avoid false sharing of cache lines.
- */
-#define NPTECL 16
-
-/*
- * MULTIPROCESSOR: special VA's/ PTE's are actually allocated inside a
- * MAXCPUS*NPTECL array of PTE's, to avoid cache line thrashing
- * due to false sharing.
- */
-
-#ifdef MULTIPROCESSOR
-#define PTESLEW(pte, id) ((pte)+(id)*NPTECL)
-#define VASLEW(va,id) ((va)+(id)*NPTECL*NBPG)
-#else
-#define PTESLEW(pte, id) (pte)
-#define VASLEW(va,id) (va)
-#endif
-
-/*
* special VAs and the PTEs that map them
*/
@@ -458,13 +457,6 @@ caddr_t vmmap; /* XXX: used by mem.c... it should really uvm_map_reserve it */
/*
* local prototypes
*/
-
-struct pv_entry *pmap_alloc_pv(struct pmap *, int); /* see codes in pmap.h */
-void pmap_enter_pv(struct vm_page *, struct pv_entry *,
- struct pmap *, vaddr_t, struct vm_page *);
-void pmap_free_pv(struct pmap *, struct pv_entry *);
-void pmap_free_pvs(struct pmap *, struct pv_entry *);
-
struct vm_page *pmap_alloc_ptp_86(struct pmap *, int, pt_entry_t);
struct vm_page *pmap_get_ptp_86(struct pmap *, int);
pt_entry_t *pmap_map_ptes_86(struct pmap *);
@@ -484,10 +476,6 @@ void pmap_sync_flags_pte_86(struct vm_page *, pt_entry_t);
void pmap_drop_ptp(struct pmap *, vaddr_t, struct vm_page *,
pt_entry_t *);
-void pmap_apte_flush(void);
-void pmap_exec_account(struct pmap *, vaddr_t, pt_entry_t,
- pt_entry_t);
-
void setcslimit(struct pmap *, struct trapframe *, struct pcb *,
vaddr_t);
void pmap_pinit_pd_86(struct pmap *);
diff --git a/sys/arch/i386/i386/pmapae.c b/sys/arch/i386/i386/pmapae.c
index 2049c9cd824..142a2e863ac 100644
--- a/sys/arch/i386/i386/pmapae.c
+++ b/sys/arch/i386/i386/pmapae.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmapae.c,v 1.29 2015/04/12 19:21:32 mlarkin Exp $ */
+/* $OpenBSD: pmapae.c,v 1.30 2015/04/12 21:37:33 mlarkin Exp $ */
/*
* Copyright (c) 2006-2008 Michael Shalayeff
@@ -335,23 +335,23 @@
* because cr3 is only 32 bits wide.
*
*/
-#define PG_FRAME 0xffffff000ULL /* page frame mask */
-#define PG_LGFRAME 0xfffe00000ULL /* large (2M) page frame mask */
+#define PG_FRAME 0xffffff000ULL /* page frame mask */
+#define PG_LGFRAME 0xfffe00000ULL /* large (2M) page frame mask */
/*
- * Redefine the PDSHIFT, NBPD
+ * Redefine the PDSHIFT and NBPD macros for PAE
*/
-#undef PDSHIFT
-#define PDSHIFT 21 /* page directory address shift */
-#undef NBPD
-#define NBPD (1U << PDSHIFT) /* # bytes mapped by PD (2MB) */
-
-#undef PDSLOT_PTE
-#define PDSLOT_PTE (1660U) /* 1660: for recursive PDP map */
-#undef PDSLOT_KERN
-#define PDSLOT_KERN (1664U) /* 1664: start of kernel space */
-#undef PDSLOT_APTE
-#define PDSLOT_APTE (2044U) /* 2044: alternative recursive slot */
+#undef PDSHIFT
+#define PDSHIFT 21 /* page directory address shift */
+#undef NBPD
+#define NBPD (1U << PDSHIFT) /* # bytes mapped by PD (2MB) */
+
+#undef PDSLOT_PTE
+#define PDSLOT_PTE (1660U) /* 1660: for recursive PDP map */
+#undef PDSLOT_KERN
+#define PDSLOT_KERN (1664U) /* 1664: start of kernel space */
+#undef PDSLOT_APTE
+#define PDSLOT_APTE (2044U) /* 2044: alternative recursive slot */
/*
* The following defines give the virtual addresses of various MMU
@@ -360,25 +360,25 @@
* PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD
* PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
*/
-#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
-#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
-#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
-#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
-#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
-#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
+#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
+#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
+#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
+#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
+#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
+#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
/*
* pdei/ptei: generate index into PDP/PTP from a VA
*/
-#define PD_MASK 0xffe00000 /* page directory address bits */
-#define PT_MASK 0x001ff000 /* page table address bits */
-#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
-#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
+#define PD_MASK 0xffe00000 /* page directory address bits */
+#define PT_MASK 0x001ff000 /* page table address bits */
+#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
+#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
/*
* Mach derived conversion macros
*/
-#define i386_round_pdr(x) ((((unsigned)(x)) + ~PD_MASK) & PD_MASK)
+#define i386_round_pdr(x) ((((unsigned)(x)) + ~PD_MASK) & PD_MASK)
/*
* various address macros
@@ -386,8 +386,7 @@
* vtopte: return a pointer to the PTE mapping a VA
*
*/
-#define vtopte(VA) (PTE_BASE + atop((vaddr_t)VA))
-
+#define vtopte(VA) (PTE_BASE + atop((vaddr_t)VA))
/*
* PTP macros:
@@ -395,8 +394,8 @@
* A PTP's offset is the byte-offset in the PTE space that this PTP is at.
* A PTP's VA is the first VA mapped by that PTP.
*
- * Note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
- * NBPD == number of bytes a PTP can map (4MB)
+ * Note that NBPG == number of bytes in a PTP (4096 bytes == 512 entries)
+ * NBPD == number of bytes a PTP can map (2MB)
*/
#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
@@ -410,13 +409,13 @@
#define PDE(pm,i) (((pd_entry_t *)(pm)->pm_pdir)[(i)])
/*
- * here we define the data types for PDEs and PTEs
+ * here we define the data types for PDEs and PTEs for PAE
*/
typedef u_int64_t pd_entry_t; /* PDE */
typedef u_int64_t pt_entry_t; /* PTE */
/*
- * Number of PTE's per cache line. 8 byte pte, 64-byte cache line
+ * Number of PTEs per cache line. 8 byte pte, 64-byte cache line
* Used to avoid false sharing of cache lines.
*/
#define NPTECL 8
@@ -455,25 +454,17 @@ extern struct pmap_head pmaps;
/*
* local prototypes
*/
-struct pv_entry *pmap_add_pvpage(struct pv_page *, boolean_t);
-struct pv_entry *pmap_alloc_pv(struct pmap *, int); /* see codes in pmap.h */
-struct pv_entry *pmap_alloc_pvpage(struct pmap *, int);
-void pmap_enter_pv(struct vm_page *, struct pv_entry *,
- struct pmap *, vaddr_t, struct vm_page *);
-void pmap_free_pv(struct pmap *, struct pv_entry *);
-void pmap_free_pvs(struct pmap *, struct pv_entry *);
void pmap_free_pv_doit(struct pv_entry *);
-void pmap_free_pvpage(void);
struct vm_page *pmap_alloc_ptp_pae(struct pmap *, int, pt_entry_t);
struct vm_page *pmap_get_ptp_pae(struct pmap *, int);
pt_entry_t *pmap_map_ptes_pae(struct pmap *);
-void pmap_do_remove_pae(struct pmap *, vaddr_t, vaddr_t, int);
+void pmap_unmap_ptes_pae(struct pmap *);
+void pmap_do_remove_pae(struct pmap *, vaddr_t, vaddr_t, int);
void pmap_remove_ptes_pae(struct pmap *, struct vm_page *,
vaddr_t, vaddr_t, vaddr_t, int);
boolean_t pmap_remove_pte_pae(struct pmap *, struct vm_page *,
pt_entry_t *, vaddr_t, int);
void pmap_sync_flags_pte_pae(struct vm_page *, pt_entry_t);
-void pmap_unmap_ptes_pae(struct pmap *);
static __inline u_int
pmap_pte2flags(pt_entry_t pte)
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index 6aa02e3e7c0..e03ba08e9a0 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.73 2015/04/12 18:37:54 mlarkin Exp $ */
+/* $OpenBSD: pmap.h,v 1.74 2015/04/12 21:37:33 mlarkin Exp $ */
/* $NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $ */
/*
@@ -240,8 +240,10 @@ void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t);
void pmap_kremove(vaddr_t, vsize_t);
void pmap_zero_page(struct vm_page *);
void pmap_copy_page(struct vm_page *, struct vm_page *);
+struct pv_entry *pmap_alloc_pv(struct pmap *, int);
void pmap_enter_pv(struct vm_page *, struct pv_entry *,
struct pmap *, vaddr_t, struct vm_page *);
+void pmap_free_pv(struct pmap *, struct pv_entry *);
void pmap_free_pvs(struct pmap *, struct pv_entry *);
boolean_t pmap_clear_attrs(struct vm_page *, int);
static void pmap_page_protect(struct vm_page *, vm_prot_t);