author    Artur Grabowski <art@cvs.openbsd.org>  2007-05-27 08:58:32 +0000
committer Artur Grabowski <art@cvs.openbsd.org>  2007-05-27 08:58:32 +0000
commit    e38e61ec55290a8ec2e97a1b20bab37b278d3397 (patch)
tree      7b7e89b0c2d63141b95346112c29061126dd9fd0 /sys/arch
parent    b138ce8a7a91d745072497d69175d5dfff1d8990 (diff)
- Redo the way we set up the direct map.  Map the first 4GB of it in
  locore so that we can use the direct map in pmap_bootstrap when
  setting up the initial page tables.
- Introduce a second direct map (I love large address spaces) with
  uncached pages.

jason@ ok
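For reference, a minimal standalone sketch (not part of this commit) of the
address arithmetic behind the two direct-map windows: a physical address
becomes a usable kernel virtual address by simple addition, once through the
existing cached window and once through the new uncached one.  Slot numbers
and the per-slot size mirror the pmap.h hunk below; the VA_SIGN_NEG
definition is an assumption for illustration.

/*
 * Standalone sketch of the cached and uncached direct-map windows.
 * Not kernel code; constants are assumed to match amd64 pmap.h.
 */
#include <stdint.h>
#include <stdio.h>

#define NBPD_L4			(1ULL << 39)	/* bytes per PML4 slot */
#define L4_SLOT_DIRECT		509		/* cached direct map */
#define L4_SLOT_DIRECT_NC	508		/* uncached direct map */
#define VA_SIGN_NEG(va)		((va) | 0xffff000000000000ULL)	/* assumed */

#define PMAP_DIRECT_BASE	VA_SIGN_NEG(L4_SLOT_DIRECT * NBPD_L4)
#define PMAP_DIRECT_BASE_NC	VA_SIGN_NEG(L4_SLOT_DIRECT_NC * NBPD_L4)

#define PMAP_DIRECT_MAP(pa)	(PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_NC_MAP(pa)	(PMAP_DIRECT_BASE_NC + (pa))
#define PMAP_DIRECT_UNMAP(va)	((va) - PMAP_DIRECT_BASE)

int
main(void)
{
	uint64_t pa = 0x12345000ULL;	/* arbitrary physical address */
	uint64_t va = PMAP_DIRECT_MAP(pa);
	uint64_t va_nc = PMAP_DIRECT_NC_MAP(pa);

	printf("cached   va 0x%016llx\n", (unsigned long long)va);
	printf("uncached va 0x%016llx\n", (unsigned long long)va_nc);
	printf("round trip  0x%016llx\n",
	    (unsigned long long)PMAP_DIRECT_UNMAP(va));
	return 0;
}

With these constants the cached window starts at 0xfffffe8000000000 and the
uncached one directly below it at 0xfffffe4000000000, each covering one
512GB PML4 slot.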
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/amd64/amd64/genassym.cf |  13
-rw-r--r--  sys/arch/amd64/amd64/locore.S    |  39
-rw-r--r--  sys/arch/amd64/amd64/pmap.c      |  93
-rw-r--r--  sys/arch/amd64/include/pmap.h    |  13
4 files changed, 117 insertions, 41 deletions
diff --git a/sys/arch/amd64/amd64/genassym.cf b/sys/arch/amd64/amd64/genassym.cf
index 06905160182..934f707e79d 100644
--- a/sys/arch/amd64/amd64/genassym.cf
+++ b/sys/arch/amd64/amd64/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.12 2007/05/25 16:22:11 art Exp $
+# $OpenBSD: genassym.cf,v 1.13 2007/05/27 08:58:31 art Exp $
# Written by Artur Grabowski <art@openbsd.org>, Public Domain
include <sys/param.h>
@@ -140,6 +140,8 @@ export PG_KR
export PG_KW
export PG_N
export PG_NX
+export PG_G
+export PG_PS
# pmap constants
export PDIR_SLOT_PTE
@@ -148,6 +150,15 @@ export NKL4_KIMG_ENTRIES
export NKL3_KIMG_ENTRIES
export NKL2_KIMG_ENTRIES
+export NDML4_ENTRIES
+export NDML3_ENTRIES
+export NDML2_ENTRIES
+
+export NBPD_L2
+export NPDPG
+
+export PDIR_SLOT_DIRECT
+
export CR4_DEFAULT
export PAGE_SIZE
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index c94d739ecea..ae0d1245563 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.21 2005/08/20 00:33:59 jsg Exp $ */
+/* $OpenBSD: locore.S,v 1.22 2007/05/27 08:58:31 art Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@@ -363,9 +363,11 @@ bi_size_ok:
#define PROC0_PTP3_OFF (PROC0_STK_OFF + UPAGES * NBPG)
#define PROC0_PTP2_OFF (PROC0_PTP3_OFF + NKL4_KIMG_ENTRIES * NBPG)
#define PROC0_PTP1_OFF (PROC0_PTP2_OFF + TABLE_L3_ENTRIES * NBPG)
+#define PROC0_DMP3_OFF (PROC0_PTP1_OFF + TABLE_L2_ENTRIES * NBPG)
+#define PROC0_DMP2_OFF (PROC0_DMP3_OFF + NDML3_ENTRIES * NBPG)
#define TABLESIZE \
- ((NKL4_KIMG_ENTRIES + TABLE_L3_ENTRIES + TABLE_L2_ENTRIES + 1 + UPAGES) \
- * NBPG)
+ ((NKL4_KIMG_ENTRIES + TABLE_L3_ENTRIES + TABLE_L2_ENTRIES + 1 + UPAGES + \
+ NDML3_ENTRIES + NDML2_ENTRIES) * NBPG)
#define fillkpt \
1: movl %eax,(%ebx) ; /* store phys addr */ \
@@ -484,9 +486,36 @@ bi_size_ok:
movl $NKL4_KIMG_ENTRIES,%ecx
fillkpt
+ /*
+ * Map the first 4 GB with the direct map. We'll map the rest
+ * in pmap_bootstrap. But we always need the first 4GB during
+ * bootstrap.
+ */
+ leal (PROC0_DMP2_OFF)(%esi), %ebx
+ xorl %eax, %eax
+ orl $(PG_V|PG_KW|PG_PS|PG_G), %eax
+ movl $(NDML2_ENTRIES * NPDPG), %ecx
+1: movl %eax, (%ebx)
+ movl $0, 4(%ebx)
+ addl $8, %ebx
+ addl $NBPD_L2, %eax
+ loop 1b
+
+ leal (PROC0_DMP3_OFF)(%esi), %ebx
+ leal (PROC0_DMP2_OFF)(%esi), %eax
+ orl $(PG_V|PG_KW), %eax
+ movl $NDML2_ENTRIES, %ecx
+ fillkpt
+
+ leal (PROC0_PML4_OFF + PDIR_SLOT_DIRECT * 8)(%esi), %ebx
+ leal (PROC0_DMP3_OFF)(%esi), %eax
+ orl $(PG_V|PG_KW), %eax
+ movl $NDML3_ENTRIES, %ecx
+ fillkpt
+
/* Install recursive top level PDE */
- leal (PROC0_PML4_OFF + PDIR_SLOT_PTE*8)(%esi),%ebx
- leal (PROC0_PML4_OFF)(%esi),%eax
+ leal (PROC0_PML4_OFF + PDIR_SLOT_PTE*8)(%esi),%ebx
+ leal (PROC0_PML4_OFF)(%esi),%eax
orl $(PG_V|PG_KW),%eax
movl %eax,(%ebx)
movl $0, 4(%ebx)
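The new locore loop above runs in 32-bit mode before paging is enabled, so
it writes each 64-bit PTE as two 32-bit words.  Below is a hedged C
rendering of the same construction, for readability only; flag values and
constants are the usual amd64 ones, assumed here rather than taken from the
tree.

/*
 * Illustration of what the locore additions build: fill NDML2_ENTRIES
 * level-2 pages with 2MB large-page entries covering the first 4GB,
 * link them from the level-3 page, and hook that page into the
 * top-level page table at the direct-map slot.
 */
#include <stdint.h>

#define NBPG		4096
#define NPDPG		(NBPG / sizeof(uint64_t))	/* 512 entries/page */
#define NDML2_ENTRIES	4				/* 4 * 1GB = 4GB */
#define NBPD_L2		(1ULL << 21)			/* 2MB per L2 entry */

#define PG_V	0x001
#define PG_KW	0x002	/* kernel read/write (PG_RW) */
#define PG_PS	0x080
#define PG_G	0x100

static void
build_low_direct_map(uint64_t *dmp2 /* NDML2_ENTRIES pages */,
    uint64_t *dmp3 /* one page */, uint64_t *pml4, int slot_direct,
    uint64_t dmp2_pa, uint64_t dmp3_pa)
{
	uint64_t pa = 0;
	unsigned long i;

	/* 2MB mappings for the first 4GB of physical memory. */
	for (i = 0; i < NDML2_ENTRIES * NPDPG; i++) {
		dmp2[i] = pa | PG_V | PG_KW | PG_PS | PG_G;
		pa += NBPD_L2;
	}

	/* Point the level-3 entries at the level-2 pages. */
	for (i = 0; i < NDML2_ENTRIES; i++)
		dmp3[i] = (dmp2_pa + i * NBPG) | PG_V | PG_KW;

	/* Hook the direct map into the top-level page table. */
	pml4[slot_direct] = dmp3_pa | PG_V | PG_KW;
}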
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index aac10a10054..15e60b4da0a 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.23 2007/05/25 16:22:11 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.24 2007/05/27 08:58:31 art Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
@@ -237,11 +237,6 @@ long nbpd[] = NBPD_INITIALIZER;
pd_entry_t *normal_pdes[] = PDES_INITIALIZER;
pd_entry_t *alternate_pdes[] = APDES_INITIALIZER;
-/*
- * Direct map.
- */
-paddr_t DMPDpa;
-
/* int nkpde = NKPTP; */
struct simplelock pmaps_lock;
@@ -575,8 +570,6 @@ pmap_bootstrap(vaddr_t kva_start, paddr_t max_pa)
{
vaddr_t kva, kva_end;
struct pmap *kpm;
- pt_entry_t *tmppte;
- vaddr_t tmpva;
int i;
unsigned long p1i;
pt_entry_t pg_nx = (cpu_feature & CPUID_NXE? PG_NX : 0);
@@ -658,58 +651,83 @@ pmap_bootstrap(vaddr_t kva_start, paddr_t max_pa)
}
/*
- * Temporary mapping for setting up the direct map.
+ * Map the direct map. The first 4GB were mapped in locore, here
+ * we map the rest if it exists. We actually use the direct map
+ * here to set up the page tables, we're assuming that we're still
+ * operating in the lower 4GB of memory.
*/
- tmpva = (KERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);
- virtual_avail += PAGE_SIZE;
- tmppte = PTE_BASE + pl1_i(tmpva);
+ ndmpdp = (max_pa + NBPD_L3 - 1) >> L3_SHIFT;
+ if (ndmpdp < NDML2_ENTRIES)
+ ndmpdp = NDML2_ENTRIES; /* At least 4GB */
+
+printf("ndmpdp: %ld\n", ndmpdp);
+
+ dmpdp = kpm->pm_pdir[PDIR_SLOT_DIRECT] & PG_FRAME;
+
+ dmpd = avail_start; avail_start += ndmpdp * PAGE_SIZE;
+
+ for (i = NDML2_ENTRIES; i < NPDPG * ndmpdp; i++) {
+ paddr_t pdp;
+ vaddr_t va;
+
+ pdp = (paddr_t)&(((pd_entry_t *)dmpd)[i]);
+ va = PMAP_DIRECT_MAP(pdp);
+
+ *((pd_entry_t *)va) = ((paddr_t)i << L2_SHIFT);
+ *((pd_entry_t *)va) |= PG_RW | PG_V | PG_PS | PG_G | PG_U |
+ PG_M;
+ }
+
+ for (i = NDML2_ENTRIES; i < ndmpdp; i++) {
+ paddr_t pdp;
+ vaddr_t va;
+
+ pdp = (paddr_t)&(((pd_entry_t *)dmpdp)[i]);
+ va = PMAP_DIRECT_MAP(pdp);
+
+ *((pd_entry_t *)va) = dmpd + (i << PAGE_SHIFT);
+ *((pd_entry_t *)va) |= PG_RW | PG_V | PG_U | PG_M;
+ }
+
+ kpm->pm_pdir[PDIR_SLOT_DIRECT] = dmpdp | PG_V | PG_KW | PG_U |
+ PG_M;
/*
- * Map the direct map. We steal pages for the page tables from
- * avail_start, then we create temporary mappings using the
- * early_zerop. Scary, slow, but we only do it once.
+ * Now do the same thing, but for the direct uncached map.
*/
ndmpdp = (max_pa + NBPD_L3 - 1) >> L3_SHIFT;
- if (ndmpdp < 4)
- ndmpdp = 4; /* At least 4GB */
+ if (ndmpdp < NDML2_ENTRIES)
+ ndmpdp = NDML2_ENTRIES; /* At least 4GB */
dmpdp = avail_start; avail_start += PAGE_SIZE;
dmpd = avail_start; avail_start += ndmpdp * PAGE_SIZE;
for (i = 0; i < NPDPG * ndmpdp; i++) {
paddr_t pdp;
- paddr_t off;
vaddr_t va;
pdp = (paddr_t)&(((pd_entry_t *)dmpd)[i]);
- off = pdp - trunc_page(pdp);
- *tmppte = (trunc_page(pdp) & PG_FRAME) | PG_V | PG_RW;
- pmap_update_pg(tmpva);
+ va = PMAP_DIRECT_MAP(pdp);
- va = tmpva + off;
*((pd_entry_t *)va) = (paddr_t)i << L2_SHIFT;
- *((pd_entry_t *)va) |= PG_RW | PG_V | PG_PS | PG_G;
+ *((pd_entry_t *)va) |= PG_RW | PG_V | PG_PS | PG_G | PG_N |
+ PG_U | PG_M;
}
for (i = 0; i < ndmpdp; i++) {
paddr_t pdp;
- paddr_t off;
vaddr_t va;
pdp = (paddr_t)&(((pd_entry_t *)dmpdp)[i]);
- off = pdp - trunc_page(pdp);
- *tmppte = (trunc_page(pdp) & PG_FRAME) | PG_V | PG_RW;
- pmap_update_pg(tmpva);
+ va = PMAP_DIRECT_MAP(pdp);
- va = tmpva + off;
*((pd_entry_t *)va) = dmpd + (i << PAGE_SHIFT);
- *((pd_entry_t *)va) |= PG_RW | PG_V | PG_U;
+ *((pd_entry_t *)va) |= PG_RW | PG_V | PG_U | PG_M;
}
- *tmppte = 0;
-
- DMPDpa = dmpdp;
- kpm->pm_pdir[PDIR_SLOT_DIRECT] = DMPDpa | PG_V | PG_KW | PG_U;
+ kpm->pm_pdir[PDIR_SLOT_DIRECT_NC] = dmpdp | PG_V | PG_KW | PG_U |
+ PG_M;
+
tlbflush();
msgbuf_vaddr = virtual_avail;
@@ -1073,7 +1091,8 @@ pmap_pdp_ctor(void *arg, void *object, int flags)
memset(&pdir[PDIR_SLOT_KERN + npde], 0,
(NTOPLEVEL_PDES - (PDIR_SLOT_KERN + npde)) * sizeof(pd_entry_t));
- pdir[PDIR_SLOT_DIRECT] = DMPDpa | PG_V | PG_KW | PG_U;
+ pdir[PDIR_SLOT_DIRECT] = pmap_kernel()->pm_pdir[PDIR_SLOT_DIRECT];
+ pdir[PDIR_SLOT_DIRECT_NC] = pmap_kernel()->pm_pdir[PDIR_SLOT_DIRECT_NC];
#if VM_MIN_KERNEL_ADDRESS != KERNBASE
pdir[pl4_pi(KERNBASE)] = PDP_BASE[pl4_pi(KERNBASE)];
@@ -1388,6 +1407,12 @@ pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
return (TRUE);
}
+ if (pmap == pmap_kernel() && va >= PMAP_DIRECT_BASE_NC &&
+ va < PMAP_DIRECT_END_NC) {
+ *pap = va - PMAP_DIRECT_BASE_NC;
+ return (TRUE);
+ }
+
pmap_map_ptes(pmap, &ptes, &pdes);
if (pmap_pdes_valid(va, pdes, &pde) == FALSE) {
return FALSE;
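The sizing arithmetic used twice in pmap_bootstrap above is worth spelling
out.  A small sketch, assuming L3_SHIFT is 30 (1GB mapped per PDP entry) as
on amd64:

/*
 * Number of 1GB direct-map PDP entries: max_pa rounded up to the next
 * gigabyte, clamped so the map never covers less than the 4GB already
 * set up in locore.
 */
#include <stdint.h>

#define L3_SHIFT	30
#define NBPD_L3		(1ULL << L3_SHIFT)	/* 1GB */
#define NDML2_ENTRIES	4			/* 4GB minimum */

static uint64_t
direct_map_pdp_entries(uint64_t max_pa)
{
	uint64_t ndmpdp = (max_pa + NBPD_L3 - 1) >> L3_SHIFT;

	if (ndmpdp < NDML2_ENTRIES)
		ndmpdp = NDML2_ENTRIES;	/* at least 4GB */
	return ndmpdp;
}

For example, a machine with 6.5GB of physical memory gets 7 PDP entries,
while anything at or below 4GB is rounded up to NDML2_ENTRIES, matching
what locore mapped.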
diff --git a/sys/arch/amd64/include/pmap.h b/sys/arch/amd64/include/pmap.h
index b06e2528580..05a2cd5ae0f 100644
--- a/sys/arch/amd64/include/pmap.h
+++ b/sys/arch/amd64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.12 2007/05/25 16:22:11 art Exp $ */
+/* $OpenBSD: pmap.h,v 1.13 2007/05/27 08:58:31 art Exp $ */
/* $NetBSD: pmap.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $ */
/*
@@ -157,11 +157,13 @@
#define L4_SLOT_KERNBASE 511
#define L4_SLOT_APTE 510
#define L4_SLOT_DIRECT 509
+#define L4_SLOT_DIRECT_NC 508
#define PDIR_SLOT_KERN L4_SLOT_KERN
#define PDIR_SLOT_PTE L4_SLOT_PTE
#define PDIR_SLOT_APTE L4_SLOT_APTE
#define PDIR_SLOT_DIRECT L4_SLOT_DIRECT
+#define PDIR_SLOT_DIRECT_NC L4_SLOT_DIRECT_NC
/*
* the following defines give the virtual addresses of various MMU
@@ -176,6 +178,8 @@
#define APTE_BASE ((pt_entry_t *) (VA_SIGN_NEG((L4_SLOT_APTE * NBPD_L4))))
#define PMAP_DIRECT_BASE (VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
#define PMAP_DIRECT_END (VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))
+#define PMAP_DIRECT_BASE_NC (VA_SIGN_NEG((L4_SLOT_DIRECT_NC * NBPD_L4)))
+#define PMAP_DIRECT_END_NC (VA_SIGN_NEG(((L4_SLOT_DIRECT_NC + 1) * NBPD_L4)))
#define L1_BASE PTE_BASE
#define AL1_BASE APTE_BASE
@@ -203,6 +207,10 @@
#define NKL3_KIMG_ENTRIES 1
#define NKL2_KIMG_ENTRIES 8
+#define NDML4_ENTRIES 1
+#define NDML3_ENTRIES 1
+#define NDML2_ENTRIES 4 /* 4GB */
+
/*
* Since kva space is below the kernel in its entirety, we start off
* with zero entries on each level.
@@ -558,6 +566,9 @@ void pmap_ldt_cleanup(struct proc *);
#define PMAP_DIRECT_MAP(pa) ((vaddr_t)PMAP_DIRECT_BASE + pa)
#define PMAP_DIRECT_UNMAP(va) ((paddr_t)va - PMAP_DIRECT_BASE)
+#define PMAP_DIRECT_NC_MAP(pa) ((vaddr_t)PMAP_DIRECT_BASE_NC + pa)
+#define PMAP_DIRECT_NC_UNMAP(va) ((paddr_t)va - PMAP_DIRECT_BASE_NC)
+
#define pmap_map_direct(pg) PMAP_DIRECT_MAP(VM_PAGE_TO_PHYS(pg))
#define pmap_unmap_direct(va) PHYS_TO_VM_PAGE(PMAP_DIRECT_UNMAP(va))
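Finally, a standalone sketch (not kernel code) of the fast path that
pmap_extract() gains from these macros: a virtual address inside either
direct-map window translates to its physical address by plain subtraction,
with no page-table walk.  The VA_SIGN_NEG definition is again assumed.

/*
 * Range-check translation for the cached and uncached direct-map
 * windows, mirroring the shortcut added to pmap_extract() above.
 */
#include <stdint.h>
#include <stdbool.h>

#define NBPD_L4			(1ULL << 39)
#define L4_SLOT_DIRECT		509
#define L4_SLOT_DIRECT_NC	508
#define VA_SIGN_NEG(va)		((va) | 0xffff000000000000ULL)	/* assumed */

#define PMAP_DIRECT_BASE	VA_SIGN_NEG(L4_SLOT_DIRECT * NBPD_L4)
#define PMAP_DIRECT_END		VA_SIGN_NEG((L4_SLOT_DIRECT + 1) * NBPD_L4)
#define PMAP_DIRECT_BASE_NC	VA_SIGN_NEG(L4_SLOT_DIRECT_NC * NBPD_L4)
#define PMAP_DIRECT_END_NC	VA_SIGN_NEG((L4_SLOT_DIRECT_NC + 1) * NBPD_L4)

static bool
direct_extract(uint64_t va, uint64_t *pap)
{
	if (va >= PMAP_DIRECT_BASE && va < PMAP_DIRECT_END) {
		*pap = va - PMAP_DIRECT_BASE;
		return true;
	}
	if (va >= PMAP_DIRECT_BASE_NC && va < PMAP_DIRECT_END_NC) {
		*pap = va - PMAP_DIRECT_BASE_NC;
		return true;
	}
	return false;	/* fall back to a page-table walk */
}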