summary | refs | log | tree | commit | diff
path: root/sys/arch/i386
diff options
context:
space:
mode:
author    Martin Pieuchot <mpi@cvs.openbsd.org>  2024-11-01 12:07:54 +0000
committer Martin Pieuchot <mpi@cvs.openbsd.org>  2024-11-01 12:07:54 +0000
commit    93e18076e92891c1ea26dc77ebccc8c79399a83c (patch)
tree      5ed9876fcc4b7a70fd073a9a30468a57a8e9fddf /sys/arch/i386
parent    41d04d26d4d12db69c47789b746ee28a4c0b7ccc (diff)
Allocate PD for Intel's U-K before transferring mappings to the pae pmap.
Use km_alloc(9) instead of uvm_km_zalloc() for the allocation because the mappings are no longer "lost" when switching over to the pae pmap. Introduce a new function to reduce code duplication involving km_alloc(9). Remove printing some __func__ in panic(9) strings, they are redundant. Tested by sthen@ in a bulk. ok mlarkin@, hshoexer@
Diffstat (limited to 'sys/arch/i386')
-rw-r--r--  sys/arch/i386/i386/pmap.c    |  54
-rw-r--r--  sys/arch/i386/i386/pmapae.c  | 112
2 files changed, 77 insertions, 89 deletions
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index dc7f8a152d4..aedabeceb63 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.223 2024/04/03 18:43:32 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.224 2024/11/01 12:07:53 mpi Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -885,6 +885,25 @@ pmap_kremove(vaddr_t sva, vsize_t len)
}
/*
+ * Allocate a new PD for Intel's U-K.
+ */
+void
+pmap_alloc_pdir_intel_x86(struct pmap *pmap)
+{
+ vaddr_t va;
+
+ KASSERT(pmap->pm_pdir_intel == 0);
+
+ va = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_zero, &kd_waitok);
+ if (va == 0)
+ panic("kernel_map out of virtual space");
+ pmap->pm_pdir_intel = va;
+ if (!pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir_intel,
+ &pmap->pm_pdirpa_intel))
+ panic("can't locate PD page");
+}
+
+/*
* p m a p i n i t f u n c t i o n s
*
* pmap_bootstrap and pmap_init are called during system startup
@@ -1350,6 +1369,8 @@ pmap_create(void)
pmap->pm_ptphint = NULL;
pmap->pm_hiexec = 0;
pmap->pm_flags = 0;
+ pmap->pm_pdir_intel = 0;
+ pmap->pm_pdirpa_intel = 0;
initcodesegment(&pmap->pm_codeseg);
@@ -1363,7 +1384,7 @@ pmap_pinit_pd_86(struct pmap *pmap)
/* allocate PDP */
pmap->pm_pdir = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_dirty, &kd_waitok);
if (pmap->pm_pdir == 0)
- panic("pmap_pinit_pd_86: kernel_map out of virtual space!");
+ panic("kernel_map out of virtual space");
pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
&pmap->pm_pdirpa);
pmap->pm_pdirsize = NBPG;
@@ -1393,15 +1414,7 @@ pmap_pinit_pd_86(struct pmap *pmap)
* execution, one that lacks all kernel mappings.
*/
if (cpu_meltdown) {
- pmap->pm_pdir_intel = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_zero,
- &kd_waitok);
- if (pmap->pm_pdir_intel == 0)
- panic("%s: kernel_map out of virtual space!", __func__);
-
- if (!pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir_intel,
- &pmap->pm_pdirpa_intel))
- panic("%s: unknown PA mapping for meltdown PD",
- __func__);
+ pmap_alloc_pdir_intel_x86(pmap);
/* Copy PDEs from pmap_kernel's U-K view */
bcopy((void *)pmap_kernel()->pm_pdir_intel,
@@ -1411,9 +1424,6 @@ pmap_pinit_pd_86(struct pmap *pmap)
"pdir_intel 0x%lx pdirpa_intel 0x%lx\n",
__func__, pmap, pmap->pm_pdir, pmap->pm_pdirpa,
pmap->pm_pdir_intel, pmap->pm_pdirpa_intel);
- } else {
- pmap->pm_pdir_intel = 0;
- pmap->pm_pdirpa_intel = 0;
}
mtx_enter(&pmaps_lock);
@@ -2509,18 +2519,10 @@ pmap_enter_special_86(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int32_t flags)
/* Must be kernel VA */
if (va < VM_MIN_KERNEL_ADDRESS)
- panic("%s: invalid special mapping va 0x%lx requested",
- __func__, va);
-
- if (!pmap->pm_pdir_intel) {
- pmap->pm_pdir_intel = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_zero,
- &kd_waitok);
- if (pmap->pm_pdir_intel == 0)
- panic("%s: kernel_map out of virtual space!", __func__);
- if (!pmap_extract(pmap, pmap->pm_pdir_intel,
- &pmap->pm_pdirpa_intel))
- panic("%s: can't locate PD page", __func__);
- }
+ panic("invalid special mapping va 0x%lx requested", va);
+
+ if (!pmap->pm_pdir_intel)
+ pmap_alloc_pdir_intel_x86(pmap);
DPRINTF("%s: pm_pdir_intel 0x%x pm_pdirpa_intel 0x%x\n", __func__,
(uint32_t)pmap->pm_pdir_intel, (uint32_t)pmap->pm_pdirpa_intel);
diff --git a/sys/arch/i386/i386/pmapae.c b/sys/arch/i386/i386/pmapae.c
index c88ec8ffebb..b7be0b44e73 100644
--- a/sys/arch/i386/i386/pmapae.c
+++ b/sys/arch/i386/i386/pmapae.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmapae.c,v 1.72 2024/05/30 10:56:24 mpi Exp $ */
+/* $OpenBSD: pmapae.c,v 1.73 2024/11/01 12:07:53 mpi Exp $ */
/*
* Copyright (c) 2006-2008 Michael Shalayeff
@@ -606,6 +606,38 @@ pmap_pte_paddr_pae(vaddr_t va)
}
/*
+ * Allocate a new PD for Intel's U-K.
+ */
+void
+pmap_alloc_pdir_intel_pae(struct pmap *pmap)
+{
+ vaddr_t va;
+ int i;
+
+ KASSERT(pmap->pm_pdir_intel == 0);
+
+ va = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_zero, &kd_waitok);
+ if (va == 0)
+ panic("kernel_map out of virtual space");
+ pmap->pm_pdir_intel = va;
+ if (!pmap_extract(pmap_kernel(), (vaddr_t)&pmap->pm_pdidx_intel,
+ &pmap->pm_pdirpa_intel))
+ panic("can't locate PDPT");
+
+ for (i = 0; i < 4; i++) {
+ pmap->pm_pdidx_intel[i] = 0;
+ if (!pmap_extract(pmap, va + i * NBPG,
+ (paddr_t *)&pmap->pm_pdidx_intel[i]))
+ panic("can't locate PD page");
+
+ pmap->pm_pdidx_intel[i] |= PG_V;
+
+ DPRINTF("%s: pm_pdidx_intel[%d] = 0x%llx\n", __func__,
+ i, pmap->pm_pdidx_intel[i]);
+ }
+}
+
+/*
* Switch over to PAE page tables
*/
void
@@ -615,7 +647,7 @@ pmap_bootstrap_pae(void)
struct pmap *kpm = pmap_kernel();
struct vm_page *ptp;
paddr_t ptaddr;
- u_int32_t bits;
+ u_int32_t bits, *pd = NULL;
vaddr_t va, eva;
pt_entry_t pte;
@@ -640,6 +672,13 @@ pmap_bootstrap_pae(void)
PDE(kpm, PDSLOT_PTE+2) = kpm->pm_pdidx[2] | PG_KW | PG_M | PG_U;
PDE(kpm, PDSLOT_PTE+3) = kpm->pm_pdidx[3] | PG_KW | PG_M | PG_U;
+ /* allocate new special PD before transferring all mappings. */
+ if (kpm->pm_pdir_intel) {
+ pd = (uint32_t *)kpm->pm_pdir_intel;
+ kpm->pm_pdir_intel = kpm->pm_pdirpa_intel = 0;
+ pmap_alloc_pdir_intel_pae(kpm);
+ }
+
/* transfer all kernel mappings over into pae tables */
for (va = KERNBASE, eva = va + (nkpde << PDSHIFT86);
va < eva; va += PAGE_SIZE) {
@@ -679,15 +718,12 @@ pmap_bootstrap_pae(void)
}
/* Transfer special mappings */
- if (kpm->pm_pdir_intel) {
- uint32_t *pd, *ptp;
+ if (pd) {
+ uint32_t *ptp;
uint32_t l1idx, l2idx;
paddr_t npa;
struct vm_page *ptppg;
- pd = (uint32_t *)kpm->pm_pdir_intel;
- kpm->pm_pdir_intel = kpm->pm_pdirpa_intel = 0;
-
for (va = KERNBASE, eva = va + (nkpde << PDSHIFT86); va < eva;
va += PAGE_SIZE) {
l1idx = ((va & PT_MASK86) >> PGSHIFT);
@@ -939,7 +975,7 @@ pmap_pinit_pd_pae(struct pmap *pmap)
pmap->pm_pdir = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_dirty,
&kd_waitok);
if (pmap->pm_pdir == 0)
- panic("pmap_pinit_pd_pae: kernel_map out of virtual space!");
+ panic("kernel_map out of virtual space");
/* page index is in the pmap! */
pmap_extract(pmap_kernel(), (vaddr_t)pmap, &pmap->pm_pdirpa);
va = (vaddr_t)pmap->pm_pdir;
@@ -988,25 +1024,7 @@ pmap_pinit_pd_pae(struct pmap *pmap)
* execution, one that lacks all kernel mappings.
*/
if (cpu_meltdown) {
- int i;
-
- va = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_zero, &kd_waitok);
- if (va == 0)
- panic("%s: kernel_map out of virtual space!", __func__);
- if (!pmap_extract(pmap_kernel(),
- (vaddr_t)&pmap->pm_pdidx_intel, &pmap->pm_pdirpa_intel))
- panic("%s: can't locate PDPT", __func__);
- pmap->pm_pdir_intel = va;
-
- for (i = 0; i < 4; i++) {
- pmap->pm_pdidx_intel[i] = 0;
- if (!pmap_extract(pmap, va + i * NBPG,
- (paddr_t *)&pmap->pm_pdidx_intel[i]))
- panic("%s: can't locate PD page", __func__);
- pmap->pm_pdidx_intel[i] |= PG_V;
- DPRINTF("%s: pm_pdidx_intel[%d] = 0x%llx\n", __func__,
- i, pmap->pm_pdidx_intel[i]);
- }
+ pmap_alloc_pdir_intel_pae(pmap);
/* Copy PDEs from pmap_kernel's U-K view */
bcopy((void *)pmap_kernel()->pm_pdir_intel,
@@ -1016,9 +1034,6 @@ pmap_pinit_pd_pae(struct pmap *pmap)
"pdir_intel 0x%lx pdirpa_intel 0x%lx\n",
__func__, pmap, pmap->pm_pdir, pmap->pm_pdirpa,
pmap->pm_pdir_intel, pmap->pm_pdirpa_intel);
- } else {
- pmap->pm_pdir_intel = 0;
- pmap->pm_pdirpa_intel = 0;
}
mtx_enter(&pmaps_lock);
@@ -1903,13 +1918,11 @@ void
pmap_enter_special_pae(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int32_t flags)
{
struct pmap *pmap = pmap_kernel();
- struct vm_page *ptppg = NULL, *pdppg;
+ struct vm_page *ptppg = NULL;
pd_entry_t *pd, *ptp;
pt_entry_t *ptes;
uint32_t l2idx, l1idx;
- vaddr_t vapd;
paddr_t npa;
- int i;
/* If CPU is secure, no need to do anything */
if (!cpu_meltdown)
@@ -1917,36 +1930,9 @@ pmap_enter_special_pae(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int32_t flags)
/* Must be kernel VA */
if (va < VM_MIN_KERNEL_ADDRESS)
- panic("%s: invalid special mapping va 0x%lx requested",
- __func__, va);
-
- if (!pmap->pm_pdir_intel) {
- if ((vapd = uvm_km_zalloc(kernel_map, 4 * NBPG)) == 0)
- panic("%s: kernel_map out of virtual space!", __func__);
- pmap->pm_pdir_intel = vapd;
- if (!pmap_extract(pmap, (vaddr_t)&pmap->pm_pdidx_intel,
- &pmap->pm_pdirpa_intel))
- panic("%s: can't locate PDPT", __func__);
-
- for (i = 0; i < 4; i++) {
- pmap->pm_pdidx_intel[i] = 0;
- if (!pmap_extract(pmap, vapd + i*NBPG,
- (paddr_t *)&pmap->pm_pdidx_intel[i]))
- panic("%s: can't locate PD page", __func__);
-
- /* ensure PDPs are wired down XXX hshoexer why? */
- pdppg = PHYS_TO_VM_PAGE(pmap->pm_pdidx_intel[i]);
- if (pdppg == NULL)
- panic("%s: no vm_page for pdidx %d", __func__, i);
- atomic_clearbits_int(&pdppg->pg_flags, PG_BUSY);
- pdppg->wire_count = 1; /* no mappings yet */
-
- pmap->pm_pdidx_intel[i] |= PG_V;
-
- DPRINTF("%s: pm_pdidx_intel[%d] = 0x%llx\n", __func__,
- i, pmap->pm_pdidx_intel[i]);
- }
- }
+ panic("invalid special mapping va 0x%lx requested", va);
+
+ KASSERT(pmap->pm_pdir_intel != 0);
DPRINTF("%s: pm_pdir_intel 0x%x pm_pdirpa_intel 0x%x\n", __func__,
(uint32_t)pmap->pm_pdir_intel, (uint32_t)pmap->pm_pdirpa_intel);