author	Philip Guenther <guenther@cvs.openbsd.org>	2018-09-12 06:09:40 +0000
committer	Philip Guenther <guenther@cvs.openbsd.org>	2018-09-12 06:09:40 +0000
commit	83942f081d5efe5b6e55bcf4d891645c7df2d13b (patch)
tree	856a78be6998204ad843fef643ee763be1372ba3 /sys/arch/amd64
parent	e55f253f4f87913ac139b207d8d23303fe86a63a (diff)
When shooting pages in the KVA range, all pmaps have the page mapped,
not just pmap_kernel() and this CPU's pmap.  Meanwhile, when mapping
another pmap's tables, order the locking so that we don't need IPIs
specific to the temp pmap.

tested in snaps for a bit
ok mlarkin@
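The shootdown half of the change comes down to the CPU-selection rule used when building the IPI target mask: a page in the KVA range is mapped in every pmap, so its shootdown must go to every other running CPU, while a user-VA shootdown still only needs the CPUs on which the pmap is active. A minimal standalone model of that rule (plain C; NCPUS, KVA_MIN, struct cpu_state and shootdown_mask() are illustrative stand-ins, not kernel interfaces):

#include <stdint.h>
#include <stdio.h>

#define NCPUS    4
#define KVA_MIN  0xffff800000000000ULL	/* stand-in for VM_MIN_KERNEL_ADDRESS */

struct cpu_state {
	int running;		/* models CPUF_RUNNING */
	int pmap_active;	/* models pmap_is_active() for the pmap being shot */
};

/* Build the IPI target mask for a shootdown of 'va', skipping CPU 'self'. */
static uint64_t
shootdown_mask(uint64_t va, int self, const struct cpu_state *cpus)
{
	int is_kva = va >= KVA_MIN;
	uint64_t mask = 0;
	int i;

	for (i = 0; i < NCPUS; i++) {
		if (i == self || !cpus[i].running)
			continue;	/* never IPI ourselves or offline CPUs */
		if (!is_kva && !cpus[i].pmap_active)
			continue;	/* user VA: only CPUs running this pmap matter */
		mask |= 1ULL << i;	/* kernel VA: every running CPU is targeted */
	}
	return mask;
}

int
main(void)
{
	struct cpu_state cpus[NCPUS] = {
		{ 1, 1 }, { 1, 0 }, { 1, 1 }, { 0, 0 },
	};

	printf("user va   -> mask 0x%llx\n",
	    (unsigned long long)shootdown_mask(0x1000, 0, cpus));
	printf("kernel va -> mask 0x%llx\n",
	    (unsigned long long)shootdown_mask(KVA_MIN + 0x1000, 0, cpus));
	return 0;
}

Before this commit the pmap_is_active() test was applied unconditionally, so, as the log above notes, a CPU running some other pmap could be skipped even though its page tables also contain the kernel mapping being shot.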
Diffstat (limited to 'sys/arch/amd64')
-rw-r--r--	sys/arch/amd64/amd64/pmap.c	48
1 file changed, 26 insertions, 22 deletions
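The other half of the change is the ordering in pmap_map_ptes(): take the target pmap's mutex before loading its page directory into %cr3, so that any CPU still changing those tables has finished (and released the lock) before this CPU can start caching table and TLB entries from them; the old pm_cpus set/clear bookkeeping then becomes unnecessary. A standalone sketch of the resulting map/unmap protocol (toy_pmap, cur_cr3 and load_cr3() are userland stand-ins for the kernel's pmap, %cr3 and lcr3(), condensed from the first hunk below):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct toy_pmap {
	pthread_mutex_t mtx;	/* models pm_mtx */
	uint64_t pdirpa;	/* models pm_pdirpa */
};

static uint64_t cur_cr3;	/* models %cr3 */
static struct toy_pmap kernel_pmap = { PTHREAD_MUTEX_INITIALIZER, 0x1000 };

static void
load_cr3(uint64_t pa)
{
	cur_cr3 = pa;
}

/* Lock first, then (maybe) switch; returns the cr3 to restore, 0 if none. */
static uint64_t
map_ptes(struct toy_pmap *pm)
{
	uint64_t cr3;

	if (pm == &kernel_pmap)
		return 0;	/* the kernel's tables are always mapped */

	/*
	 * Taking the mutex before loading the tables guarantees any
	 * thread still editing them has finished and dropped the lock.
	 */
	pthread_mutex_lock(&pm->mtx);

	cr3 = cur_cr3;
	if (pm->pdirpa == cr3)
		cr3 = 0;	/* already on this pmap's tables */
	else
		load_cr3(pm->pdirpa);
	return cr3;
}

static void
unmap_ptes(struct toy_pmap *pm, uint64_t save_cr3)
{
	if (pm != &kernel_pmap)
		pthread_mutex_unlock(&pm->mtx);
	if (save_cr3 != 0)
		load_cr3(save_cr3);
}

int
main(void)
{
	struct toy_pmap user_pmap = { PTHREAD_MUTEX_INITIALIZER, 0x2000 };
	uint64_t saved;

	cur_cr3 = kernel_pmap.pdirpa;
	saved = map_ptes(&user_pmap);	/* lock, then switch to 0x2000 */
	printf("mapped: cr3=0x%llx saved=0x%llx\n",
	    (unsigned long long)cur_cr3, (unsigned long long)saved);
	unmap_ptes(&user_pmap, saved);	/* unlock, then restore 0x1000 */
	printf("unmapped: cr3=0x%llx\n", (unsigned long long)cur_cr3);
	return 0;
}

The return-value convention is unchanged by the commit: 0 means no switch was needed, anything else is the previous cr3 to restore in pmap_unmap_ptes().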
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index 3b13d45d2fa..ded25b56bcf 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.118 2018/09/09 22:46:54 guenther Exp $ */
+/* $OpenBSD: pmap.c,v 1.119 2018/09/12 06:09:39 guenther Exp $ */
 /* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
 
 /*
@@ -375,28 +375,28 @@ pmap_sync_flags_pte(struct vm_page *pg, u_long pte)
 paddr_t
 pmap_map_ptes(struct pmap *pmap)
 {
-	paddr_t cr3 = rcr3();
+	paddr_t cr3;
 
 	KASSERT(pmap->pm_type != PMAP_TYPE_EPT);
 
 	/* the kernel's pmap is always accessible */
-	if (pmap == pmap_kernel() || pmap->pm_pdirpa == cr3) {
-		cr3 = 0;
-	} else {
-		/*
-		 * Not sure if we need this, but better be safe.
-		 * We don't have the current pmap in order to unset its
-		 * active bit, but this just means that we may receive
-		 * an unneccessary cross-CPU TLB flush now and then.
-		 */
-		x86_atomic_setbits_u64(&pmap->pm_cpus, (1ULL << cpu_number()));
+	if (pmap == pmap_kernel())
+		return 0;
+	/*
+	 * Lock the target map before switching to its page tables to
+	 * guarantee other CPUs have finished changing the tables before
+	 * we potentially start caching table and TLB entries.
+	 */
+	mtx_enter(&pmap->pm_mtx);
+
+	cr3 = rcr3();
+	if (pmap->pm_pdirpa == cr3)
+		cr3 = 0;
+	else {
 		lcr3(pmap->pm_pdirpa);
 	}
-	if (pmap != pmap_kernel())
-		mtx_enter(&pmap->pm_mtx);
-
 	return cr3;
 }
@@ -406,10 +406,8 @@ pmap_unmap_ptes(struct pmap *pmap, paddr_t save_cr3)
 	if (pmap != pmap_kernel())
 		mtx_leave(&pmap->pm_mtx);
 
-	if (save_cr3 != 0) {
-		x86_atomic_clearbits_u64(&pmap->pm_cpus, (1ULL << cpu_number()));
+	if (save_cr3 != 0)
 		lcr3(save_cr3);
-	}
 }
 
 int
@@ -2864,10 +2862,12 @@ pmap_tlb_shootpage(struct pmap *pm, vaddr_t va, int shootself)
 	CPU_INFO_ITERATOR cii;
 	long wait = 0;
 	u_int64_t mask = 0;
+	int is_kva = va >= VM_MIN_KERNEL_ADDRESS;
 
 	CPU_INFO_FOREACH(cii, ci) {
-		if (ci == self || !pmap_is_active(pm, ci->ci_cpuid) ||
-		    !(ci->ci_flags & CPUF_RUNNING))
+		if (ci == self || !(ci->ci_flags & CPUF_RUNNING))
+			continue;
+		if (!is_kva && !pmap_is_active(pm, ci->ci_cpuid))
 			continue;
 		mask |= (1ULL << ci->ci_cpuid);
 		wait++;
@@ -2913,11 +2913,13 @@ pmap_tlb_shootrange(struct pmap *pm, vaddr_t sva, vaddr_t eva, int shootself)
 	CPU_INFO_ITERATOR cii;
 	long wait = 0;
 	u_int64_t mask = 0;
+	int is_kva = sva >= VM_MIN_KERNEL_ADDRESS;
 	vaddr_t va;
 
 	CPU_INFO_FOREACH(cii, ci) {
-		if (ci == self || !pmap_is_active(pm, ci->ci_cpuid) ||
-		    !(ci->ci_flags & CPUF_RUNNING))
+		if (ci == self || !(ci->ci_flags & CPUF_RUNNING))
+			continue;
+		if (!is_kva && !pmap_is_active(pm, ci->ci_cpuid))
 			continue;
 		mask |= (1ULL << ci->ci_cpuid);
 		wait++;
@@ -2966,6 +2968,8 @@ pmap_tlb_shoottlb(struct pmap *pm, int shootself)
 	long wait = 0;
 	u_int64_t mask = 0;
 
+	KASSERT(pm != pmap_kernel());
+
 	CPU_INFO_FOREACH(cii, ci) {
 		if (ci == self || !pmap_is_active(pm, ci->ci_cpuid) ||
 		    !(ci->ci_flags & CPUF_RUNNING))