author     Visa Hankala <visa@cvs.openbsd.org>   2015-12-31 04:25:52 +0000
committer  Visa Hankala <visa@cvs.openbsd.org>   2015-12-31 04:25:52 +0000
commit     78509a3767dac429726f769962941c1959983548 (patch)
tree       82afb1e675c2a78cf0176a63aa371cd92fe5645c /sys/arch/mips64
parent     2d24f6ab69fa3a12156a33caeff5860b974fe0c3 (diff)
Protect mips64 pmap and pvlist structs with a mutex to make pmap
operations MP-safe. Tested on octeon and sgi (IP27, IP30). Feedback from kettenis@ long ago
Diffstat (limited to 'sys/arch/mips64')
-rw-r--r--   sys/arch/mips64/include/pmap.h    22
-rw-r--r--   sys/arch/mips64/mips64/pmap.c    268
-rw-r--r--   sys/arch/mips64/mips64/trap.c     32
3 files changed, 254 insertions, 68 deletions
diff --git a/sys/arch/mips64/include/pmap.h b/sys/arch/mips64/include/pmap.h
index fa47de3f1d3..b50460d7f06 100644
--- a/sys/arch/mips64/include/pmap.h
+++ b/sys/arch/mips64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.38 2015/02/15 21:34:33 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.39 2015/12/31 04:25:51 visa Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -38,6 +38,8 @@
#ifndef _MIPS64_PMAP_H_
#define _MIPS64_PMAP_H_
+#include <sys/mutex.h>
+
#ifdef _KERNEL
#include <machine/pte.h>
@@ -97,6 +99,20 @@
/* number of segments entries */
#define PMAP_SEGTABSIZE (PMAP_L2SIZE / sizeof(void *))
+/*
+ * Concurrency control
+ *
+ * - struct pmap:
+ * - pm_dir_mtx must be held when adding or removing a mapping in the pmap.
+ * - pm_pte_mtx must be held when modifying the page directory or page table
+ * entries. In addition, the lock protects the subfields of field pm_stats.
+ *
+ * - struct vm_page_md:
+ * - pv_mtx protects the physical-to-virtual list.
+ *
+ * The order for locking is pm_dir_mtx -> pv_mtx -> pm_pte_mtx.
+ */
+
struct segtab {
pt_entry_t *seg_tab[PMAP_SEGTABSIZE];
};
@@ -110,6 +126,8 @@ struct pmap_asid_info {
* Machine dependent pmap structure.
*/
typedef struct pmap {
+ struct mutex pm_dir_mtx; /* page directory lock */
+ struct mutex pm_pte_mtx; /* page table entry lock */
int pm_count; /* pmap reference count */
struct pmap_statistics pm_stats; /* pmap statistics */
struct segtab *pm_segtab; /* pointers to pages of PTEs */
@@ -212,11 +230,13 @@ typedef struct pv_entry {
} *pv_entry_t;
struct vm_page_md {
+ struct mutex pv_mtx; /* pv list lock */
struct pv_entry pv_ent; /* pv list of this seg */
};
#define VM_MDPAGE_INIT(pg) \
do { \
+ mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM); \
(pg)->mdpage.pv_ent.pv_next = NULL; \
(pg)->mdpage.pv_ent.pv_pmap = NULL; \
(pg)->mdpage.pv_ent.pv_va = 0; \
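The comment added to pmap.h above documents the lock order pm_dir_mtx -> pv_mtx -> pm_pte_mtx. A minimal sketch of that discipline, assuming the OpenBSD mutex(9) API and the fields introduced by this patch; the helper name is hypothetical and not part of the diff:

	/*
	 * Acquire in the documented order, release in reverse.
	 * pmap_enter() in pmap.c below follows the same sequence.
	 */
	void
	example_pmap_locked_op(pmap_t pmap, struct vm_page *pg)
	{
		mtx_enter(&pmap->pm_dir_mtx);	/* 1. per-pmap directory lock */
		mtx_enter(&pg->mdpage.pv_mtx);	/* 2. per-page PV list lock */
		mtx_enter(&pmap->pm_pte_mtx);	/* 3. PTE and pm_stats lock */

		/* ... modify the page tables and the PV list here ... */

		mtx_leave(&pmap->pm_pte_mtx);
		mtx_leave(&pg->mdpage.pv_mtx);
		mtx_leave(&pmap->pm_dir_mtx);
	}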
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
index 83722d2a146..c3d58b86b69 100644
--- a/sys/arch/mips64/mips64/pmap.c
+++ b/sys/arch/mips64/mips64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.81 2015/08/11 13:15:36 visa Exp $ */
+/* $OpenBSD: pmap.c,v 1.82 2015/12/31 04:25:51 visa Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -59,6 +59,8 @@ struct pool pmap_pg_pool;
int pmap_pv_lowat = PMAP_PV_LOWAT;
uint pmap_alloc_tlbpid(struct proc *);
+void pmap_do_remove(pmap_t, vaddr_t, vaddr_t);
+void pmap_do_page_cache(vm_page_t, u_int);
int pmap_enter_pv(pmap_t, vaddr_t, vm_page_t, pt_entry_t *);
void pmap_remove_pv(pmap_t, vaddr_t, paddr_t);
void *pmap_pg_alloc(struct pool *, int, int *);
@@ -165,6 +167,8 @@ pmap_invalidate_kernel_page(vaddr_t va)
struct cpu_info *ci;
CPU_INFO_ITERATOR cii;
+ MUTEX_ASSERT_LOCKED(&pmap_kernel()->pm_pte_mtx);
+
CPU_INFO_FOREACH(cii, ci)
if (cpuset_isset(&cpus_running, ci))
cpumask |= 1 << ci->ci_cpuid;
@@ -189,6 +193,8 @@ pmap_invalidate_user_page(pmap_t pmap, vaddr_t va)
struct cpu_info *ci;
CPU_INFO_ITERATOR cii;
+ MUTEX_ASSERT_LOCKED(&pmap->pm_pte_mtx);
+
CPU_INFO_FOREACH(cii, ci)
if (cpuset_isset(&cpus_running, ci)) {
unsigned int i = ci->ci_cpuid;
@@ -244,6 +250,8 @@ pmap_update_kernel_page(vaddr_t va, pt_entry_t entry)
struct cpu_info *ci;
CPU_INFO_ITERATOR cii;
+ MUTEX_ASSERT_LOCKED(&pmap_kernel()->pm_pte_mtx);
+
CPU_INFO_FOREACH(cii, ci)
if (cpuset_isset(&cpus_running, ci))
cpumask |= 1 << ci->ci_cpuid;
@@ -271,6 +279,8 @@ pmap_update_user_page(pmap_t pmap, vaddr_t va, pt_entry_t entry)
struct cpu_info *ci;
CPU_INFO_ITERATOR cii;
+ MUTEX_ASSERT_LOCKED(&pmap->pm_pte_mtx);
+
CPU_INFO_FOREACH(cii, ci)
if (cpuset_isset(&cpus_running, ci)) {
unsigned int i = ci->ci_cpuid;
@@ -376,7 +386,11 @@ pmap_bootstrap(void)
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0,"pvpl", NULL);
pool_init(&pmap_pg_pool, PMAP_L2SIZE, PMAP_L2SIZE, 0, 0, "pmappgpl",
&pmap_pg_allocator);
+ pool_setipl(&pmap_pv_pool, IPL_VM);
+ pool_setipl(&pmap_pg_pool, IPL_VM);
+ mtx_init(&pmap_kernel()->pm_dir_mtx, IPL_VM);
+ mtx_init(&pmap_kernel()->pm_pte_mtx, IPL_VM);
pmap_kernel()->pm_count = 1;
#ifndef CPU_R8000
@@ -499,17 +513,17 @@ pmap_t
pmap_create()
{
pmap_t pmap;
- int i, s;
+ int i;
extern struct vmspace vmspace0;
extern struct user *proc0paddr;
DPRINTF(PDB_FOLLOW|PDB_CREATE, ("pmap_create()\n"));
- s = splvm();
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK | PR_ZERO);
- splx(s);
+ mtx_init(&pmap->pm_dir_mtx, IPL_VM);
+ mtx_init(&pmap->pm_pte_mtx, IPL_VM);
pmap->pm_count = 1;
pmap->pm_segtab = (struct segtab *)pool_get(&pmap_pg_pool,
@@ -544,11 +558,11 @@ extern struct user *proc0paddr;
void
pmap_destroy(pmap_t pmap)
{
- int s, count;
+ int count;
DPRINTF(PDB_FOLLOW|PDB_CREATE, ("pmap_destroy(%p)\n", pmap));
- count = --pmap->pm_count;
+ count = atomic_dec_int_nv(&pmap->pm_count);
if (count > 0)
return;
@@ -581,9 +595,7 @@ pmap_destroy(pmap_t pmap)
#endif
}
- s = splvm();
pool_put(&pmap_pmap_pool, pmap);
- splx(s);
}
/*
@@ -595,7 +607,7 @@ pmap_reference(pmap_t pmap)
DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
- pmap->pm_count++;
+ atomic_inc_int(&pmap->pm_count);
}
/*
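With the pmap locks in place, the reference count no longer needs splvm(); pmap_reference() and pmap_destroy() above rely on atomic operations instead. The same idiom in isolation, assuming <sys/atomic.h>; the structure and function names are illustrative only:

	struct obj {
		volatile unsigned int	o_refcnt;	/* like pm_count */
	};

	void
	obj_ref(struct obj *o)
	{
		atomic_inc_int(&o->o_refcnt);		/* cf. pmap_reference() */
	}

	void
	obj_unref(struct obj *o)
	{
		/* atomic_dec_int_nv() returns the new value, cf. pmap_destroy() */
		if (atomic_dec_int_nv(&o->o_refcnt) > 0)
			return;
		/* last reference gone: safe to free the object here */
	}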
@@ -633,19 +645,20 @@ pmap_deactivate(struct proc *p)
* rounded to the page size.
*/
void
-pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
+pmap_do_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
vaddr_t nssva;
pt_entry_t *pte, entry;
paddr_t pa;
struct cpu_info *ci = curcpu();
+ MUTEX_ASSERT_LOCKED(&pmap->pm_dir_mtx);
+
DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
("pmap_remove(%p, %p, %p)\n", pmap, (void *)sva, (void *)eva));
stat_count(remove_stats.calls);
- KERNEL_LOCK();
if (pmap == pmap_kernel()) {
/* remove entries from kernel pmap */
#ifdef DIAGNOSTIC
@@ -654,6 +667,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
panic("pmap_remove(%p, %p): not in range",
(void *)sva, (void *)eva);
#endif
+ mtx_enter(&pmap->pm_pte_mtx);
pte = kvtopte(sva);
for(; sva < eva; sva += PAGE_SIZE, pte++) {
entry = *pte;
@@ -665,15 +679,18 @@ pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
pa = pfn_to_pad(entry);
if ((entry & PG_CACHEMODE) == PG_CACHED)
Mips_HitSyncDCache(ci, sva, PAGE_SIZE);
- pmap_remove_pv(pmap, sva, pa);
*pte = PG_NV | PG_G;
/*
* Flush the TLB for the given address.
*/
pmap_invalidate_kernel_page(sva);
stat_count(remove_stats.flushes);
+
+ mtx_leave(&pmap->pm_pte_mtx);
+ pmap_remove_pv(pmap, sva, pa);
+ mtx_enter(&pmap->pm_pte_mtx);
}
- KERNEL_UNLOCK();
+ mtx_leave(&pmap->pm_pte_mtx);
return;
}
@@ -681,6 +698,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
if (eva > VM_MAXUSER_ADDRESS)
panic("pmap_remove: uva not in range");
#endif
+ mtx_enter(&pmap->pm_pte_mtx);
while (sva < eva) {
nssva = mips_trunc_seg(sva) + NBSEG;
if (nssva == 0 || nssva > eva)
@@ -707,16 +725,27 @@ pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
pa = pfn_to_pad(entry);
if ((entry & PG_CACHEMODE) == PG_CACHED)
Mips_SyncDCachePage(ci, sva, pa);
- pmap_remove_pv(pmap, sva, pa);
*pte = PG_NV;
/*
* Flush the TLB for the given address.
*/
pmap_invalidate_user_page(pmap, sva);
stat_count(remove_stats.flushes);
+
+ mtx_leave(&pmap->pm_pte_mtx);
+ pmap_remove_pv(pmap, sva, pa);
+ mtx_enter(&pmap->pm_pte_mtx);
}
}
- KERNEL_UNLOCK();
+ mtx_leave(&pmap->pm_pte_mtx);
+}
+
+void
+pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
+{
+ mtx_enter(&pmap->pm_dir_mtx);
+ pmap_do_remove(pmap, sva, eva);
+ mtx_leave(&pmap->pm_dir_mtx);
}
/*
@@ -727,9 +756,9 @@ pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
+ pmap_t pmap;
pv_entry_t pv;
vaddr_t va;
- int s;
if (prot == PROT_NONE) {
DPRINTF(PDB_REMOVE, ("pmap_page_protect(%p, 0x%x)\n", pg, prot));
@@ -746,30 +775,62 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
/* copy_on_write */
case PROT_READ:
case PROT_READ | PROT_EXEC:
+ mtx_enter(&pg->mdpage.pv_mtx);
pv = pg_to_pvh(pg);
- s = splvm();
/*
* Loop over all current mappings setting/clearing as apropos.
*/
if (pv->pv_pmap != NULL) {
for (; pv; pv = pv->pv_next) {
va = pv->pv_va;
+ /*
+ * It is safe to leave the page locked because
+ * pmap_protect() will only change PTEs due to
+ * the PROT_READ bit.
+ */
pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
prot);
}
}
- splx(s);
+ mtx_leave(&pg->mdpage.pv_mtx);
break;
/* remove_all */
default:
- pv = pg_to_pvh(pg);
- s = splvm();
- while (pv->pv_pmap != NULL) {
+ mtx_enter(&pg->mdpage.pv_mtx);
+ while ((pv = pg_to_pvh(pg))->pv_pmap != NULL) {
+ pmap = pv->pv_pmap;
va = pv->pv_va;
- pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
+
+ /*
+ * The PV list lock has to be released for
+ * pmap_do_remove(). The lock ordering prevents locking
+ * the pmap before the release, so another CPU might
+ * remove or replace the page at the virtual address in
+ * the pmap. Continue with this PV entry only if the
+ * list head is unchanged after reacquiring the locks.
+ */
+ pmap_reference(pmap);
+ mtx_leave(&pg->mdpage.pv_mtx);
+ mtx_enter(&pmap->pm_dir_mtx);
+ mtx_enter(&pg->mdpage.pv_mtx);
+ if (pg_to_pvh(pg)->pv_pmap != pmap ||
+ pg_to_pvh(pg)->pv_va != va) {
+ mtx_leave(&pg->mdpage.pv_mtx);
+ mtx_leave(&pmap->pm_dir_mtx);
+ pmap_destroy(pmap);
+ mtx_enter(&pg->mdpage.pv_mtx);
+ continue;
+ }
+ mtx_leave(&pg->mdpage.pv_mtx);
+
+ pmap_do_remove(pmap, va, va + PAGE_SIZE);
+
+ mtx_leave(&pmap->pm_dir_mtx);
+ pmap_destroy(pmap);
+ mtx_enter(&pg->mdpage.pv_mtx);
}
- splx(s);
+ mtx_leave(&pg->mdpage.pv_mtx);
}
}
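The remove_all case above cannot take pm_dir_mtx while holding pv_mtx without inverting the documented lock order, so it drops pv_mtx, reacquires the locks in order, and revalidates the PV list head before acting. The same retry pattern, condensed; names follow the patch but this is an illustrative sketch, not the committed code:

	mtx_enter(&pg->mdpage.pv_mtx);
	while ((pv = pg_to_pvh(pg))->pv_pmap != NULL) {
		pmap = pv->pv_pmap;
		va = pv->pv_va;

		pmap_reference(pmap);		/* keep the pmap alive while unlocked */
		mtx_leave(&pg->mdpage.pv_mtx);	/* drop pv_mtx to respect the order */
		mtx_enter(&pmap->pm_dir_mtx);	/* relock: dir before pv */
		mtx_enter(&pg->mdpage.pv_mtx);

		if (pg_to_pvh(pg)->pv_pmap == pmap && pg_to_pvh(pg)->pv_va == va) {
			/* head unchanged: safe to remove this mapping */
			mtx_leave(&pg->mdpage.pv_mtx);
			pmap_do_remove(pmap, va, va + PAGE_SIZE);
		} else {
			/* another CPU changed the list meanwhile: skip and retry */
			mtx_leave(&pg->mdpage.pv_mtx);
		}
		mtx_leave(&pmap->pm_dir_mtx);
		pmap_destroy(pmap);		/* drop the extra reference */
		mtx_enter(&pg->mdpage.pv_mtx);
	}
	mtx_leave(&pg->mdpage.pv_mtx);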
@@ -796,6 +857,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
p = (prot & PROT_WRITE) ? PG_M : PG_RO;
if (pmap == pmap_kernel()) {
+ mtx_enter(&pmap->pm_pte_mtx);
/*
* Change entries in kernel pmap.
* This will trap if the page is writeable (in order to set
@@ -825,6 +887,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
*/
pmap_update_kernel_page(sva, entry);
}
+ mtx_leave(&pmap->pm_pte_mtx);
return;
}
@@ -832,6 +895,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
if (eva > VM_MAXUSER_ADDRESS)
panic("pmap_protect: uva not in range");
#endif
+ mtx_enter(&pmap->pm_pte_mtx);
while (sva < eva) {
nssva = mips_trunc_seg(sva) + NBSEG;
if (nssva == 0 || nssva > eva)
@@ -861,6 +925,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
pmap_update_user_page(pmap, sva, entry);
}
}
+ mtx_leave(&pmap->pm_pte_mtx);
}
/*
@@ -898,9 +963,13 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
}
#endif
+ mtx_enter(&pmap->pm_dir_mtx);
+
pg = PHYS_TO_VM_PAGE(pa);
if (pg != NULL) {
+ mtx_enter(&pg->mdpage.pv_mtx);
+
if (!(prot & PROT_WRITE)) {
npte = PG_ROPAGE;
} else {
@@ -945,18 +1014,33 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
}
if (pmap == pmap_kernel()) {
+ mtx_enter(&pmap->pm_pte_mtx);
if (pg != NULL) {
if (pmap_enter_pv(pmap, va, pg, &npte) != 0) {
- if (flags & PMAP_CANFAIL)
+ if (flags & PMAP_CANFAIL) {
+ mtx_leave(&pmap->pm_pte_mtx);
+ mtx_leave(&pg->mdpage.pv_mtx);
+ mtx_leave(&pmap->pm_dir_mtx);
return ENOMEM;
+ }
panic("pmap_enter: pmap_enter_pv() failed");
}
}
pte = kvtopte(va);
if ((*pte & PG_V) && pa != pfn_to_pad(*pte)) {
- pmap_remove(pmap, va, va + PAGE_SIZE);
+ mtx_leave(&pmap->pm_pte_mtx);
+ if (pg != NULL)
+ mtx_leave(&pg->mdpage.pv_mtx);
+
+ pmap_do_remove(pmap, va, va + PAGE_SIZE);
stat_count(enter_stats.mchange);
+
+ if (pg != NULL)
+ mtx_enter(&pg->mdpage.pv_mtx);
+ mtx_enter(&pmap->pm_pte_mtx);
+
+ KASSERT((*pte & PG_V) == 0);
}
if ((*pte & PG_V) == 0) {
pmap->pm_stats.resident_count++;
@@ -977,12 +1061,18 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
*/
*pte = npte;
pmap_update_kernel_page(va, npte);
+
+ mtx_leave(&pmap->pm_pte_mtx);
+ if (pg != NULL)
+ mtx_leave(&pg->mdpage.pv_mtx);
+ mtx_leave(&pmap->pm_dir_mtx);
return 0;
}
/*
* User space mapping. Do table build.
*/
+ mtx_enter(&pmap->pm_pte_mtx);
if ((pte = pmap_segmap(pmap, va)) == NULL) {
unsigned int wflags = PR_WAITOK | PR_ZERO;
@@ -990,16 +1080,25 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
wflags |= PR_LIMITFAIL;
pte = (pt_entry_t *)pool_get(&pmap_pg_pool, wflags);
- if (pte == NULL)
+ if (pte == NULL) {
+ mtx_leave(&pmap->pm_pte_mtx);
+ if (pg != NULL)
+ mtx_leave(&pg->mdpage.pv_mtx);
+ mtx_leave(&pmap->pm_dir_mtx);
return ENOMEM; /* can only happen if PMAP_CANFAIL */
+ }
pmap_segmap(pmap, va) = pte;
}
if (pg != NULL) {
if (pmap_enter_pv(pmap, va, pg, &npte) != 0) {
- if (flags & PMAP_CANFAIL)
+ if (flags & PMAP_CANFAIL) {
+ mtx_leave(&pmap->pm_pte_mtx);
+ mtx_leave(&pg->mdpage.pv_mtx);
+ mtx_leave(&pmap->pm_dir_mtx);
return ENOMEM;
+ }
panic("pmap_enter: pmap_enter_pv() failed");
}
}
@@ -1012,8 +1111,18 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* MIPS pages in a OpenBSD page.
*/
if ((*pte & PG_V) && pa != pfn_to_pad(*pte)) {
- pmap_remove(pmap, va, va + PAGE_SIZE);
+ mtx_leave(&pmap->pm_pte_mtx);
+ if (pg != NULL)
+ mtx_leave(&pg->mdpage.pv_mtx);
+
+ pmap_do_remove(pmap, va, va + PAGE_SIZE);
stat_count(enter_stats.mchange);
+
+ if (pg != NULL)
+ mtx_enter(&pg->mdpage.pv_mtx);
+ mtx_enter(&pmap->pm_pte_mtx);
+
+ KASSERT((*pte & PG_V) == 0);
}
if ((*pte & PG_V) == 0) {
pmap->pm_stats.resident_count++;
@@ -1065,6 +1174,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if (pg != NULL && (prot & PROT_EXEC))
Mips_InvalidateICache(ci, va, PAGE_SIZE);
+ mtx_leave(&pmap->pm_pte_mtx);
+ if (pg != NULL)
+ mtx_leave(&pg->mdpage.pv_mtx);
+ mtx_leave(&pmap->pm_dir_mtx);
return 0;
}
@@ -1087,6 +1200,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
npte |= PG_RWPAGE;
else
npte |= PG_ROPAGE;
+ mtx_enter(&pmap_kernel()->pm_dir_mtx);
+ mtx_enter(&pmap_kernel()->pm_pte_mtx);
pte = kvtopte(va);
if ((*pte & PG_V) == 0) {
pmap_kernel()->pm_stats.resident_count++;
@@ -1097,6 +1212,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
}
*pte = npte;
pmap_update_kernel_page(va, npte);
+ mtx_leave(&pmap_kernel()->pm_pte_mtx);
+ mtx_leave(&pmap_kernel()->pm_dir_mtx);
}
/*
@@ -1120,6 +1237,8 @@ pmap_kremove(vaddr_t va, vsize_t len)
eva >= VM_MAX_KERNEL_ADDRESS || eva < va)
panic("pmap_kremove: va %p len %lx", (void *)va, len);
#endif
+ mtx_enter(&pmap_kernel()->pm_dir_mtx);
+ mtx_enter(&pmap_kernel()->pm_pte_mtx);
pte = kvtopte(va);
for (; va < eva; va += PAGE_SIZE, pte++) {
entry = *pte;
@@ -1132,6 +1251,8 @@ pmap_kremove(vaddr_t va, vsize_t len)
pmap_kernel()->pm_stats.wired_count--;
pmap_kernel()->pm_stats.resident_count--;
}
+ mtx_leave(&pmap_kernel()->pm_pte_mtx);
+ mtx_leave(&pmap_kernel()->pm_dir_mtx);
}
void
@@ -1139,6 +1260,7 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
{
pt_entry_t *pte;
+ mtx_enter(&pmap->pm_pte_mtx);
if (pmap == pmap_kernel())
pte = kvtopte(va);
else {
@@ -1153,6 +1275,7 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
pmap->pm_stats.wired_count--;
}
}
+ mtx_leave(&pmap->pm_pte_mtx);
}
/*
@@ -1185,13 +1308,16 @@ pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
va >= VM_MAX_KERNEL_ADDRESS)
panic("pmap_extract(%p, %p)", pmap, (void *)va);
#endif
+ mtx_enter(&pmap->pm_pte_mtx);
pte = kvtopte(va);
if (*pte & PG_V)
pa = pfn_to_pad(*pte) | (va & PAGE_MASK);
else
rv = FALSE;
+ mtx_leave(&pmap->pm_pte_mtx);
}
} else {
+ mtx_enter(&pmap->pm_pte_mtx);
if (!(pte = pmap_segmap(pmap, va)))
rv = FALSE;
else {
@@ -1201,6 +1327,7 @@ pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
else
rv = FALSE;
}
+ mtx_leave(&pmap->pm_pte_mtx);
}
if (rv != FALSE)
*pap = pa;
@@ -1259,14 +1386,17 @@ pmap_zero_page(struct vm_page *pg)
DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%p)\n", (void *)phys));
va = (vaddr_t)PHYS_TO_XKPHYS(phys, CCA_CACHED);
- pv = pg_to_pvh(pg);
+ mtx_enter(&pg->mdpage.pv_mtx);
if (pg->pg_flags & PGF_UNCACHED)
df = 1;
else if (pg->pg_flags & PGF_CACHED) {
+ pv = pg_to_pvh(pg);
df = ((pv->pv_va ^ va) & cache_valias_mask) != 0;
if (df)
Mips_SyncDCachePage(ci, pv->pv_va, phys);
}
+ mtx_leave(&pg->mdpage.pv_mtx);
+
mem_zero_page(va);
if (df || cache_valias_mask != 0)
Mips_HitSyncDCache(ci, va, PAGE_SIZE);
@@ -1301,6 +1431,7 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
DPRINTF(PDB_FOLLOW,
("pmap_copy_page(%p, %p)\n", (void *)src, (void *)dst));
+ mtx_enter(&srcpg->mdpage.pv_mtx);
pv = pg_to_pvh(srcpg);
if (srcpg->pg_flags & PGF_UNCACHED)
sf = 1;
@@ -1309,6 +1440,9 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
if (sf)
Mips_SyncDCachePage(ci, pv->pv_va, src);
}
+ mtx_leave(&srcpg->mdpage.pv_mtx);
+
+ mtx_enter(&dstpg->mdpage.pv_mtx);
pv = pg_to_pvh(dstpg);
if (dstpg->pg_flags & PGF_UNCACHED)
df = 1;
@@ -1317,6 +1451,7 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
if (df)
Mips_SyncDCachePage(ci, pv->pv_va, dst);
}
+ mtx_leave(&dstpg->mdpage.pv_mtx);
memcpy((void *)d, (void *)s, PAGE_SIZE);
@@ -1342,22 +1477,23 @@ pmap_clear_modify(struct vm_page *pg)
pv_entry_t pv;
pt_entry_t *pte, entry;
boolean_t rv = FALSE;
- int s;
paddr_t pa;
struct cpu_info *ci = curcpu();
DPRINTF(PDB_FOLLOW,
("pmap_clear_modify(%p)\n", (void *)VM_PAGE_TO_PHYS(pg)));
- pa = VM_PAGE_TO_PHYS(pg);
- pv = pg_to_pvh(pg);
- s = splvm();
+ mtx_enter(&pg->mdpage.pv_mtx);
+
if (pg->pg_flags & PGF_ATTR_MOD) {
atomic_clearbits_int(&pg->pg_flags, PGF_ATTR_MOD);
rv = TRUE;
}
- for (; pv != NULL; pv = pv->pv_next) {
+ pa = VM_PAGE_TO_PHYS(pg);
+ pv = pg_to_pvh(pg);
+ for (; pv != NULL && pv->pv_pmap != NULL; pv = pv->pv_next) {
+ mtx_enter(&pv->pv_pmap->pm_pte_mtx);
if (pv->pv_pmap == pmap_kernel()) {
#ifdef DIAGNOSTIC
if (pv->pv_va < VM_MIN_KERNEL_ADDRESS ||
@@ -1376,7 +1512,7 @@ pmap_clear_modify(struct vm_page *pg)
*pte = entry;
pmap_update_kernel_page(pv->pv_va, entry);
}
- } else if (pv->pv_pmap != NULL) {
+ } else {
if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va)) == NULL)
continue;
pte += uvtopte(pv->pv_va);
@@ -1390,8 +1526,10 @@ pmap_clear_modify(struct vm_page *pg)
pmap_update_user_page(pv->pv_pmap, pv->pv_va, entry);
}
}
+ mtx_leave(&pv->pv_pmap->pm_pte_mtx);
}
- splx(s);
+
+ mtx_leave(&pg->mdpage.pv_mtx);
return rv;
}
@@ -1457,26 +1595,26 @@ pmap_is_page_ro(pmap_t pmap, vaddr_t va, pt_entry_t entry)
return ((entry & PG_RO) != 0);
}
-
/*
* Walk the PV tree for a physical page and change all its
* mappings to cached or uncached.
*/
void
-pmap_page_cache(vm_page_t pg, u_int mode)
+pmap_do_page_cache(vm_page_t pg, u_int mode)
{
pv_entry_t pv;
pt_entry_t *pte, entry;
pt_entry_t newmode;
- int s;
+
+ MUTEX_ASSERT_LOCKED(&pg->mdpage.pv_mtx);
DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_page_cache(%p)\n", pg));
newmode = mode & PGF_CACHED ? PG_CACHED : PG_UNCACHED;
- pv = pg_to_pvh(pg);
- s = splvm();
- for (; pv != NULL; pv = pv->pv_next) {
+ pv = pg_to_pvh(pg);
+ for (; pv != NULL && pv->pv_pmap != NULL; pv = pv->pv_next) {
+ mtx_enter(&pv->pv_pmap->pm_pte_mtx);
if (pv->pv_pmap == pmap_kernel()) {
#ifdef DIAGNOSTIC
if (pv->pv_va < VM_MIN_KERNEL_ADDRESS ||
@@ -1490,7 +1628,7 @@ pmap_page_cache(vm_page_t pg, u_int mode)
*pte = entry;
pmap_update_kernel_page(pv->pv_va, entry);
}
- } else if (pv->pv_pmap != NULL) {
+ } else {
if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va))) {
pte += uvtopte(pv->pv_va);
entry = *pte;
@@ -1501,10 +1639,18 @@ pmap_page_cache(vm_page_t pg, u_int mode)
}
}
}
+ mtx_leave(&pv->pv_pmap->pm_pte_mtx);
}
atomic_clearbits_int(&pg->pg_flags, PGF_CACHED | PGF_UNCACHED);
atomic_setbits_int(&pg->pg_flags, mode);
- splx(s);
+}
+
+void
+pmap_page_cache(vm_page_t pg, u_int mode)
+{
+ mtx_enter(&pg->mdpage.pv_mtx);
+ pmap_do_page_cache(pg, mode);
+ mtx_leave(&pg->mdpage.pv_mtx);
}
/*
@@ -1564,11 +1710,13 @@ int
pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
{
pv_entry_t pv, npv;
- int s;
+
+ MUTEX_ASSERT_LOCKED(&pmap->pm_dir_mtx);
+ MUTEX_ASSERT_LOCKED(&pg->mdpage.pv_mtx);
+ MUTEX_ASSERT_LOCKED(&pmap->pm_pte_mtx);
pv = pg_to_pvh(pg);
- s = splvm();
if (pv->pv_pmap == NULL) {
/*
* No entries yet, use header as the first entry
@@ -1608,10 +1756,8 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
* we are only changing the protection bits.
*/
for (npv = pv; npv; npv = npv->pv_next) {
- if (pmap == npv->pv_pmap && va == npv->pv_va) {
- splx(s);
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
return 0;
- }
}
DPRINTF(PDB_PVENTRY,
@@ -1619,10 +1765,8 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
pmap, (void *)va, (void *)VM_PAGE_TO_PHYS(pg)));
npv = pmap_pv_alloc();
- if (npv == NULL) {
- splx(s);
+ if (npv == NULL)
return ENOMEM;
- }
if ((*npte & PG_CACHED) != 0 &&
(pg->pg_flags & PGF_CACHED) != 0 && cache_valias_mask != 0) {
@@ -1641,7 +1785,10 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
__func__, (void *)VM_PAGE_TO_PHYS(pg),
(void *)pv->pv_va, (void *)va);
#endif
- pmap_page_cache(pg, 0);
+ mtx_leave(&pmap->pm_pte_mtx);
+ pmap_do_page_cache(pg, 0);
+ mtx_enter(&pmap->pm_pte_mtx);
+
Mips_SyncDCachePage(curcpu(), pv->pv_va,
VM_PAGE_TO_PHYS(pg));
*npte = (*npte & ~PG_CACHEMODE) | PG_UNCACHED;
@@ -1657,7 +1804,6 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
stat_count(enter_stats.secondpv);
}
- splx(s);
return 0;
}
@@ -1669,7 +1815,8 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
{
pv_entry_t pv, npv;
vm_page_t pg;
- int s;
+
+ MUTEX_ASSERT_LOCKED(&pmap->pm_dir_mtx);
DPRINTF(PDB_FOLLOW|PDB_PVENTRY,
("pmap_remove_pv(%p, %p, %p)\n", pmap, (void *)va, (void *)pa));
@@ -1681,13 +1828,14 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
if (pg == NULL)
return;
- pv = pg_to_pvh(pg);
- s = splvm();
+ mtx_enter(&pg->mdpage.pv_mtx);
+
/*
* If we are removing the first entry on the list, copy up
* the next entry, if any, and free that pv item since the
* first root item can't be freed. Else walk the list.
*/
+ pv = pg_to_pvh(pg);
if (pmap == pv->pv_pmap && va == pv->pv_va) {
npv = pv->pv_next;
if (npv) {
@@ -1716,6 +1864,7 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
}
}
+ pv = pg_to_pvh(pg);
if ((pg->pg_flags & (PGF_CACHED | PGF_UNCACHED)) == 0 &&
cache_valias_mask != 0 && pv->pv_pmap != NULL) {
/*
@@ -1723,7 +1872,6 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
* check if it can be mapped cached again after the current
* entry's removal.
*/
- pv = pg_to_pvh(pg);
va = pv->pv_va;
for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
if (((pv->pv_va ^ va) & cache_valias_mask) != 0)
@@ -1735,11 +1883,11 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
printf("%s: caching page pa %p, va %p again\n",
__func__, (void *)VM_PAGE_TO_PHYS(pg), (void *)va);
#endif
- pmap_page_cache(pg, PGF_CACHED);
+ pmap_do_page_cache(pg, PGF_CACHED);
}
}
- splx(s);
+ mtx_leave(&pg->mdpage.pv_mtx);
}
/*
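Throughout pmap.c, functions that used to take the lock themselves are split into a public wrapper that acquires it and a _do_ worker that merely asserts it, so callers already holding the lock (pmap_enter(), pmap_page_protect(), pmap_enter_pv(), pmap_remove_pv()) can invoke the worker directly. The split in its minimal form, with hypothetical names:

	struct foo {
		struct mutex	f_mtx;
	};

	/* Worker: the caller must already hold the lock. */
	void
	foo_do_work(struct foo *f)
	{
		MUTEX_ASSERT_LOCKED(&f->f_mtx);
		/* ... the actual operation, cf. pmap_do_remove() ... */
	}

	/* Public entry point: wraps the worker with the lock. */
	void
	foo_work(struct foo *f)
	{
		mtx_enter(&f->f_mtx);
		foo_do_work(f);
		mtx_leave(&f->f_mtx);
	}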
diff --git a/sys/arch/mips64/mips64/trap.c b/sys/arch/mips64/mips64/trap.c
index f05a6e7c5ab..82cf8baea4c 100644
--- a/sys/arch/mips64/mips64/trap.c
+++ b/sys/arch/mips64/mips64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.110 2015/09/27 09:11:11 miod Exp $ */
+/* $OpenBSD: trap.c,v 1.111 2015/12/31 04:25:51 visa Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -285,14 +285,24 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
paddr_t pa;
vm_page_t pg;
+ mtx_enter(&pmap_kernel()->pm_pte_mtx);
pte = kvtopte(trapframe->badvaddr);
entry = *pte;
-#ifdef DIAGNOSTIC
+#ifdef MULTIPROCESSOR
+ /* Another CPU might have changed the mapping. */
+ if (!(entry & PG_V) || (entry & PG_M)) {
+ pmap_update_kernel_page(
+ trapframe->badvaddr & ~PGOFSET, entry);
+ mtx_leave(&pmap_kernel()->pm_pte_mtx);
+ return;
+ }
+#else
if (!(entry & PG_V) || (entry & PG_M))
panic("trap: ktlbmod: invalid pte");
#endif
if (pmap_is_page_ro(pmap_kernel(),
trunc_page(trapframe->badvaddr), entry)) {
+ mtx_leave(&pmap_kernel()->pm_pte_mtx);
/* write to read only page in the kernel */
ftype = PROT_WRITE;
pcb = &p->p_addr->u_pcb;
@@ -300,7 +310,6 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
}
entry |= PG_M;
*pte = entry;
- KERNEL_LOCK();
pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET,
entry);
pa = pfn_to_pad(entry);
@@ -308,7 +317,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
if (pg == NULL)
panic("trap: ktlbmod: unmanaged page");
pmap_set_modify(pg);
- KERNEL_UNLOCK();
+ mtx_leave(&pmap_kernel()->pm_pte_mtx);
return;
}
/* FALLTHROUGH */
@@ -320,16 +329,26 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
vm_page_t pg;
pmap_t pmap = p->p_vmspace->vm_map.pmap;
+ mtx_enter(&pmap->pm_pte_mtx);
if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
panic("trap: utlbmod: invalid segmap");
pte += uvtopte(trapframe->badvaddr);
entry = *pte;
-#ifdef DIAGNOSTIC
+#ifdef MULTIPROCESSOR
+ /* Another CPU might have changed the mapping. */
+ if (!(entry & PG_V) || (entry & PG_M)) {
+ pmap_update_user_page(pmap,
+ (trapframe->badvaddr & ~PGOFSET), entry);
+ mtx_leave(&pmap->pm_pte_mtx);
+ return;
+ }
+#else
if (!(entry & PG_V) || (entry & PG_M))
panic("trap: utlbmod: invalid pte");
#endif
if (pmap_is_page_ro(pmap,
trunc_page(trapframe->badvaddr), entry)) {
+ mtx_leave(&pmap->pm_pte_mtx);
/* write to read only page */
ftype = PROT_WRITE;
pcb = &p->p_addr->u_pcb;
@@ -337,7 +356,6 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
}
entry |= PG_M;
*pte = entry;
- KERNEL_LOCK();
pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET),
entry);
pa = pfn_to_pad(entry);
@@ -345,7 +363,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
if (pg == NULL)
panic("trap: utlbmod: unmanaged page");
pmap_set_modify(pg);
- KERNEL_UNLOCK();
+ mtx_leave(&pmap->pm_pte_mtx);
return;
}
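With the PTE mutex replacing the kernel lock, a TLB-modified exception can race with another CPU that updates the same PTE first. On MULTIPROCESSOR kernels the handler therefore treats a PTE that is already PG_M (or no longer valid) as a benign race: it rewrites this CPU's TLB entry from the current PTE and returns. The kernel-fault branch reduced to its essentials, using the identifiers from the patch; an annotated sketch with error handling omitted, not the committed code:

	mtx_enter(&pmap_kernel()->pm_pte_mtx);
	pte = kvtopte(trapframe->badvaddr);
	entry = *pte;
	#ifdef MULTIPROCESSOR
	if (!(entry & PG_V) || (entry & PG_M)) {
		/*
		 * Another CPU already modified or removed the mapping
		 * while our stale TLB entry was still in place.  Load
		 * the current PTE into the TLB and retry the access.
		 */
		pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET, entry);
		mtx_leave(&pmap_kernel()->pm_pte_mtx);
		return;
	}
	#endif
	entry |= PG_M;			/* otherwise mark the page modified */
	*pte = entry;
	pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET, entry);
	pmap_set_modify(PHYS_TO_VM_PAGE(pfn_to_pad(entry)));
	mtx_leave(&pmap_kernel()->pm_pte_mtx);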