author     Theo de Raadt <deraadt@cvs.openbsd.org>   1995-12-15 13:50:38 +0000
committer  Theo de Raadt <deraadt@cvs.openbsd.org>   1995-12-15 13:50:38 +0000
commit     f5e1bb00af2da73350a4a4d728921024c7f3437a (patch)
tree       fd9f7a10f8e2442841702015ecff8aadfd556a67 /sys/arch
parent     24c128184f2f8f2be8466c441c7ac0abef6fbbe1 (diff)
from netbsd:
Prevent possible race condition in ctx_alloc(). Remove some bogus casts. Make pmap_prefer() also return a preferred virtual address when there's no associated physical page.
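
The race fix works by doing the context free-list manipulation and pmap bookkeeping at splpmap() and deferring the (slow) cache flush until after splx(), as the ctx_alloc() hunks below show. What follows is a minimal, self-contained sketch of that deferred-flush pattern; the names (slot_alloc, slots, freelist) and the stubbed splpmap()/splx() are illustrative stand-ins, not the real pmap internals.

#include <stdio.h>

/*
 * Sketch only: stand-ins for the kernel's interrupt-priority primitives.
 * The real splpmap()/splx() raise and restore the interrupt level; here
 * they just record a flag so the example is self-contained and runnable.
 */
static int ipl;
static int splpmap(void) { int s = ipl; ipl = 1; return s; }
static void splx(int s) { ipl = s; }

struct slot {
	struct slot *next;
	int owner;
};
static struct slot slots[4];
static struct slot *freelist;

/*
 * Deferred-flush allocation, mirroring the ctx_alloc() change: the
 * free-list and ownership updates happen inside the splpmap()/splx()
 * critical section, while the expensive flush (cache_flush_context()
 * in the real code, a printf here) is postponed until after splx().
 */
static struct slot *
slot_alloc(int owner)
{
	struct slot *c;
	int s, doflush = 0;

	s = splpmap();
	if ((c = freelist) != NULL) {
		freelist = c->next;	/* fast path: nothing to flush */
	} else {
		c = &slots[0];		/* steal a slot from its old owner */
		doflush = 1;		/* remember the flush, do it later */
	}
	c->owner = owner;
	splx(s);

	if (doflush)
		printf("flush for slot now owned by %d\n", owner);
	return (c);
}

int
main(void)
{
	freelist = &slots[1];
	slot_alloc(1);		/* free-list path, no flush */
	slot_alloc(2);		/* steal path, flush happens after splx() */
	return (0);
}

The point of the pattern is that the critical section covers only the cheap pointer updates; the expensive flush runs with the interrupt level restored, but only after the new owner has already been recorded.
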
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/sparc/sparc/pmap.c  68
1 file changed, 40 insertions, 28 deletions
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index 38c717eb532..be83605c3ab 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1079,7 +1079,7 @@ ctx_alloc(pm)
register struct pmap *pm;
{
register union ctxinfo *c;
- register int cnum, i;
+ register int s, cnum, i, doflush;
register struct regmap *rp;
register int gap_start, gap_end;
register unsigned long va;
@@ -1093,10 +1093,11 @@ ctx_alloc(pm)
gap_start = pm->pm_gap_start;
gap_end = pm->pm_gap_end;
+ s = splpmap();
if ((c = ctx_freelist) != NULL) {
ctx_freelist = c->c_nextfree;
cnum = c - ctxinfo;
- setcontext(cnum);
+ doflush = 0;
} else {
if ((ctx_kick += ctx_kickdir) >= ncontext) {
ctx_kick = ncontext - 1;
@@ -1114,17 +1115,20 @@ ctx_alloc(pm)
cnum, c->c_pmap);
#endif
c->c_pmap->pm_ctx = NULL;
- setcontext(cnum);
- if (vactype != VAC_NONE)
- cache_flush_context();
+ doflush = (vactype != VAC_NONE);
if (gap_start < c->c_pmap->pm_gap_start)
gap_start = c->c_pmap->pm_gap_start;
if (gap_end > c->c_pmap->pm_gap_end)
gap_end = c->c_pmap->pm_gap_end;
}
+
+ setcontext(cnum);
c->c_pmap = pm;
pm->pm_ctx = c;
pm->pm_ctxnum = cnum;
+ splx(s);
+ if (doflush)
+ cache_flush_context();
/*
* Write pmap's region (3-level MMU) or segment table into the MMU.
@@ -1408,7 +1412,7 @@ pv_unlink(pv, pm, va)
pv->pv_next = npv->pv_next;
pv->pv_pmap = npv->pv_pmap;
pv->pv_va = npv->pv_va;
- free((caddr_t)npv, M_VMPVENT);
+ free(npv, M_VMPVENT);
} else
pv->pv_pmap = NULL;
} else {
@@ -1422,7 +1426,7 @@ pv_unlink(pv, pm, va)
break;
}
prev->pv_next = npv->pv_next;
- free((caddr_t)npv, M_VMPVENT);
+ free(npv, M_VMPVENT);
}
if (pv->pv_flags & PV_NC) {
/*
@@ -2066,7 +2070,7 @@ pmap_destroy(pm)
simple_unlock(&pm->pm_lock);
if (count == 0) {
pmap_release(pm);
- free((caddr_t)pm, M_VMPMAP);
+ free(pm, M_VMPMAP);
}
}
@@ -2124,7 +2128,7 @@ pmap_release(pm)
}
#endif
if (pm->pm_regstore)
- free((caddr_t)pm->pm_regstore, M_VMPMAP);
+ free(pm->pm_regstore, M_VMPMAP);
}
/*
@@ -2382,10 +2386,10 @@ pmap_rmu(pm, va, endva, vr, vs)
*pte = 0;
}
if ((sp->sg_npte = nleft) == 0) {
- free((caddr_t)pte0, M_VMPMAP);
+ free(pte0, M_VMPMAP);
sp->sg_pte = NULL;
if (--rp->rg_nsegmap == 0) {
- free((caddr_t)rp->rg_segmap, M_VMPMAP);
+ free(rp->rg_segmap, M_VMPMAP);
rp->rg_segmap = NULL;
#ifdef MMU_3L
if (mmu_3l && rp->rg_smeg != reginval) {
@@ -2472,12 +2476,12 @@ if (pm->pm_ctx == NULL) {
setsegmap(vs << SGSHIFT, seginval);
}
#endif
- free((caddr_t)pte0, M_VMPMAP);
+ free(pte0, M_VMPMAP);
sp->sg_pte = NULL;
me_free(pm, pmeg);
if (--rp->rg_nsegmap == 0) {
- free((caddr_t)rp->rg_segmap, M_VMPMAP);
+ free(rp->rg_segmap, M_VMPMAP);
rp->rg_segmap = NULL;
GAP_WIDEN(pm,vr);
@@ -2567,10 +2571,10 @@ pmap_page_protect(pa, prot)
if (nleft) {
sp->sg_pte[VA_VPG(va)] = 0;
} else {
- free((caddr_t)sp->sg_pte, M_VMPMAP);
+ free(sp->sg_pte, M_VMPMAP);
sp->sg_pte = NULL;
if (--rp->rg_nsegmap == 0) {
- free((caddr_t)rp->rg_segmap, M_VMPMAP);
+ free(rp->rg_segmap, M_VMPMAP);
rp->rg_segmap = NULL;
GAP_WIDEN(pm,vr);
#ifdef MMU_3L
@@ -2646,7 +2650,7 @@ pmap_page_protect(pa, prot)
setsegmap(vs << SGSHIFT, seginval);
}
#endif
- free((caddr_t)sp->sg_pte, M_VMPMAP);
+ free(sp->sg_pte, M_VMPMAP);
sp->sg_pte = NULL;
me_free(pm, sp->sg_pmeg);
@@ -2658,7 +2662,7 @@ pmap_page_protect(pa, prot)
region_free(pm, rp->rg_smeg);
}
#endif
- free((caddr_t)rp->rg_segmap, M_VMPMAP);
+ free(rp->rg_segmap, M_VMPMAP);
rp->rg_segmap = NULL;
GAP_WIDEN(pm,vr);
}
@@ -2667,7 +2671,7 @@ pmap_page_protect(pa, prot)
nextpv:
npv = pv->pv_next;
if (pv != pv0)
- free((caddr_t)pv, M_VMPVENT);
+ free(pv, M_VMPVENT);
if ((pv = npv) == NULL)
break;
}
@@ -3133,7 +3137,7 @@ rretry:
sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
if (rp->rg_segmap != NULL) {
printf("pmap_enter: segment filled during sleep\n"); /* can this happen? */
- free((caddr_t)sp, M_VMPMAP);
+ free(sp, M_VMPMAP);
goto rretry;
}
bzero((caddr_t)sp, size);
@@ -3153,7 +3157,7 @@ sretry:
pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
if (sp->sg_pte != NULL) {
printf("pmap_enter: pte filled during sleep\n"); /* can this happen? */
- free((caddr_t)pte, M_VMPMAP);
+ free(pte, M_VMPMAP);
goto sretry;
}
#ifdef DEBUG
@@ -3626,23 +3630,31 @@ pmap_prefer(pa, va)
register struct pvlist *pv;
register long m, d;
- if (cputyp == CPU_SUN4M)
- /* does the sun4m have the cache alias problem? */
- return va;
-
m = CACHE_ALIAS_DIST;
+ if (m == 0) /* m=0 => no cache aliasing */
+ return (va);
+
+ if (pa == (vm_offset_t)-1) {
+ /*
+ * Do not consider physical address. Just return
+ * a cache aligned address.
+ */
+ if (VA_INHOLE(va))
+ va = MMU_HOLE_END;
+
+ /* XXX - knowledge about `exec' formats; can we get by without? */
+ va -= USRTEXT;
+ va = (va + m - 1) & ~(m - 1);
+ return (va + USRTEXT);
+ }
if ((pa & (PMAP_TNC & ~PMAP_NC)) || !managed(pa))
return va;
pv = pvhead(pa);
if (pv->pv_pmap == NULL) {
-#if 0
- return ((va + m - 1) & ~(m - 1));
-#else
/* Unusable, tell caller to try another one */
return (vm_offset_t)-1;
-#endif
}
d = (long)(pv->pv_va & (m - 1)) - (long)(va & (m - 1));
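
The last hunk teaches pmap_prefer() to return a cache-alias-aligned address even when no physical page is supplied (pa == (vm_offset_t)-1): the address is biased by USRTEXT (hence the XXX comment about exec formats), rounded up to the next CACHE_ALIAS_DIST boundary, and re-biased. A small stand-alone sketch of that rounding, using illustrative constants rather than the real machine-dependent CACHE_ALIAS_DIST and USRTEXT:

#include <stdio.h>

/* Illustrative values only; the real constants come from the sparc headers. */
#define CACHE_ALIAS_DIST	0x10000UL	/* hypothetical 64KB alias distance */
#define USRTEXT			0x2000UL	/* hypothetical user text base */

/*
 * Same arithmetic as the new pa == -1 branch of pmap_prefer():
 * round (va - USRTEXT) up to a cache-alias boundary, then re-bias.
 */
static unsigned long
prefer(unsigned long va)
{
	unsigned long m = CACHE_ALIAS_DIST;

	va -= USRTEXT;
	va = (va + m - 1) & ~(m - 1);
	return (va + USRTEXT);
}

int
main(void)
{
	/* 0x12345 rounds up to 0x22000 with the constants above. */
	printf("0x%lx -> 0x%lx\n", 0x12345UL, prefer(0x12345UL));
	return (0);
}
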