author     Martin Pieuchot <mpi@cvs.openbsd.org>	2015-06-05 09:25:22 +0000
committer  Martin Pieuchot <mpi@cvs.openbsd.org>	2015-06-05 09:25:22 +0000
commit     f978333abcdf61ee0d15c79c7f2b54970e3f2593 (patch)
tree       af243b241a8f48207b962cd47d8797fa7a9b6bb6 /sys
parent     624f180e920a11182864f91434ec2b4b4b89e129 (diff)
Introduce pmap_pted_ro(), a simple wrapper for the 32/64-bit versions, which no longer call pmap_vp_lookup().

Careful readers will have noticed the removal of the bits on the virtual address with a page mask; this change allowed me to find the 13-year-old bug fixed in r1.145.

ok kettenis@, deraadt@, dlg@
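The page-mask detail mentioned above is the "pted->pted_va & ~PAGE_MASK" step in the new pmap_pted_ro64()/pmap_pted_ro32(): pted_va keeps per-entry state in its low bits, so the helpers mask it to recover the page-aligned virtual address instead of trusting a caller-supplied one. Below is a minimal stand-alone C sketch of that masking; it is not kernel code, and PAGE_SIZE, PTED_VA_EXEC_M and the struct are simplified stand-ins for illustration.

/*
 * Sketch only: strip the flag bits kept in the low bits of pted_va with
 * ~PAGE_MASK to recover the page-aligned virtual address, as the new
 * pmap_pted_ro32()/pmap_pted_ro64() do.  The definitions here are
 * simplified stand-ins, not the pmap's own.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define PTED_VA_EXEC_M	0x01UL			/* example flag in the low bits */

struct pte_desc {
	uintptr_t pted_va;			/* page-aligned VA | flags */
};

int
main(void)
{
	struct pte_desc pted = { .pted_va = 0xdeadb000UL | PTED_VA_EXEC_M };
	uintptr_t va = pted.pted_va & ~PAGE_MASK;	/* drop the flag bits */

	printf("pted_va 0x%lx -> va 0x%lx\n",
	    (unsigned long)pted.pted_va, (unsigned long)va);
	return 0;
}

Because the helpers now take a pte_desc directly, callers such as pmap_protect() in the diff below perform pmap_vp_lookup() themselves and skip invalid entries before calling pmap_pted_ro().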
Diffstat (limited to 'sys')
-rw-r--r--	sys/arch/powerpc/powerpc/pmap.c	59
1 file changed, 28 insertions, 31 deletions
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index ccef94a1497..103e73133c5 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.148 2015/06/05 09:18:50 mpi Exp $ */
+/* $OpenBSD: pmap.c,v 1.149 2015/06/05 09:25:21 mpi Exp $ */
/*
* Copyright (c) 2001, 2002, 2007 Dale Rahn.
@@ -132,8 +132,9 @@ static inline void tlbie(vaddr_t ea);
void tlbia(void);
void pmap_attr_save(paddr_t pa, u_int32_t bits);
-void pmap_page_ro64(pmap_t pm, vaddr_t va, vm_prot_t prot);
-void pmap_page_ro32(pmap_t pm, vaddr_t va, vm_prot_t prot);
+void pmap_pted_ro(struct pte_desc *, vm_prot_t);
+void pmap_pted_ro64(struct pte_desc *, vm_prot_t);
+void pmap_pted_ro32(struct pte_desc *, vm_prot_t);
/*
* Some functions are called in real mode and cannot be profiled.
@@ -1990,17 +1991,23 @@ pmap_syncicache_user_virt(pmap_t pm, vaddr_t va)
}
void
-pmap_page_ro64(pmap_t pm, vaddr_t va, vm_prot_t prot)
+pmap_pted_ro(struct pte_desc *pted, vm_prot_t prot)
{
+	if (ppc_proc_is_64b)
+		pmap_pted_ro64(pted, prot);
+	else
+		pmap_pted_ro32(pted, prot);
+}
+
+void
+pmap_pted_ro64(struct pte_desc *pted, vm_prot_t prot)
+{
+	pmap_t pm = pted->pted_pmap;
+	vaddr_t va = pted->pted_va & ~PAGE_MASK;
	struct pte_64 *ptp64;
-	struct pte_desc *pted;
	struct vm_page *pg;
	int sr, idx;
-	pted = pmap_vp_lookup(pm, va);
-	if (pted == NULL || !PTED_VALID(pted))
-		return;
-
	pg = PHYS_TO_VM_PAGE(pted->p.pted_pte64.pte_lo & PTE_RPGN_64);
	if (pg->pg_flags & PG_PMAP_EXE) {
		if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_WRITE) {
@@ -2049,17 +2056,14 @@ pmap_page_ro64(pmap_t pm, vaddr_t va, vm_prot_t prot)
}
void
-pmap_page_ro32(pmap_t pm, vaddr_t va, vm_prot_t prot)
+pmap_pted_ro32(struct pte_desc *pted, vm_prot_t prot)
{
+	pmap_t pm = pted->pted_pmap;
+	vaddr_t va = pted->pted_va & ~PAGE_MASK;
	struct pte_32 *ptp32;
-	struct pte_desc *pted;
	struct vm_page *pg = NULL;
	int sr, idx;
-	pted = pmap_vp_lookup(pm, va);
-	if (pted == NULL || !PTED_VALID(pted))
-		return;
-
	pg = PHYS_TO_VM_PAGE(pted->p.pted_pte32.pte_lo & PTE_RPGN_32);
	if (pg->pg_flags & PG_PMAP_EXE) {
		if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_WRITE) {
@@ -2125,28 +2129,21 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
		return;
	}
-	LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
-		if (ppc_proc_is_64b)
-			pmap_page_ro64(pted->pted_pmap, pted->pted_va, prot);
-		else
-			pmap_page_ro32(pted->pted_pmap, pted->pted_va, prot);
-	}
+	LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list)
+		pmap_pted_ro(pted, prot);
}
void
pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if (prot & (PROT_READ | PROT_EXEC)) {
-		if (ppc_proc_is_64b) {
-			while (sva < eva) {
-				pmap_page_ro64(pm, sva, prot);
-				sva += PAGE_SIZE;
-			}
-		} else {
-			while (sva < eva) {
-				pmap_page_ro32(pm, sva, prot);
-				sva += PAGE_SIZE;
-			}
+		struct pte_desc *pted;
+
+		while (sva < eva) {
+			pted = pmap_vp_lookup(pm, sva);
+			if (pted && PTED_VALID(pted))
+				pmap_pted_ro(pted, prot);
+			sva += PAGE_SIZE;
		}
		return;
	}