Diffstat (limited to 'sys/uvm')
-rw-r--r--   sys/uvm/uvm_amap.c    |   4
-rw-r--r--   sys/uvm/uvm_anon.c    |   6
-rw-r--r--   sys/uvm/uvm_aobj.c    |  10
-rw-r--r--   sys/uvm/uvm_device.c  |   3
-rw-r--r--   sys/uvm/uvm_extern.h  |  57
-rw-r--r--   sys/uvm/uvm_fault.c   |  47
-rw-r--r--   sys/uvm/uvm_glue.c    |   9
-rw-r--r--   sys/uvm/uvm_init.c    |  10
-rw-r--r--   sys/uvm/uvm_km.c      |  58
-rw-r--r--   sys/uvm/uvm_map.c     |  41
-rw-r--r--   sys/uvm/uvm_mmap.c    |  40
-rw-r--r--   sys/uvm/uvm_object.c  |   5
-rw-r--r--   sys/uvm/uvm_page.c    |   6
-rw-r--r--   sys/uvm/uvm_pager.c   |   8
-rw-r--r--   sys/uvm/uvm_pdaemon.c |  12
-rw-r--r--   sys/uvm/uvm_pmap.h    |   4
-rw-r--r--   sys/uvm/uvm_unix.c    |  10
-rw-r--r--   sys/uvm/uvm_vnode.c   |  18
18 files changed, 152 insertions, 196 deletions
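
The change below is largely mechanical: the UVM_PROT_* and VM_PROT_* constants removed from uvm_extern.h carried the same bit values as the POSIX PROT_* constants in <sys/mman.h>, so callers can switch names without any behavioural change, and PROT_MASK becomes simply the union of the three protection bits. The following is a minimal userland sketch (not part of this commit, compiled outside the tree) that checks the equivalence; the UVM_PROT_* values are copied from the definitions deleted in the uvm_extern.h hunk, and the assertions should hold on any system whose <sys/mman.h> uses the usual 0x1/0x2/0x4 encoding.

/*
 * Sketch only: verify that the old UVM protection constants match the
 * POSIX PROT_* values, which is what makes this rename safe.
 */
#include <sys/mman.h>

#include <assert.h>
#include <stdio.h>

#define UVM_PROT_READ	0x01	/* old values, removed by this commit */
#define UVM_PROT_WRITE	0x02
#define UVM_PROT_EXEC	0x04
#define UVM_PROT_ALL	0x07

#ifndef PROT_MASK		/* new definition added to uvm_extern.h */
#define PROT_MASK	(PROT_READ | PROT_WRITE | PROT_EXEC)
#endif

int
main(void)
{
	assert(UVM_PROT_READ == PROT_READ);
	assert(UVM_PROT_WRITE == PROT_WRITE);
	assert(UVM_PROT_EXEC == PROT_EXEC);
	assert(UVM_PROT_ALL == PROT_MASK);
	printf("UVM_PROT_ALL == PROT_MASK == 0x%x\n", PROT_MASK);
	return 0;
}
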
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c index f020dcb043a..1c258b40c65 100644 --- a/sys/uvm/uvm_amap.c +++ b/sys/uvm/uvm_amap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_amap.c,v 1.54 2014/09/14 14:17:27 jsg Exp $ */ +/* $OpenBSD: uvm_amap.c,v 1.55 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */ /* @@ -1032,7 +1032,7 @@ amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon, if (amap->am_anon[slot]->an_page != NULL && (amap->am_flags & AMAP_SHARED) != 0) { pmap_page_protect(amap->am_anon[slot]->an_page, - VM_PROT_NONE); + PROT_NONE); /* * XXX: suppose page is supposed to be wired somewhere? */ diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c index c1b979961b0..72823d7fe8d 100644 --- a/sys/uvm/uvm_anon.c +++ b/sys/uvm/uvm_anon.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_anon.c,v 1.40 2014/09/14 14:17:27 jsg Exp $ */ +/* $OpenBSD: uvm_anon.c,v 1.41 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */ /* @@ -121,7 +121,7 @@ uvm_anfree(struct vm_anon *anon) atomic_setbits_int(&pg->pg_flags, PG_RELEASED); return; } - pmap_page_protect(pg, VM_PROT_NONE); + pmap_page_protect(pg, PROT_NONE); uvm_lock_pageq(); /* lock out pagedaemon */ uvm_pagefree(pg); /* bye bye */ uvm_unlock_pageq(); /* free the daemon */ @@ -250,7 +250,7 @@ uvm_anon_pagein(struct vm_anon *anon) /* deactivate the page (to put it on a page queue) */ pmap_clear_reference(pg); - pmap_page_protect(pg, VM_PROT_NONE); + pmap_page_protect(pg, PROT_NONE); uvm_lock_pageq(); uvm_pagedeactivate(pg); uvm_unlock_pageq(); diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c index 12c18b351e7..f4d9addc226 100644 --- a/sys/uvm/uvm_aobj.c +++ b/sys/uvm/uvm_aobj.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_aobj.c,v 1.69 2014/09/14 14:17:27 jsg Exp $ */ +/* $OpenBSD: uvm_aobj.c,v 1.70 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */ /* @@ -878,7 +878,7 @@ uao_detach_locked(struct uvm_object *uobj) uvm_lock_pageq(); continue; } - pmap_page_protect(pg, VM_PROT_NONE); + pmap_page_protect(pg, PROT_NONE); uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT); uvm_pagefree(pg); } @@ -970,7 +970,7 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) uvm_lock_pageq(); /* zap all mappings for the page. */ - pmap_page_protect(pp, VM_PROT_NONE); + pmap_page_protect(pp, PROT_NONE); /* ...and deactivate the page. */ uvm_pagedeactivate(pp); @@ -991,7 +991,7 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) continue; /* zap all mappings for the page. 
*/ - pmap_page_protect(pp, VM_PROT_NONE); + pmap_page_protect(pp, PROT_NONE); uao_dropswap(uobj, pp->offset >> PAGE_SHIFT); uvm_lock_pageq(); @@ -1418,7 +1418,7 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx) pg = NULL; npages = 1; rv = uao_get(&aobj->u_obj, (voff_t)pageidx << PAGE_SHIFT, - &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0); + &pg, &npages, 0, PROT_READ | PROT_WRITE, 0, 0); switch (rv) { case VM_PAGER_OK: diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c index b58f0b3053b..cfdd845079f 100644 --- a/sys/uvm/uvm_device.c +++ b/sys/uvm/uvm_device.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_device.c,v 1.49 2014/09/14 14:17:27 jsg Exp $ */ +/* $OpenBSD: uvm_device.c,v 1.50 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */ /* @@ -118,7 +118,6 @@ udv_attach(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size) * Check that the specified range of the device allows the * desired protection. * - * XXX assumes VM_PROT_* == PROT_* * XXX clobbers off and size, but nothing else here needs them. */ while (size != 0) { diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h index d1c1e1bc97e..8780317d9bb 100644 --- a/sys/uvm/uvm_extern.h +++ b/sys/uvm/uvm_extern.h @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_extern.h,v 1.122 2014/11/15 21:42:07 deraadt Exp $ */ +/* $OpenBSD: uvm_extern.h,v 1.123 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */ /* @@ -81,22 +81,7 @@ struct vm_page; typedef struct vm_page *vm_page_t; /* protections bits */ -#define UVM_PROT_MASK 0x07 /* protection mask */ -#define UVM_PROT_NONE 0x00 /* protection none */ -#define UVM_PROT_ALL 0x07 /* everything */ -#define UVM_PROT_READ 0x01 /* read */ -#define UVM_PROT_WRITE 0x02 /* write */ -#define UVM_PROT_EXEC 0x04 /* exec */ - -/* protection short codes */ -#define UVM_PROT_R 0x01 /* read */ -#define UVM_PROT_W 0x02 /* write */ -#define UVM_PROT_RW 0x03 /* read-write */ -#define UVM_PROT_X 0x04 /* exec */ -#define UVM_PROT_RX 0x05 /* read-exec */ -#define UVM_PROT_WX 0x06 /* write-exec */ -#define UVM_PROT_RWX 0x07 /* read-write-exec */ - +#define PROT_MASK (PROT_READ | PROT_WRITE | PROT_EXEC) /* 0x08: not used */ /* inherit codes */ @@ -107,44 +92,11 @@ typedef struct vm_page *vm_page_t; #define UVM_INH_ZERO 0x30 /* "zero" */ /* 0x40, 0x80: not used */ - /* bits 0x700: max protection, 0x800: not used */ - /* bits 0x7000: advice, 0x8000: not used */ typedef int vm_prot_t; -/* - * Protection values, defined as bits within the vm_prot_t type - * - * These are funky definitions from old CMU VM and are kept - * for compatibility reasons, one day they are going to die, - * just like everybody else. - */ - -#define VM_PROT_NONE ((vm_prot_t) 0x00) - -#define VM_PROT_READ ((vm_prot_t) 0x01) /* read permission */ -#define VM_PROT_WRITE ((vm_prot_t) 0x02) /* write permission */ -#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) /* execute permission */ - -/* - * The default protection for newly-created virtual memory - */ - -#define VM_PROT_DEFAULT (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) - -/* - * The maximum privileges possible, for parameter checking. 
- */ - -#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) - -/* advice: matches MADV_* from sys/mman.h */ -#define UVM_ADV_NORMAL 0x0 /* 'normal' */ -#define UVM_ADV_RANDOM 0x1 /* 'random' */ -#define UVM_ADV_SEQUENTIAL 0x2 /* 'sequential' */ -/* 0x3: will need, 0x4: dontneed */ #define UVM_ADV_MASK 0x7 /* mask */ /* mapping flags */ @@ -159,9 +111,9 @@ typedef int vm_prot_t; #define UVM_FLAG_NOFAULT 0x1000000 /* don't fault */ /* macros to extract info */ -#define UVM_PROTECTION(X) ((X) & UVM_PROT_MASK) +#define UVM_PROTECTION(X) ((X) & PROT_MASK) #define UVM_INHERIT(X) (((X) & UVM_INH_MASK) >> 4) -#define UVM_MAXPROTECTION(X) (((X) >> 8) & UVM_PROT_MASK) +#define UVM_MAXPROTECTION(X) (((X) >> 8) & PROT_MASK) #define UVM_ADVICE(X) (((X) >> 12) & UVM_ADV_MASK) #define UVM_MAPFLAG(prot, maxprot, inh, advice, flags) \ @@ -209,6 +161,7 @@ typedef int vm_prot_t; #include <sys/queue.h> #include <sys/tree.h> #include <sys/lock.h> +#include <sys/mman.h> #ifdef _KERNEL struct buf; diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c index b45b43c3024..4c7cf86f945 100644 --- a/sys/uvm/uvm_fault.c +++ b/sys/uvm/uvm_fault.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_fault.c,v 1.78 2014/10/03 17:41:00 kettenis Exp $ */ +/* $OpenBSD: uvm_fault.c,v 1.79 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */ /* @@ -184,7 +184,7 @@ uvmfault_anonflush(struct vm_anon **anons, int n) if (pg && (pg->pg_flags & PG_BUSY) == 0 && pg->loan_count == 0) { uvm_lock_pageq(); if (pg->wire_count == 0) { - pmap_page_protect(pg, VM_PROT_NONE); + pmap_page_protect(pg, PROT_NONE); uvm_pagedeactivate(pg); } uvm_unlock_pageq(); @@ -206,15 +206,15 @@ uvmfault_init() npages = atop(16384); if (npages > 0) { KASSERT(npages <= UVM_MAXRANGE / 2); - uvmadvice[UVM_ADV_NORMAL].nforw = npages; - uvmadvice[UVM_ADV_NORMAL].nback = npages - 1; + uvmadvice[POSIX_MADV_NORMAL].nforw = npages; + uvmadvice[POSIX_MADV_NORMAL].nback = npages - 1; } npages = atop(32768); if (npages > 0) { KASSERT(npages <= UVM_MAXRANGE / 2); - uvmadvice[UVM_ADV_SEQUENTIAL].nforw = npages - 1; - uvmadvice[UVM_ADV_SEQUENTIAL].nback = npages; + uvmadvice[POSIX_MADV_SEQUENTIAL].nforw = npages - 1; + uvmadvice[POSIX_MADV_SEQUENTIAL].nback = npages; } } @@ -380,7 +380,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, * anon and try again. */ if (pg->pg_flags & PG_RELEASED) { - pmap_page_protect(pg, VM_PROT_NONE); + pmap_page_protect(pg, PROT_NONE); uvm_anfree(anon); /* frees page for us */ if (locked) uvmfault_unlockall(ufi, amap, NULL, @@ -506,7 +506,7 @@ uvmfault_update_stats(struct uvm_faultinfo *ufi) * the map locked off during I/O. */ #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \ - ~VM_PROT_WRITE : VM_PROT_ALL) + ~PROT_WRITE : PROT_MASK) int uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type, vm_prot_t access_type) @@ -571,7 +571,7 @@ ReFault: /* handle "needs_copy" case. */ if (UVM_ET_ISNEEDSCOPY(ufi.entry)) { - if ((access_type & VM_PROT_WRITE) || + if ((access_type & PROT_WRITE) || (ufi.entry->object.uvm_obj == NULL)) { /* need to clear */ uvmfault_unlockmaps(&ufi, FALSE); @@ -583,7 +583,7 @@ ReFault: * ensure that we pmap_enter page R/O since * needs_copy is still true */ - enter_prot &= ~VM_PROT_WRITE; + enter_prot &= ~PROT_WRITE; } } @@ -710,7 +710,7 @@ ReFault: */ (void) pmap_enter(ufi.orig_map->pmap, currva, VM_PAGE_TO_PHYS(anon->an_page), - (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) : + (anon->an_ref > 1) ? 
(enter_prot & ~PROT_WRITE) : enter_prot, PMAP_CANFAIL | (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0)); @@ -887,12 +887,12 @@ ReFault: /* special handling for loaned pages */ if (anon->an_page->loan_count) { - if ((access_type & VM_PROT_WRITE) == 0) { + if ((access_type & PROT_WRITE) == 0) { /* * for read faults on loaned pages we just cap the * protection at read-only. */ - enter_prot = enter_prot & ~VM_PROT_WRITE; + enter_prot = enter_prot & ~PROT_WRITE; } else { /* * note that we can't allow writes into a loaned page! @@ -923,8 +923,7 @@ ReFault: uvm_pagecopy(anon->an_page, pg); /* force reload */ - pmap_page_protect(anon->an_page, - VM_PROT_NONE); + pmap_page_protect(anon->an_page, PROT_NONE); uvm_lock_pageq(); /* KILL loan */ if (uobj) /* if we were loaning */ @@ -963,7 +962,7 @@ ReFault: * if we are out of anon VM we kill the process (XXX: could wait?). */ - if ((access_type & VM_PROT_WRITE) != 0 && anon->an_ref > 1) { + if ((access_type & PROT_WRITE) != 0 && anon->an_ref > 1) { uvmexp.flt_acow++; oanon = anon; /* oanon = old */ anon = uvm_analloc(); @@ -1008,7 +1007,7 @@ ReFault: oanon = anon; pg = anon->an_page; if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */ - enter_prot = enter_prot & ~VM_PROT_WRITE; + enter_prot = enter_prot & ~PROT_WRITE; } /* @@ -1077,7 +1076,7 @@ Case2: promote = TRUE; /* always need anon here */ } else { KASSERT(uobjpage != PGO_DONTCARE); - promote = (access_type & VM_PROT_WRITE) && + promote = (access_type & PROT_WRITE) && UVM_ET_ISCOPYONWRITE(ufi.entry); } @@ -1172,7 +1171,7 @@ Case2: */ uvmexp.flt_obj++; if (UVM_ET_ISCOPYONWRITE(ufi.entry)) - enter_prot &= ~VM_PROT_WRITE; + enter_prot &= ~PROT_WRITE; pg = uobjpage; /* map in the actual object */ /* assert(uobjpage != PGO_DONTCARE) */ @@ -1183,10 +1182,10 @@ Case2: */ if (uobjpage->loan_count) { - if ((access_type & VM_PROT_WRITE) == 0) { + if ((access_type & PROT_WRITE) == 0) { /* read fault: cap the protection at readonly */ /* cap! */ - enter_prot = enter_prot & ~VM_PROT_WRITE; + enter_prot = enter_prot & ~PROT_WRITE; } else { /* write fault: must break the loan here */ /* alloc new un-owned page */ @@ -1227,7 +1226,7 @@ Case2: uvm_pagecopy(uobjpage, pg); /* old -> new */ atomic_clearbits_int(&pg->pg_flags, PG_FAKE|PG_CLEAN); - pmap_page_protect(uobjpage, VM_PROT_NONE); + pmap_page_protect(uobjpage, PROT_NONE); if (uobjpage->pg_flags & PG_WANTED) wakeup(uobjpage); atomic_clearbits_int(&uobjpage->pg_flags, @@ -1320,7 +1319,7 @@ Case2: * procs see it */ if ((amap_flags(amap) & AMAP_SHARED) != 0) { - pmap_page_protect(uobjpage, VM_PROT_NONE); + pmap_page_protect(uobjpage, PROT_NONE); } /* dispose of uobjpage. drop handle to uobj as well. */ @@ -1427,7 +1426,7 @@ uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type) /* * now fault it in a page at a time. if the fault fails then we have - * to undo what we have done. note that in uvm_fault VM_PROT_NONE + * to undo what we have done. note that in uvm_fault PROT_NONE * is replaced with the max protection if fault_type is VM_FAULT_WIRE. 
*/ for (va = start ; va < end ; va += PAGE_SIZE) { diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c index cd01ee5a67e..703cdd82b9e 100644 --- a/sys/uvm/uvm_glue.c +++ b/sys/uvm/uvm_glue.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_glue.c,v 1.66 2014/07/11 16:35:40 jsg Exp $ */ +/* $OpenBSD: uvm_glue.c,v 1.67 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */ /* @@ -89,7 +89,7 @@ uvm_kernacc(caddr_t addr, size_t len, int rw) { boolean_t rv; vaddr_t saddr, eaddr; - vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE; + vm_prot_t prot = rw == B_READ ? PROT_READ : PROT_WRITE; saddr = trunc_page((vaddr_t)addr); eaddr = round_page((vaddr_t)addr + len); @@ -120,7 +120,7 @@ uvm_chgkprot(caddr_t addr, size_t len, int rw) paddr_t pa; vaddr_t sva, eva; - prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE; + prot = rw == B_READ ? PROT_READ : PROT_READ | PROT_WRITE; eva = round_page((vaddr_t)addr + len); for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) { /* @@ -240,8 +240,7 @@ uvm_vslock_device(struct proc *p, void *addr, size_t len, while ((pg = TAILQ_FIRST(&pgl)) != NULL) { TAILQ_REMOVE(&pgl, pg, pageq); - pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); + pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE); va += PAGE_SIZE; } pmap_update(pmap_kernel()); diff --git a/sys/uvm/uvm_init.c b/sys/uvm/uvm_init.c index 980d1f2f71c..b6120802173 100644 --- a/sys/uvm/uvm_init.c +++ b/sys/uvm/uvm_init.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_init.c,v 1.34 2014/09/14 14:17:27 jsg Exp $ */ +/* $OpenBSD: uvm_init.c,v 1.35 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_init.c,v 1.14 2000/06/27 17:29:23 mrg Exp $ */ /* @@ -147,15 +147,15 @@ uvm_init(void) #ifdef DEADBEEF0 kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE; if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE, - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED))) + NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(PROT_NONE, + PROT_NONE, UVM_INH_NONE, POSIX_MADV_RANDOM, UVM_FLAG_FIXED))) panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF0); #endif #ifdef DEADBEEF1 kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE; if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE, - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED))) + NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(PROT_NONE, + PROT_NONE, UVM_INH_NONE, POSIX_MADV_RANDOM, UVM_FLAG_FIXED))) panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF1); #endif /* diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c index aa34283e9fd..21518979482 100644 --- a/sys/uvm/uvm_km.c +++ b/sys/uvm/uvm_km.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_km.c,v 1.116 2014/11/13 00:47:44 tedu Exp $ */ +/* $OpenBSD: uvm_km.c,v 1.117 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */ /* @@ -182,8 +182,8 @@ uvm_km_init(vaddr_t start, vaddr_t end) ); kernel_map_store.pmap = pmap_kernel(); if (base != start && uvm_map(&kernel_map_store, &base, start - base, - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, - UVM_INH_NONE, UVM_ADV_RANDOM,UVM_FLAG_FIXED)) != 0) + NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(PROT_MASK, PROT_MASK, + UVM_INH_NONE, POSIX_MADV_RANDOM, UVM_FLAG_FIXED)) != 0) panic("uvm_km_init: could not reserve space for kernel"); kernel_map = &kernel_map_store; @@ -209,8 +209,8 @@ uvm_km_suballoc(struct vm_map *map, vaddr_t 
*min, vaddr_t *max, vsize_t size, /* first allocate a blank spot in the parent map */ if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, - UVM_ADV_RANDOM, mapflags)) != 0) { + UVM_MAPFLAG(PROT_MASK, PROT_MASK, UVM_INH_NONE, + POSIX_MADV_RANDOM, mapflags)) != 0) { panic("uvm_km_suballoc: unable to allocate space in parent map"); } @@ -338,8 +338,9 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size, /* allocate some virtual space */ if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET, - valign, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE, - UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) { + valign, UVM_MAPFLAG(PROT_READ | PROT_WRITE, + PROT_READ | PROT_WRITE, UVM_INH_NONE, + POSIX_MADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) { return(0); } @@ -390,11 +391,11 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size, */ if (obj == NULL) { pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), - UVM_PROT_RW); + PROT_READ | PROT_WRITE); } else { pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), - UVM_PROT_RW, - PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE); + PROT_READ | PROT_WRITE, + PROT_READ | PROT_WRITE | PMAP_WIRED); } loopva += PAGE_SIZE; offset += PAGE_SIZE; @@ -454,8 +455,8 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit) /* allocate some virtual space */ if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object, - UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, - UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) { + UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(PROT_MASK, PROT_MASK, + UVM_INH_NONE, POSIX_MADV_RANDOM, 0)) != 0)) { return(0); } @@ -491,7 +492,9 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit) * object, so we always use regular old pmap_enter(). */ pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), - UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE); + PROT_READ | PROT_WRITE | PROT_EXEC, + PROT_READ | PROT_WRITE | PMAP_WIRED); + /* XXX why is the above executable? */ loopva += PAGE_SIZE; offset += PAGE_SIZE; @@ -540,8 +543,8 @@ uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags) /* allocate some virtual space, demand filled by kernel_object. */ if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object, - UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, - UVM_INH_NONE, UVM_ADV_RANDOM, flags)) != 0)) { + UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(PROT_MASK, PROT_MASK, + UVM_INH_NONE, POSIX_MADV_RANDOM, flags)) != 0)) { return(0); } @@ -574,8 +577,8 @@ uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer) * by kernel_object. 
*/ if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object, - prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL, - UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) { + prefer, 0, UVM_MAPFLAG(PROT_MASK, + PROT_MASK, UVM_INH_NONE, POSIX_MADV_RANDOM, 0)) == 0)) { return(kva); } @@ -658,8 +661,9 @@ uvm_km_page_init(void) addr = vm_map_min(kernel_map); if (uvm_map(kernel_map, &addr, (vsize_t)bulk << PAGE_SHIFT, NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE, - UVM_ADV_RANDOM, UVM_KMF_TRYLOCK)) != 0) { + UVM_MAPFLAG(PROT_READ | PROT_WRITE, + PROT_READ | PROT_WRITE, UVM_INH_NONE, + POSIX_MADV_RANDOM, UVM_KMF_TRYLOCK)) != 0) { bulk /= 2; continue; } @@ -721,8 +725,9 @@ uvm_km_thread(void *arg) * So, only use UVM_KMF_TRYLOCK for the first page * if fp != NULL */ - flags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, - UVM_INH_NONE, UVM_ADV_RANDOM, + flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE, + PROT_READ | PROT_WRITE, + UVM_INH_NONE, POSIX_MADV_RANDOM, fp != NULL ? UVM_KMF_TRYLOCK : 0); memset(pg, 0, sizeof(pg)); for (i = 0; i < nitems(pg); i++) { @@ -734,8 +739,9 @@ uvm_km_thread(void *arg) } /* made progress, so don't sleep for more */ - flags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, - UVM_INH_NONE, UVM_ADV_RANDOM, + flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE, + PROT_READ | PROT_WRITE, + UVM_INH_NONE, POSIX_MADV_RANDOM, UVM_KMF_TRYLOCK); } @@ -865,9 +871,9 @@ km_alloc(size_t sz, const struct kmem_va_mode *kv, #endif alloc_va: if (kv->kv_executable) { - prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; + prot = PROT_READ | PROT_WRITE | PROT_EXEC; } else { - prot = VM_PROT_READ | VM_PROT_WRITE; + prot = PROT_READ | PROT_WRITE; } if (kp->kp_pageable) { @@ -914,7 +920,7 @@ try_map: va = vm_map_min(map); if (uvm_map(map, &va, sz, uobj, kd->kd_prefer, kv->kv_align, UVM_MAPFLAG(prot, prot, UVM_INH_NONE, - UVM_ADV_RANDOM, mapflags))) { + POSIX_MADV_RANDOM, mapflags))) { if (kv->kv_wait && kd->kd_waitok) { tsleep(map, PVM, "km_allocva", 0); goto try_map; diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c index 1437471d631..ea6a385caba 100644 --- a/sys/uvm/uvm_map.c +++ b/sys/uvm/uvm_map.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_map.c,v 1.177 2014/11/13 00:47:44 tedu Exp $ */ +/* $OpenBSD: uvm_map.c,v 1.178 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */ /* @@ -1066,7 +1066,7 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz, * Note: we enforce the alignment restriction, * but ignore pmap_prefer. */ - } else if ((maxprot & VM_PROT_EXECUTE) != 0 && + } else if ((maxprot & PROT_EXEC) != 0 && map->uaddr_exe != NULL) { /* Run selection algorithm for executables. 
*/ error = uvm_addr_invoke(map, map->uaddr_exe, &first, &last, @@ -1871,7 +1871,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first, iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { KDASSERT(iter->start >= start_addr && iter->end <= end_addr); if (UVM_ET_ISHOLE(iter) || iter->start == iter->end || - iter->protection == VM_PROT_NONE) + iter->protection == PROT_NONE) continue; /* @@ -1882,7 +1882,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first, */ if (!VM_MAPENT_ISWIRED(iter) && !UVM_ET_ISSUBMAP(iter) && UVM_ET_ISNEEDSCOPY(iter) && - ((iter->protection & VM_PROT_WRITE) || + ((iter->protection & PROT_WRITE) || iter->object.uvm_obj == NULL)) { amap_copy(map, iter, M_WAITOK, TRUE, iter->start, iter->end); @@ -1903,7 +1903,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first, for (iter = first; error == 0 && iter != end; iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { if (UVM_ET_ISHOLE(iter) || iter->start == iter->end || - iter->protection == VM_PROT_NONE) + iter->protection == PROT_NONE) continue; error = uvm_fault_wire(map, iter->start, iter->end, @@ -1931,7 +1931,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first, first = RB_NEXT(uvm_map_addr, &map->addr, first)) { if (UVM_ET_ISHOLE(first) || first->start == first->end || - first->protection == VM_PROT_NONE) + first->protection == PROT_NONE) continue; first->wired_count--; @@ -1945,7 +1945,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first, for (; iter != end; iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { if (UVM_ET_ISHOLE(iter) || iter->start == iter->end || - iter->protection == VM_PROT_NONE) + iter->protection == PROT_NONE) continue; iter->wired_count--; @@ -2910,7 +2910,7 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end, */ if (iter->protection != old_prot) { mask = UVM_ET_ISCOPYONWRITE(iter) ? - ~VM_PROT_WRITE : VM_PROT_ALL; + ~PROT_WRITE : PROT_MASK; /* update pmap */ if ((iter->protection & mask) == PROT_NONE && @@ -2935,13 +2935,13 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end, /* * If the map is configured to lock any future mappings, - * wire this entry now if the old protection was VM_PROT_NONE - * and the new protection is not VM_PROT_NONE. + * wire this entry now if the old protection was PROT_NONE + * and the new protection is not PROT_NONE. */ if ((map->flags & VM_MAP_WIREFUTURE) != 0 && VM_MAPENT_ISWIRED(iter) == 0 && - old_prot == VM_PROT_NONE && - new_prot != VM_PROT_NONE) { + old_prot == PROT_NONE && + new_prot != PROT_NONE) { if (uvm_map_pageable(map, iter->start, iter->end, FALSE, UVM_LK_ENTER | UVM_LK_EXIT) != 0) { /* @@ -3347,13 +3347,12 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map, * calling pmap_protect needlessly. */ if (!UVM_ET_ISNEEDSCOPY(old_entry)) { - if (old_entry->max_protection & - VM_PROT_WRITE) { + if (old_entry->max_protection & PROT_WRITE) { pmap_protect(old_map->pmap, old_entry->start, old_entry->end, old_entry->protection & - ~VM_PROT_WRITE); + ~PROT_WRITE); pmap_update(old_map->pmap); } old_entry->etype |= UVM_ET_NEEDSCOPY; @@ -3366,7 +3365,7 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map, * we only need to protect the child if the * parent has write access. 
*/ - if (old_entry->max_protection & VM_PROT_WRITE) + if (old_entry->max_protection & PROT_WRITE) protect_child = TRUE; else protect_child = FALSE; @@ -3386,7 +3385,7 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map, pmap_protect(new_map->pmap, new_entry->start, new_entry->end, new_entry->protection & - ~VM_PROT_WRITE); + ~PROT_WRITE); } } @@ -3535,7 +3534,7 @@ uvm_map_hint(struct vmspace *vm, vm_prot_t prot) * If executable skip first two pages, otherwise start * after data + heap region. */ - if ((prot & VM_PROT_EXECUTE) != 0 && + if ((prot & PROT_EXEC) != 0 && (vaddr_t)vm->vm_daddr >= I386_MAX_EXE_ADDR) { addr = (PAGE_SIZE*2) + (arc4random() & (I386_MAX_EXE_ADDR / 2 - 1)); @@ -3878,7 +3877,7 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len, if (uvm_map_findspace(kernel_map, &tmp1, &tmp2, &dstaddr, len, MAX(PAGE_SIZE, PMAP_PREFER_ALIGN()), PMAP_PREFER_OFFSET(start), - VM_PROT_NONE, 0) != 0) { + PROT_NONE, 0) != 0) { error = ENOMEM; goto fail2; } @@ -4065,7 +4064,7 @@ deactivate_it: KASSERT(pg->uanon == anon); /* zap all mappings for the page. */ - pmap_page_protect(pg, VM_PROT_NONE); + pmap_page_protect(pg, PROT_NONE); /* ...and deactivate the page. */ uvm_pagedeactivate(pg); @@ -4108,7 +4107,7 @@ flush_object: */ if (uobj != NULL && ((flags & PGO_FREE) == 0 || - ((entry->max_protection & VM_PROT_WRITE) != 0 && + ((entry->max_protection & PROT_WRITE) != 0 && (entry->etype & UVM_ET_COPYONWRITE) == 0))) { rv = uobj->pgops->pgo_flush(uobj, cp_start - entry->start + entry->offset, diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c index 9f914fe2140..3ed329127b7 100644 --- a/sys/uvm/uvm_mmap.c +++ b/sys/uvm/uvm_mmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_mmap.c,v 1.99 2014/10/03 17:41:00 kettenis Exp $ */ +/* $OpenBSD: uvm_mmap.c,v 1.100 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */ /* @@ -127,7 +127,7 @@ sys_mquery(struct proc *p, void *v, register_t *retval) size = (vsize_t) SCARG(uap, len); fd = SCARG(uap, fd); - if ((prot & VM_PROT_ALL) != prot) + if ((prot & PROT_MASK) != prot) return (EINVAL); if (SCARG(uap, flags) & MAP_FIXED) @@ -210,7 +210,7 @@ sys_mincore(struct proc *p, void *v, register_t *retval) * Lock down vec, so our returned status isn't outdated by * storing the status byte for a page. */ - if ((error = uvm_vslock(p, vec, npgs, VM_PROT_WRITE)) != 0) { + if ((error = uvm_vslock(p, vec, npgs, PROT_WRITE)) != 0) { free(pgs, M_TEMP, 0); return (error); } @@ -341,7 +341,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval) * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and * validate the flags. 
*/ - if ((prot & VM_PROT_ALL) != prot) + if ((prot & PROT_MASK) != prot) return (EINVAL); if ((flags & MAP_FLAGMASK) != flags) return (EINVAL); @@ -435,11 +435,11 @@ sys_mmap(struct proc *p, void *v, register_t *retval) } /* now check protection */ - maxprot = VM_PROT_EXECUTE; + maxprot = PROT_EXEC; /* check read access */ if (fp->f_flag & FREAD) - maxprot |= VM_PROT_READ; + maxprot |= PROT_READ; else if (prot & PROT_READ) { error = EACCES; goto out; @@ -458,7 +458,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval) VOP_GETATTR(vp, &va, p->p_ucred, p))) goto out; if ((va.va_flags & (IMMUTABLE|APPEND)) == 0) - maxprot |= VM_PROT_WRITE; + maxprot |= PROT_WRITE; else if (prot & PROT_WRITE) { error = EPERM; goto out; @@ -469,7 +469,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval) } } else { /* MAP_PRIVATE mappings can always write to */ - maxprot |= VM_PROT_WRITE; + maxprot |= PROT_WRITE; } /* set handle to vnode */ @@ -485,7 +485,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval) is_anon: /* label for SunOS style /dev/zero */ handle = NULL; - maxprot = VM_PROT_ALL; + maxprot = PROT_MASK; pos = 0; } @@ -604,7 +604,7 @@ sys_munmap(struct proc *p, void *v, register_t *retval) * interesting system call semantic: make sure entire range is * allocated before allowing an unmap. */ - if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) { + if (!uvm_map_checkprot(map, addr, addr + size, PROT_NONE)) { vm_map_unlock(map); return (EINVAL); } @@ -642,7 +642,7 @@ sys_mprotect(struct proc *p, void *v, register_t *retval) size = (vsize_t)SCARG(uap, len); prot = SCARG(uap, prot); - if ((prot & VM_PROT_ALL) != prot) + if ((prot & PROT_MASK) != prot) return (EINVAL); /* @@ -904,7 +904,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot, struct uvm_object *uobj; struct vnode *vp; int error; - int advice = UVM_ADV_NORMAL; + int advice = POSIX_MADV_NORMAL; uvm_flag_t uvmflag = 0; vsize_t align = 0; /* userland page size */ @@ -950,7 +950,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot, vp = (struct vnode *) handle; /* get vnode */ if (vp->v_type != VCHR) { uobj = uvn_attach(vp, (flags & MAP_SHARED) ? - maxprot : (maxprot & ~VM_PROT_WRITE)); + maxprot : (maxprot & ~PROT_WRITE)); /* * XXXCDC: hack from old code @@ -976,27 +976,27 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot, * the uncache to kill the uvn and trigger I/O. */ if (flags & MAP_SHARED) { - if ((prot & VM_PROT_WRITE) || - (maxprot & VM_PROT_WRITE)) { + if ((prot & PROT_WRITE) || + (maxprot & PROT_WRITE)) { uvm_vnp_uncache(vp); } } } else { uobj = udv_attach(vp->v_rdev, (flags & MAP_SHARED) ? maxprot : - (maxprot & ~VM_PROT_WRITE), foff, size); + (maxprot & ~PROT_WRITE), foff, size); /* * XXX Some devices don't like to be mapped with * XXX PROT_EXEC, but we don't really have a * XXX better way of handling this, right now */ if (uobj == NULL && (prot & PROT_EXEC) == 0) { - maxprot &= ~VM_PROT_EXECUTE; + maxprot &= ~PROT_EXEC; uobj = udv_attach(vp->v_rdev, (flags & MAP_SHARED) ? maxprot : - (maxprot & ~VM_PROT_WRITE), foff, size); + (maxprot & ~PROT_WRITE), foff, size); } - advice = UVM_ADV_RANDOM; + advice = POSIX_MADV_RANDOM; } if (uobj == NULL) @@ -1020,7 +1020,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot, * POSIX 1003.1b -- if our address space was configured * to lock all future mappings, wire the one we just made. */ - if (prot == VM_PROT_NONE) { + if (prot == PROT_NONE) { /* * No more work to do in this case. 
*/ diff --git a/sys/uvm/uvm_object.c b/sys/uvm/uvm_object.c index afe6bd5993a..0e8246998eb 100644 --- a/sys/uvm/uvm_object.c +++ b/sys/uvm/uvm_object.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_object.c,v 1.9 2014/09/14 14:17:27 jsg Exp $ */ +/* $OpenBSD: uvm_object.c,v 1.10 2014/11/16 12:31:00 deraadt Exp $ */ /* * Copyright (c) 2006 The NetBSD Foundation, Inc. @@ -35,6 +35,7 @@ */ #include <sys/param.h> +#include <sys/mman.h> #include <uvm/uvm.h> @@ -79,7 +80,7 @@ uvm_objwire(struct uvm_object *uobj, voff_t start, voff_t end, /* Get the pages */ memset(pgs, 0, sizeof(pgs)); error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0, - VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL, + PROT_READ | PROT_WRITE, POSIX_MADV_SEQUENTIAL, PGO_ALLPAGES | PGO_SYNCIO); if (error) diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c index cf9064ccc6a..3c93ee8907b 100644 --- a/sys/uvm/uvm_page.c +++ b/sys/uvm/uvm_page.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_page.c,v 1.131 2014/07/11 16:35:40 jsg Exp $ */ +/* $OpenBSD: uvm_page.c,v 1.132 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */ /* @@ -385,7 +385,7 @@ uvm_pageboot_alloc(vsize_t size) * Note this memory is no longer managed, so using * pmap_kenter is safe. */ - pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE); + pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE); } pmap_update(pmap_kernel()); return(addr); @@ -1097,7 +1097,7 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs) uobj = pg->uobject; if (uobj != NULL) { uvm_lock_pageq(); - pmap_page_protect(pg, VM_PROT_NONE); + pmap_page_protect(pg, PROT_NONE); /* XXX won't happen right now */ if (pg->pg_flags & PQ_AOBJ) uao_dropswap(uobj, diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c index 784d81373e5..63b276f1a7f 100644 --- a/sys/uvm/uvm_pager.c +++ b/sys/uvm/uvm_pager.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_pager.c,v 1.69 2014/09/14 14:17:27 jsg Exp $ */ +/* $OpenBSD: uvm_pager.c,v 1.70 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $ */ /* @@ -233,9 +233,9 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags) vsize_t size; struct vm_page *pp; - prot = VM_PROT_READ; + prot = PROT_READ; if (flags & UVMPAGER_MAPIN_READ) - prot |= VM_PROT_WRITE; + prot |= PROT_WRITE; size = ptoa(npages); KASSERT(size <= MAXBSIZE); @@ -395,7 +395,7 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages, PG_RELEASED); /* XXX: protect wired page? see above comment. */ - pmap_page_protect(pclust, VM_PROT_READ); + pmap_page_protect(pclust, PROT_READ); if (!forward) { ppsp--; /* back up one page */ *ppsp = pclust; diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c index d69dbd053a1..32d7b40060a 100644 --- a/sys/uvm/uvm_pdaemon.c +++ b/sys/uvm/uvm_pdaemon.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_pdaemon.c,v 1.73 2014/09/14 14:17:27 jsg Exp $ */ +/* $OpenBSD: uvm_pdaemon.c,v 1.74 2014/11/16 12:31:00 deraadt Exp $ */ /* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */ /* @@ -473,7 +473,7 @@ uvmpd_scan_inactive(struct pglist *pglst) } /* zap all mappings with pmap_page_protect... 
*/ - pmap_page_protect(p, VM_PROT_NONE); + pmap_page_protect(p, PROT_NONE); uvm_pagefree(p); uvmexp.pdfreed++; @@ -546,7 +546,7 @@ uvmpd_scan_inactive(struct pglist *pglst) swap_backed = ((p->pg_flags & PQ_SWAPBACKED) != 0); atomic_setbits_int(&p->pg_flags, PG_BUSY); UVM_PAGE_OWN(p, "scan_inactive"); - pmap_page_protect(p, VM_PROT_READ); + pmap_page_protect(p, PROT_READ); uvmexp.pgswapout++; /* @@ -741,7 +741,7 @@ uvmpd_scan_inactive(struct pglist *pglst) p->uanon = NULL; uvm_anfree(anon); /* kills anon */ - pmap_page_protect(p, VM_PROT_NONE); + pmap_page_protect(p, PROT_NONE); anon = NULL; uvm_lock_pageq(); nextpg = TAILQ_NEXT(p, pageq); @@ -920,7 +920,7 @@ uvmpd_scan(void) * inactive pages. */ if (inactive_shortage > 0) { - pmap_page_protect(p, VM_PROT_NONE); + pmap_page_protect(p, PROT_NONE); /* no need to check wire_count as pg is "active" */ uvm_pagedeactivate(p); uvmexp.pddeact++; @@ -961,7 +961,7 @@ uvmpd_drop(struct pglist *pglst) } /* zap all mappings with pmap_page_protect... */ - pmap_page_protect(p, VM_PROT_NONE); + pmap_page_protect(p, PROT_NONE); uvm_pagefree(p); } } diff --git a/sys/uvm/uvm_pmap.h b/sys/uvm/uvm_pmap.h index ec9af64fcb0..85dcb0037e5 100644 --- a/sys/uvm/uvm_pmap.h +++ b/sys/uvm/uvm_pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_pmap.h,v 1.22 2010/12/26 15:41:00 miod Exp $ */ +/* $OpenBSD: uvm_pmap.h,v 1.23 2014/11/16 12:31:01 deraadt Exp $ */ /* $NetBSD: uvm_pmap.h,v 1.1 2000/06/27 09:00:14 mrg Exp $ */ /* @@ -86,7 +86,7 @@ typedef struct pmap_statistics *pmap_statistics_t; #include <machine/pmap.h> /* - * Flags passed to pmap_enter(). Note the bottom 3 bits are VM_PROT_* + * Flags passed to pmap_enter(). Note the bottom 3 bits are PROT_* * bits, used to indicate the access type that was made (to seed modified * and referenced information). 
*/ diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c index 193fd99cf23..81c82bff826 100644 --- a/sys/uvm/uvm_unix.c +++ b/sys/uvm/uvm_unix.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_unix.c,v 1.50 2014/07/11 16:35:40 jsg Exp $ */ +/* $OpenBSD: uvm_unix.c,v 1.51 2014/11/16 12:31:01 deraadt Exp $ */ /* $NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $ */ /* @@ -85,8 +85,8 @@ sys_obreak(struct proc *p, void *v, register_t *retval) if (new > old) { error = uvm_map(&vm->vm_map, &old, new - old, NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX, UVM_INH_COPY, - UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED| + UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_MASK, UVM_INH_COPY, + POSIX_MADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED| UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW)); if (error) { uprintf("sbrk: grow %ld failed, error = %d\n", @@ -159,7 +159,7 @@ uvm_coredump(struct proc *p, struct vnode *vp, struct ucred *cred, panic("uvm_coredump: user process with submap?"); } - if (!(entry->protection & VM_PROT_WRITE) && + if (!(entry->protection & PROT_WRITE) && entry->start != p->p_p->ps_sigcode) continue; @@ -268,7 +268,7 @@ uvm_coredump_walkmap(struct proc *p, void *iocookie, panic("uvm_coredump: user process with submap?"); } - if (!(entry->protection & VM_PROT_WRITE) && + if (!(entry->protection & PROT_WRITE) && entry->start != p->p_p->ps_sigcode) continue; diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c index 07411a7d6ca..43fda694ace 100644 --- a/sys/uvm/uvm_vnode.c +++ b/sys/uvm/uvm_vnode.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_vnode.c,v 1.84 2014/07/11 16:35:40 jsg Exp $ */ +/* $OpenBSD: uvm_vnode.c,v 1.85 2014/11/16 12:31:01 deraadt Exp $ */ /* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */ /* @@ -170,7 +170,7 @@ uvn_attach(struct vnode *vp, vm_prot_t accessprot) uvn->u_obj.uo_refs++; /* bump uvn ref! */ /* check for new writeable uvn */ - if ((accessprot & VM_PROT_WRITE) != 0 && + if ((accessprot & PROT_WRITE) != 0 && (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) { LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); /* we are now on wlist! */ @@ -236,7 +236,7 @@ uvn_attach(struct vnode *vp, vm_prot_t accessprot) uvn->u_size = used_vnode_size; /* if write access, we need to add it to the wlist */ - if (accessprot & VM_PROT_WRITE) { + if (accessprot & PROT_WRITE) { LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); uvn->u_flags |= UVM_VNODE_WRITEABLE; /* we are on wlist! 
*/ } @@ -648,7 +648,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) if ((pp->pg_flags & PG_CLEAN) != 0 && (flags & PGO_FREE) != 0 && (pp->pg_flags & PQ_ACTIVE) != 0) - pmap_page_protect(pp, VM_PROT_NONE); + pmap_page_protect(pp, PROT_NONE); if ((pp->pg_flags & PG_CLEAN) != 0 && pmap_is_modified(pp)) atomic_clearbits_int(&pp->pg_flags, PG_CLEAN); @@ -661,7 +661,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) if (!needs_clean) { if (flags & PGO_DEACTIVATE) { if (pp->wire_count == 0) { - pmap_page_protect(pp, VM_PROT_NONE); + pmap_page_protect(pp, PROT_NONE); uvm_pagedeactivate(pp); } } else if (flags & PGO_FREE) { @@ -674,7 +674,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) curoff -= PAGE_SIZE; continue; } else { - pmap_page_protect(pp, VM_PROT_NONE); + pmap_page_protect(pp, PROT_NONE); /* removed page from object */ uvm_pagefree(pp); } @@ -692,7 +692,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) */ atomic_setbits_int(&pp->pg_flags, PG_BUSY); UVM_PAGE_OWN(pp, "uvn_flush"); - pmap_page_protect(pp, VM_PROT_READ); + pmap_page_protect(pp, PROT_READ); /* if we're async, free the page in aiodoned */ if ((flags & (PGO_FREE|PGO_SYNCIO)) == PGO_FREE) atomic_setbits_int(&pp->pg_flags, PG_RELEASED); @@ -786,7 +786,7 @@ ReTry: /* dispose of page */ if (flags & PGO_DEACTIVATE) { if (ptmp->wire_count == 0) { - pmap_page_protect(ptmp, VM_PROT_NONE); + pmap_page_protect(ptmp, PROT_NONE); uvm_pagedeactivate(ptmp); } } else if (flags & PGO_FREE && @@ -802,7 +802,7 @@ ReTry: "lost!\n"); retval = FALSE; } - pmap_page_protect(ptmp, VM_PROT_NONE); + pmap_page_protect(ptmp, PROT_NONE); uvm_pagefree(ptmp); } |
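
For reference, the mapping-flag word built by UVM_MAPFLAG() keeps its old layout; only the names stored in each field change (PROT_* in the protection and max-protection fields, POSIX_MADV_* in the advice field). Below is a hedged userland sketch of that layout based on the extraction macros shown in the uvm_extern.h hunk above; the manual packing (maxprot << 8, advice << 12) mirrors those macros but is an assumption for illustration, since the body of UVM_MAPFLAG() itself is not part of this diff.

/*
 * Sketch only: pack and unpack a UVM mapping-flag word as described by
 * the post-commit extraction macros.  PROT_MASK, UVM_ADV_MASK,
 * UVM_PROTECTION(), UVM_MAXPROTECTION() and UVM_ADVICE() are copied
 * from the patch; the packing below is an assumed inverse of them.
 */
#include <sys/mman.h>

#include <assert.h>

#ifndef PROT_MASK
#define PROT_MASK		(PROT_READ | PROT_WRITE | PROT_EXEC)
#endif
#define UVM_ADV_MASK		0x7

#define UVM_PROTECTION(X)	((X) & PROT_MASK)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & UVM_ADV_MASK)

int
main(void)
{
	/* a read/write mapping, full max protection, random access advice */
	int flags = (PROT_READ | PROT_WRITE)	/* protection, low bits */
	    | (PROT_MASK << 8)			/* max protection */
	    | (POSIX_MADV_RANDOM << 12);	/* advice */

	assert(UVM_PROTECTION(flags) == (PROT_READ | PROT_WRITE));
	assert(UVM_MAXPROTECTION(flags) == PROT_MASK);
	assert(UVM_ADVICE(flags) == POSIX_MADV_RANDOM);
	return 0;
}
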