author    Artur Grabowski <art@cvs.openbsd.org>    2001-11-07 01:18:02 +0000
committer Artur Grabowski <art@cvs.openbsd.org>    2001-11-07 01:18:02 +0000
commit    c1c5c105f41ad5a43d2d86df94c7b57a4e86d03f (patch)
tree      f35ef0cfc30ec497901dac2701302652e736c1f4 /sys/uvm/uvm_map.c
parent    0cf5ca3327d0f3f051ffeeedbbf725cdb3b5b201 (diff)
Add an alignment argument to uvm_map that specifies an alignment hint
for the virtual address.
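
As a rough sketch (not part of this commit), a caller that wanted, say, a
4MB-aligned chunk of kernel virtual space could pass the new argument as
below. The variable names and the 4MB figure are hypothetical; the uvm_map()
signature and the UVM_MAPFLAG() combination are taken from the
uvm_map_reserve() hunk in the diff, which passes 0 to keep the old,
unaligned behaviour.

	vaddr_t va;
	vsize_t size;

	size = 4 * 1024 * 1024;		/* hypothetical request size */
	va = vm_map_min(kernel_map);	/* hint: start of the kernel map */

	/* reserve VA space, hinting a 4MB alignment (e.g. for large pages) */
	if (uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET,
	    (vsize_t)(4 * 1024 * 1024),
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS)
		return (KERN_NO_SPACE);	/* out of VA space */

Since the alignment is only a hint, a successful return does not guarantee
an aligned address; callers that require alignment must check the result.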
Diffstat (limited to 'sys/uvm/uvm_map.c')
-rw-r--r--  sys/uvm/uvm_map.c  84
1 file changed, 61 insertions(+), 23 deletions(-)
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index b754d9ccde4..d3d00972295 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.27 2001/11/06 13:36:52 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.80 2000/08/01 00:53:11 wiz Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.28 2001/11/07 01:18:01 art Exp $ */
+/* $NetBSD: uvm_map.c,v 1.81 2000/09/13 15:00:25 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -456,16 +456,23 @@ uvm_map_clip_end(map, entry, end)
* case [4] is for kernel mappings where we don't know the offset until
* we've found a virtual address. note that kernel object offsets are
* always relative to vm_map_min(kernel_map).
+ *
+ * => if `align' is non-zero, we try to align the virtual address to
+ * the specified alignment. this is only a hint; if we can't
+ * do it, the address will be unaligned. this is provided as
+ * a mechanism for large pages.
+ *
* => XXXCDC: need way to map in external amap?
*/
int
-uvm_map(map, startp, size, uobj, uoffset, flags)
+uvm_map(map, startp, size, uobj, uoffset, align, flags)
vm_map_t map;
vaddr_t *startp; /* IN/OUT */
vsize_t size;
struct uvm_object *uobj;
voff_t uoffset;
+ vsize_t align;
uvm_flag_t flags;
{
vm_map_entry_t prev_entry, new_entry;
@@ -500,7 +507,7 @@ uvm_map(map, startp, size, uobj, uoffset, flags)
vm_map_lock(map); /* could sleep here */
}
if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
- uobj, uoffset, flags & UVM_FLAG_FIXED)) == NULL) {
+ uobj, uoffset, align, flags)) == NULL) {
UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
vm_map_unlock(map);
return (KERN_NO_SPACE);
@@ -775,39 +782,54 @@ uvm_map_lookup_entry(map, address, entry)
return (FALSE);
}
-
/*
* uvm_map_findspace: find "length" sized space in "map".
*
- * => "hint" is a hint about where we want it, unless fixed is true
- * (in which case we insist on using "hint").
+ * => "hint" is a hint about where we want it, unless FINDSPACE_FIXED is
+ * set (in which case we insist on using "hint").
* => "result" is VA returned
* => uobj/uoffset are to be used to handle VAC alignment, if required
+ * => if `align' is non-zero, we attempt to align to that value.
* => caller must at least have read-locked map
* => returns NULL on failure, or pointer to prev. map entry if success
* => note this is a cross between the old vm_map_findspace and vm_map_find
*/
vm_map_entry_t
-uvm_map_findspace(map, hint, length, result, uobj, uoffset, fixed)
+uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
vm_map_t map;
vaddr_t hint;
vsize_t length;
vaddr_t *result; /* OUT */
struct uvm_object *uobj;
voff_t uoffset;
- boolean_t fixed;
+ vsize_t align;
+ int flags;
{
vm_map_entry_t entry, next, tmp;
- vaddr_t end;
+ vaddr_t end, orig_hint;
UVMHIST_FUNC("uvm_map_findspace");
UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, fixed=%d)",
- map, hint, length, fixed);
+ UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
+ map, hint, length, flags);
+
+#ifdef DIAGNOSTIC
+ if ((align & (align - 1)) != 0)
+ panic("uvm_map_findspace: alignment not power of 2");
+ if ((flags & UVM_FLAG_FIXED) != 0 && align != 0)
+ panic("uvm_map_findslace: fixed and alignment both specified");
+#endif
+
+ /*
+ * remember the original hint. if we are aligning, then we
+ * may have to try again with no alignment constraint if
+ * we fail the first time.
+ */
+ orig_hint = hint;
if (hint < map->min_offset) { /* check ranges ... */
- if (fixed) {
+ if (flags & UVM_FLAG_FIXED) {
UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
return(NULL);
}
@@ -824,13 +846,13 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, fixed)
* something at this address, we have to start after it.
*/
- if (!fixed && hint == map->min_offset) {
+ if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
if ((entry = map->first_free) != &map->header)
hint = entry->end;
} else {
if (uvm_map_lookup_entry(map, hint, &tmp)) {
/* "hint" address already in use ... */
- if (fixed) {
+ if (flags & UVM_FLAG_FIXED) {
UVMHIST_LOG(maphist,"<- fixed & VA in use",
0, 0, 0, 0);
return(NULL);
@@ -860,18 +882,33 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, fixed)
* push hint forward as needed to avoid VAC alias problems.
* we only do this if a valid offset is specified.
*/
- if (!fixed && uoffset != UVM_UNKNOWN_OFFSET)
- PMAP_PREFER(uoffset, &hint);
+ if ((flags & UVM_FLAG_FIXED) == 0 &&
+ uoffset != UVM_UNKNOWN_OFFSET)
+ PMAP_PREFER(uoffset, &hint);
#endif
+ if (align != 0) {
+ if ((hint & (align - 1)) != 0)
+ hint = roundup(hint, align);
+ /*
+ * XXX Should we PMAP_PREFER() here again?
+ */
+ }
end = hint + length;
if (end > map->max_offset || end < hint) {
UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
+ if (align != 0) {
+ UVMHIST_LOG(maphist,
+ "calling recursively, no align",
+ 0,0,0,0);
+ return (uvm_map_findspace(map, orig_hint,
+ length, result, uobj, uoffset, 0, flags));
+ }
return (NULL);
}
next = entry->next;
if (next == &map->header || next->start >= end)
break;
- if (fixed) {
+ if (flags & UVM_FLAG_FIXED) {
UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
return(NULL); /* only one shot at it ... */
}
@@ -961,7 +998,7 @@ uvm_unmap_remove(map, start, end, entry_list)
UVM_MAP_CLIP_END(map, entry, end);
next = entry->next;
len = entry->end - entry->start;
-
+
/*
* unwire before removing addresses from the pmap; otherwise
* unwiring will put the entries back into the pmap (XXX).
@@ -1156,10 +1193,11 @@ uvm_unmap_detach(first_entry, amap_unref_flags)
*/
int
-uvm_map_reserve(map, size, offset, raddr)
+uvm_map_reserve(map, size, offset, align, raddr)
vm_map_t map;
vsize_t size;
- vaddr_t offset; /* hint for pmap_prefer */
+ vaddr_t offset; /* hint for pmap_prefer */
+ vsize_t align; /* alignment hint */
vaddr_t *raddr; /* IN:hint, OUT: reserved VA */
{
UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
@@ -1175,7 +1213,7 @@ uvm_map_reserve(map, size, offset, raddr)
* reserve some virtual space.
*/
- if (uvm_map(map, raddr, size, NULL, offset,
+ if (uvm_map(map, raddr, size, NULL, offset, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
@@ -1353,7 +1391,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
*/
dstaddr = vm_map_min(dstmap);
- if (uvm_map_reserve(dstmap, len, start, &dstaddr) == FALSE)
+ if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
return(ENOMEM);
*dstaddrp = dstaddr; /* pass address back to caller */
UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);