author    Stefan Kempf <stefan@cvs.openbsd.org>  2016-05-08 16:29:58 +0000
committer Stefan Kempf <stefan@cvs.openbsd.org>  2016-05-08 16:29:58 +0000
commit    7365e475047fcfe8ce1b68f379c6cd790c4ea85d (patch)
tree      7bb1b97fc2c17b86015c7eaf951e753ca48de8e1 /sys
parent    0217dbc1f7efa91e79932688c7212bb5e1f6cddb (diff)
Additional parameter for amap_alloc().
It controls whether an amap allocates the memory that stores its anon pointers lazily or upfront. Needed for upcoming amap changes. ok kettenis@
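For reference, a minimal sketch of the resulting call convention. The variables sz and amap are placeholders, not part of this commit, and the lazy allocation behaviour itself only arrives with the upcoming amap changes; this commit threads the flag through the callers:

	/* Illustrative sketch only, not part of the diff below. */
	struct vm_amap *amap;

	/* old behaviour: storage for anon pointers is allocated upfront */
	amap = amap_alloc(sz, M_WAITOK, 0);

	/*
	 * lazyalloc set: the amap may grow or shrink its anon pointer
	 * storage dynamically with the number of slots in use (used by
	 * amap_copy() for large amaps that cannot be chunked)
	 */
	amap = amap_alloc(sz, M_WAITOK, 1);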
Diffstat (limited to 'sys')
-rw-r--r--  sys/uvm/uvm_amap.c  42
-rw-r--r--  sys/uvm/uvm_amap.h   4
-rw-r--r--  sys/uvm/uvm_map.c    6
3 files changed, 29 insertions, 23 deletions
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index c31a1b4a187..977a95a2154 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.67 2016/05/08 11:52:32 stefan Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.68 2016/05/08 16:29:57 stefan Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -65,7 +65,7 @@ static char amap_slot_pool_names[UVM_AMAP_CHUNK][13];
* local functions
*/
-static struct vm_amap *amap_alloc1(int, int);
+static struct vm_amap *amap_alloc1(int, int, int);
static __inline void amap_list_insert(struct vm_amap *);
static __inline void amap_list_remove(struct vm_amap *);
@@ -177,7 +177,7 @@ amap_init(void)
* init the overlay.
*/
static inline struct vm_amap *
-amap_alloc1(int slots, int waitf)
+amap_alloc1(int slots, int waitf, int lazyalloc)
{
struct vm_amap *amap;
@@ -226,14 +226,14 @@ fail1:
* => reference count to new amap is set to one
*/
struct vm_amap *
-amap_alloc(vaddr_t sz, int waitf)
+amap_alloc(vaddr_t sz, int waitf, int lazyalloc)
{
struct vm_amap *amap;
int slots;
AMAP_B2SLOT(slots, sz); /* load slots */
- amap = amap_alloc1(slots, waitf);
+ amap = amap_alloc1(slots, waitf, lazyalloc);
if (amap) {
memset(amap->am_anon, 0,
amap->am_nslot * sizeof(struct vm_anon *));
@@ -330,7 +330,7 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
boolean_t canchunk, vaddr_t startva, vaddr_t endva)
{
struct vm_amap *amap, *srcamap;
- int slots, lcv;
+ int slots, lcv, lazyalloc = 0;
vaddr_t chunksize;
/* is there a map to copy? if not, create one from scratch. */
@@ -339,22 +339,28 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
* check to see if we have a large amap that we can
* chunk. we align startva/endva to chunk-sized
* boundaries and then clip to them.
+ *
+ * if we cannot chunk the amap, allocate it in a way
+ * that makes it grow or shrink dynamically with
+ * the number of slots.
*/
- if (canchunk && atop(entry->end - entry->start) >=
- UVM_AMAP_LARGE) {
- /* convert slots to bytes */
- chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT;
- startva = (startva / chunksize) * chunksize;
- endva = roundup(endva, chunksize);
- UVM_MAP_CLIP_START(map, entry, startva);
- /* watch out for endva wrap-around! */
- if (endva >= startva)
- UVM_MAP_CLIP_END(map, entry, endva);
+ if (atop(entry->end - entry->start) >= UVM_AMAP_LARGE) {
+ if (canchunk) {
+ /* convert slots to bytes */
+ chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT;
+ startva = (startva / chunksize) * chunksize;
+ endva = roundup(endva, chunksize);
+ UVM_MAP_CLIP_START(map, entry, startva);
+ /* watch out for endva wrap-around! */
+ if (endva >= startva)
+ UVM_MAP_CLIP_END(map, entry, endva);
+ } else
+ lazyalloc = 1;
}
entry->aref.ar_pageoff = 0;
entry->aref.ar_amap = amap_alloc(entry->end - entry->start,
- waitf);
+ waitf, lazyalloc);
if (entry->aref.ar_amap != NULL)
entry->etype &= ~UVM_ET_NEEDSCOPY;
return;
@@ -373,7 +379,7 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
/* looks like we need to copy the map. */
AMAP_B2SLOT(slots, entry->end - entry->start);
- amap = amap_alloc1(slots, waitf);
+ amap = amap_alloc1(slots, waitf, lazyalloc);
if (amap == NULL)
return;
srcamap = entry->aref.ar_amap;
diff --git a/sys/uvm/uvm_amap.h b/sys/uvm/uvm_amap.h
index e76dead3f28..1c5abfb8ab1 100644
--- a/sys/uvm/uvm_amap.h
+++ b/sys/uvm/uvm_amap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.h,v 1.25 2016/05/08 11:52:32 stefan Exp $ */
+/* $OpenBSD: uvm_amap.h,v 1.26 2016/05/08 16:29:57 stefan Exp $ */
/* $NetBSD: uvm_amap.h,v 1.14 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -68,7 +68,7 @@ void amap_populate(struct vm_aref *, vaddr_t);
int amap_add(struct vm_aref *, vaddr_t, struct vm_anon *,
boolean_t);
/* allocate a new amap */
-struct vm_amap *amap_alloc(vaddr_t, int);
+struct vm_amap *amap_alloc(vaddr_t, int, int);
/* clear amap needs-copy flag */
void amap_copy(vm_map_t, vm_map_entry_t, int, boolean_t, vaddr_t,
vaddr_t);
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index aa951176026..7629829280f 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.212 2016/05/05 11:23:39 stefan Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.213 2016/05/08 16:29:57 stefan Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -1064,7 +1064,7 @@ uvm_mapanon(struct vm_map *map, vaddr_t *addr, vsize_t sz,
if (flags & UVM_FLAG_OVERLAY) {
KERNEL_LOCK();
entry->aref.ar_pageoff = 0;
- entry->aref.ar_amap = amap_alloc(sz, M_WAITOK);
+ entry->aref.ar_amap = amap_alloc(sz, M_WAITOK, 0);
KERNEL_UNLOCK();
}
@@ -1320,7 +1320,7 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
}
if (flags & UVM_FLAG_OVERLAY) {
entry->aref.ar_pageoff = 0;
- entry->aref.ar_amap = amap_alloc(sz, M_WAITOK);
+ entry->aref.ar_amap = amap_alloc(sz, M_WAITOK, 0);
}
/* Update map and process statistics. */