author     Stefan Kempf <stefan@cvs.openbsd.org>    2016-03-27 09:51:38 +0000
committer  Stefan Kempf <stefan@cvs.openbsd.org>    2016-03-27 09:51:38 +0000
commit     a38f987a633128a4ede6cc630e0acd1e3f00a607 (patch)
tree       9219cb6b1ae7dbee22ae3b7d1c6e1493c62e87c1    /sys/uvm/uvm_amap.c
parent     08fb3cc6c0a31a785e86cda5febc086662da9dc7 (diff)
amap_extend is never called, remove it.
In the code, amap_extend() is only called when vm_map_entries are merged. However, only kernel map entries are ever merged, and those do not use amaps, so amap_extend() is never reached at runtime. ok millert@, KASSERT suggestion and ok mpi@
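For illustration only, a minimal sketch of how the merge path can assert this precondition instead of growing an amap, in the spirit of the KASSERT suggestion mentioned above. The helper name uvm_mapent_merge_sketch() is hypothetical and the exact hook in uvm_map.c is an assumption, not taken from the companion commit; only the aref.ar_amap field, the entry start/end fields, and KASSERT() follow the surrounding UVM code.

/*
 * Hypothetical sketch (not from the actual commit): kernel map entries
 * never carry an amap, so the merge path can assert that instead of
 * extending one via the removed amap_extend().
 */
static void
uvm_mapent_merge_sketch(struct vm_map_entry *e1, struct vm_map_entry *e2)
{
	/* only amap-less (kernel) entries are ever merged */
	KASSERT(e1->aref.ar_amap == NULL);
	KASSERT(e2->aref.ar_amap == NULL);

	/* fold e2 into e1: the merged entry simply covers both ranges */
	e1->end = e2->end;

	/* ... caller unlinks and frees e2 ... */
}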
Diffstat (limited to 'sys/uvm/uvm_amap.c')
-rw-r--r--    sys/uvm/uvm_amap.c    170
1 file changed, 1 insertion(+), 169 deletions(-)
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index dc1506e74d3..6472c0f4bee 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.62 2016/03/16 16:53:43 stefan Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.63 2016/03/27 09:51:37 stefan Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -279,174 +279,6 @@ amap_free(struct vm_amap *amap)
}
/*
- * amap_extend: extend the size of an amap (if needed)
- *
- * => called from uvm_map when we want to extend an amap to cover
- * a new mapping (rather than allocate a new one)
- * => to safely extend an amap it should have a reference count of
- * one (thus it can't be shared)
- * => XXXCDC: support padding at this level?
- */
-int
-amap_extend(struct vm_map_entry *entry, vsize_t addsize)
-{
- struct vm_amap *amap = entry->aref.ar_amap;
- int slotoff = entry->aref.ar_pageoff;
- int slotmapped, slotadd, slotneed, slotalloc;
-#ifdef UVM_AMAP_PPREF
- int *newppref, *oldppref;
-#endif
- u_int *newsl, *newbck, *oldsl, *oldbck;
- struct vm_anon **newover, **oldover;
- int slotadded;
-
- /*
- * first, determine how many slots we need in the amap. don't
- * forget that ar_pageoff could be non-zero: this means that
- * there are some unused slots before us in the amap.
- */
- AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
- AMAP_B2SLOT(slotadd, addsize); /* slots to add */
- slotneed = slotoff + slotmapped + slotadd;
-
- /*
- * case 1: we already have enough slots in the map and thus
- * only need to bump the reference counts on the slots we are
- * adding.
- */
- if (amap->am_nslot >= slotneed) {
-#ifdef UVM_AMAP_PPREF
- if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
- amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1);
- }
-#endif
- return (0);
- }
-
- /*
- * case 2: we pre-allocated slots for use and we just need to
- * bump nslot up to take account for these slots.
- */
- if (amap->am_maxslot >= slotneed) {
-#ifdef UVM_AMAP_PPREF
- if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
- if ((slotoff + slotmapped) < amap->am_nslot)
- amap_pp_adjref(amap, slotoff + slotmapped,
- (amap->am_nslot - (slotoff + slotmapped)),
- 1);
- pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
- slotneed - amap->am_nslot);
- }
-#endif
- amap->am_nslot = slotneed;
- /*
- * no need to zero am_anon since that was done at
- * alloc time and we never shrink an allocation.
- */
- return (0);
- }
-
- /*
- * case 3: we need to malloc a new amap and copy all the amap
- * data over from old amap to the new one.
- *
- * XXXCDC: could we take advantage of a kernel realloc()?
- */
- if (slotneed >= UVM_AMAP_LARGE)
- return E2BIG;
-
- if (slotneed > UVM_AMAP_CHUNK)
- slotalloc = malloc_roundup(slotneed * MALLOC_SLOT_UNIT) /
- MALLOC_SLOT_UNIT;
- else
- slotalloc = slotneed;
-
-#ifdef UVM_AMAP_PPREF
- newppref = NULL;
- if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
- newppref = mallocarray(slotalloc, sizeof(int), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
- if (newppref == NULL) {
- /* give up if malloc fails */
- free(amap->am_ppref, M_UVMAMAP, 0);
- amap->am_ppref = PPREF_NONE;
- }
- }
-#endif
- if (slotneed > UVM_AMAP_CHUNK)
- newsl = malloc(slotalloc * MALLOC_SLOT_UNIT, M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
- else
- newsl = pool_get(&uvm_amap_slot_pools[slotalloc - 1],
- PR_WAITOK | PR_LIMITFAIL);
- if (newsl == NULL) {
-#ifdef UVM_AMAP_PPREF
- if (newppref != NULL) {
- free(newppref, M_UVMAMAP, 0);
- }
-#endif
- return (ENOMEM);
- }
- newbck = (int *)(((char *)newsl) + slotalloc * sizeof(int));
- newover = (struct vm_anon **)(((char *)newbck) + slotalloc *
- sizeof(int));
- KASSERT(amap->am_maxslot < slotneed);
-
- /* now copy everything over to new malloc'd areas... */
- slotadded = slotalloc - amap->am_nslot;
-
- /* do am_slots */
- oldsl = amap->am_slots;
- memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
- amap->am_slots = newsl;
-
- /* do am_anon */
- oldover = amap->am_anon;
- memcpy(newover, oldover, sizeof(struct vm_anon *) * amap->am_nslot);
- memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) *
- slotadded);
- amap->am_anon = newover;
-
- /* do am_bckptr */
- oldbck = amap->am_bckptr;
- memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
- memset(newbck + amap->am_nslot, 0, sizeof(int) * slotadded); /* XXX: needed? */
- amap->am_bckptr = newbck;
-
-#ifdef UVM_AMAP_PPREF
- /* do ppref */
- oldppref = amap->am_ppref;
- if (newppref) {
- memcpy(newppref, oldppref, sizeof(int) * amap->am_nslot);
- memset(newppref + amap->am_nslot, 0, sizeof(int) * slotadded);
- amap->am_ppref = newppref;
- if ((slotoff + slotmapped) < amap->am_nslot)
- amap_pp_adjref(amap, slotoff + slotmapped,
- (amap->am_nslot - (slotoff + slotmapped)), 1);
- pp_setreflen(newppref, amap->am_nslot, 1,
- slotneed - amap->am_nslot);
- }
-#endif
-
- /* free */
- if (amap->am_maxslot > UVM_AMAP_CHUNK)
- free(oldsl, M_UVMAMAP, 0);
- else
- pool_put(&uvm_amap_slot_pools[amap->am_maxslot - 1],
- oldsl);
-
- /* and update master values */
- amap->am_nslot = slotneed;
- amap->am_maxslot = slotalloc;
-
-#ifdef UVM_AMAP_PPREF
- if (oldppref && oldppref != PPREF_NONE)
- free(oldppref, M_UVMAMAP, 0);
-#endif
- return (0);
-}
-
-/*
* amap_wipeout: wipeout all anon's in an amap; then free the amap!
*
* => called from amap_unref when the final reference to an amap is