summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAriane van der Steldt <ariane@cvs.openbsd.org>2011-07-06 10:25:22 +0000
committerAriane van der Steldt <ariane@cvs.openbsd.org>2011-07-06 10:25:22 +0000
commitfbec5903cbd875a2350fe71e012021e43aec38c8 (patch)
tree04e844c5fdd9f30e2bc6f31db12952453c997253
parent02909a2dc2ffdde76a1229b482e840081b7466c3 (diff)
Hibernate memory allocator.
A simple first-fit allocator, intended to manage small ranges of memory. It is currently not called. Tested and ok mlarkin@, prodded by deraadt@
-rw-r--r--sys/conf/files3
-rw-r--r--sys/kern/subr_hiballoc.c206
-rw-r--r--sys/sys/hiballoc.h24
3 files changed, 232 insertions, 1 deletions
diff --git a/sys/conf/files b/sys/conf/files
index 7d0cdedbdb0..22329b3925f 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,4 +1,4 @@
-# $OpenBSD: files,v 1.517 2011/07/04 20:57:54 deraadt Exp $
+# $OpenBSD: files,v 1.518 2011/07/06 10:25:21 ariane Exp $
# $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
# @(#)files.newconf 7.5 (Berkeley) 5/10/93
@@ -721,6 +721,7 @@ file kern/subr_autoconf.c
file kern/subr_disk.c
file kern/subr_evcount.c
file kern/subr_extent.c
+file kern/subr_hiballoc.c !small_kernel
file kern/subr_log.c
file kern/subr_pool.c
file kern/dma_alloc.c
diff --git a/sys/kern/subr_hiballoc.c b/sys/kern/subr_hiballoc.c
new file mode 100644
index 00000000000..9d2e344b6af
--- /dev/null
+++ b/sys/kern/subr_hiballoc.c
@@ -0,0 +1,206 @@
+#include <sys/hiballoc.h>
+#include <sys/param.h>
+#include <sys/tree.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+
+
+/*
+ * Hib alloc enforced alignment.
+ */
+#define HIB_ALIGN 8 /* bytes alignment */
+
+/*
+ * sizeof builtin operation, but with alignment constraint.
+ */
+#define HIB_SIZEOF(_type) roundup(sizeof(_type), HIB_ALIGN)
+
+struct hiballoc_entry
+{
+ size_t hibe_use; /* bytes handed out to the caller (payload, HIB_ALIGN-rounded) */
+ size_t hibe_space; /* free bytes following the payload, available for carving */
+ RB_ENTRY(hiballoc_entry) hibe_entry; /* linkage in the arena's address-ordered RB tree */
+};
+
+/*
+ * Compare hiballoc entries based on the address they manage.
+ *
+ * Since the address is fixed, relative to struct hiballoc_entry,
+ * we just compare the hiballoc_entry pointers.
+ */
+static __inline int
+hibe_cmp(struct hiballoc_entry *l, struct hiballoc_entry *r)
+{
+ /* Order by entry address; the entry sits at a fixed offset before
+ * the memory it manages, so pointer order equals address order. */
+ if (l < r)
+ return -1;
+ return l > r ? 1 : 0;
+}
+
+RB_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)
+
+/*
+ * Given a hiballoc entry, return the address it manages.
+ */
+static __inline void*
+hib_entry_to_addr(struct hiballoc_entry *entry)
+{
+ /* The managed memory starts immediately after the (aligned) header. */
+ return (caddr_t)entry + HIB_SIZEOF(struct hiballoc_entry);
+}
+
+/*
+ * Given an address, find the hiballoc entry that corresponds.
+ */
+static __inline struct hiballoc_entry*
+hib_addr_to_entry(void* addr_param)
+{
+ /* Inverse of hib_entry_to_addr: step back over the aligned header. */
+ return (struct hiballoc_entry*)
+ ((caddr_t)addr_param - HIB_SIZEOF(struct hiballoc_entry));
+}
+
+RB_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)
+
+/*
+ * Allocate memory from the arena.
+ *
+ * Returns NULL if no memory is available.
+ */
+void*
+hib_alloc(struct hiballoc_arena *arena, size_t alloc_sz)
+{
+ struct hiballoc_entry *entry, *new_entry;
+ size_t find_sz;
+
+ /*
+ * Enforce alignment of HIB_ALIGN bytes.
+ *
+ * Note that, because the entry is put in front of the allocation,
+ * 0-byte allocations are guaranteed a unique address.
+ */
+ alloc_sz = roundup(alloc_sz, HIB_ALIGN);
+
+ /*
+ * Find an entry with hibe_space >= find_sz
+ * (payload plus room for the new entry's own header).
+ *
+ * If the root node is not large enough, we switch to tree traversal.
+ * Because all entries are made at the bottom of the free space,
+ * traversal from the end has a slightly better chance of yielding
+ * a sufficiently large space.
+ */
+ find_sz = alloc_sz + HIB_SIZEOF(struct hiballoc_entry);
+ entry = RB_ROOT(&arena->hib_addrs);
+ if (entry != NULL && entry->hibe_space < find_sz) {
+ RB_FOREACH_REVERSE(entry, hiballoc_addr, &arena->hib_addrs) {
+ if (entry->hibe_space >= find_sz)
+ break;
+ }
+ }
+
+ /*
+ * Insufficient or too fragmented memory.
+ * (RB_FOREACH_REVERSE leaves entry NULL when it runs off the tree.)
+ */
+ if (entry == NULL)
+ return NULL;
+
+ /*
+ * Create the new entry at the bottom of the free space of 'entry',
+ * i.e. right after its payload.
+ */
+ new_entry = (struct hiballoc_entry*)(
+ (caddr_t)hib_entry_to_addr(entry) + entry->hibe_use);
+ new_entry->hibe_space = entry->hibe_space - find_sz;
+ new_entry->hibe_use = alloc_sz;
+
+ /*
+ * Insert entry.
+ */
+ if (RB_INSERT(hiballoc_addr, &arena->hib_addrs, new_entry) != NULL)
+ panic("hib_alloc: insert failure");
+ entry->hibe_space = 0; /* donor's free space fully transferred */
+
+ /* Return address managed by entry. */
+ return hib_entry_to_addr(new_entry);
+}
+
+/*
+ * Free a pointer previously allocated from this arena.
+ *
+ * If addr is NULL, this will be silently accepted.
+ */
+void
+hib_free(struct hiballoc_arena *arena, void *addr)
+{
+ struct hiballoc_entry *entry, *prev;
+
+ if (addr == NULL)
+ return;
+
+ /*
+ * Derive entry from addr and check it is really in this arena.
+ */
+ entry = hib_addr_to_entry(addr);
+ if (RB_FIND(hiballoc_addr, &arena->hib_addrs, entry) != entry)
+ panic("hib_free: freed item %p not in hib arena", addr);
+
+ /*
+ * Give the space in entry to its predecessor.
+ *
+ * If entry has no predecessor, change its used space into free space
+ * instead.
+ */
+ prev = RB_PREV(hiballoc_addr, &arena->hib_addrs, entry);
+ if (prev != NULL &&
+ (void*)((caddr_t)prev + HIB_SIZEOF(struct hiballoc_entry) +
+ prev->hibe_use + prev->hibe_space) == entry) {
+ /* Merge entry: prev is physically adjacent, so prev absorbs
+ * entry's header, used bytes and trailing free space. */
+ RB_REMOVE(hiballoc_addr, &arena->hib_addrs, entry);
+ prev->hibe_space += HIB_SIZEOF(struct hiballoc_entry) +
+ entry->hibe_use + entry->hibe_space;
+ } else {
+ /* Flip used memory to free space; the entry itself stays in
+ * the tree so the region can be reused by hib_alloc. */
+ entry->hibe_space += entry->hibe_use;
+ entry->hibe_use = 0;
+ }
+}
+
+/*
+ * Initialize hiballoc.
+ *
+ * The allocator will manage memory at ptr, which is len bytes.
+ */
+int
+hiballoc_init(struct hiballoc_arena *arena, void *p_ptr, size_t p_len)
+{
+ struct hiballoc_entry *entry;
+ caddr_t ptr;
+ size_t len;
+
+ RB_INIT(&arena->hib_addrs);
+
+ /*
+ * Hib allocator enforces HIB_ALIGN alignment.
+ * Fixup ptr and len.
+ */
+ ptr = (caddr_t)roundup((vaddr_t)p_ptr, HIB_ALIGN);
+ /*
+ * Guard the subtraction below: if the region is smaller than the
+ * alignment fix-up, p_len - (ptr - p_ptr) would wrap around
+ * (size_t underflow) and yield an enormous bogus length.
+ */
+ if ((size_t)(ptr - (caddr_t)p_ptr) >= p_len)
+ return ENOMEM;
+ len = p_len - ((size_t)ptr - (size_t)p_ptr);
+ len &= ~((size_t)HIB_ALIGN - 1);
+
+ /*
+ * Insufficient memory to be able to allocate and also do bookkeeping.
+ */
+ if (len <= HIB_SIZEOF(struct hiballoc_entry))
+ return ENOMEM;
+
+ /*
+ * Create entry describing space.
+ */
+ entry = (struct hiballoc_entry*)ptr;
+ entry->hibe_use = 0;
+ entry->hibe_space = len - HIB_SIZEOF(struct hiballoc_entry);
+ RB_INSERT(hiballoc_addr, &arena->hib_addrs, entry);
+
+ return 0;
+}
diff --git a/sys/sys/hiballoc.h b/sys/sys/hiballoc.h
new file mode 100644
index 00000000000..51d8ad6bc75
--- /dev/null
+++ b/sys/sys/hiballoc.h
@@ -0,0 +1,24 @@
+#ifndef _SYS_HIBALLOC_H_
+#define _SYS_HIBALLOC_H_
+
+#include <sys/types.h>
+#include <sys/tree.h>
+
+struct hiballoc_entry;
+
+/*
+ * Hibernate allocator.
+ *
+ * Allocator operates from an arena, that is pre-allocated by the caller.
+ */
+struct hiballoc_arena
+{
+ /* Address-ordered RB tree of entries; each entry heads a region
+ * of the caller-supplied memory handed to hiballoc_init(). */
+ RB_HEAD(hiballoc_addr, hiballoc_entry)
+ hib_addrs;
+};
+
+void *hib_alloc(struct hiballoc_arena*, size_t); /* NULL on exhaustion */
+void hib_free(struct hiballoc_arena*, void*); /* NULL addr is a no-op */
+int hiballoc_init(struct hiballoc_arena*, void*, size_t len); /* 0 or ENOMEM */
+
+#endif /* _SYS_HIBALLOC_H_ */