author    Jonathan Gray <jsg@cvs.openbsd.org>    2016-12-11 08:40:05 +0000
committer Jonathan Gray <jsg@cvs.openbsd.org>    2016-12-11 08:40:05 +0000
commit    21ab4c9f31674b113c24177398ed39f29b7cd8e6
tree      8be392d7a792d9663c2586396be77bfd506f5164 /lib/mesa/src/gallium/auxiliary/pipebuffer
parent    a8f0a7916e26e550dd2a26e7188835c481978004
Import Mesa 13.0.2
Diffstat (limited to 'lib/mesa/src/gallium/auxiliary/pipebuffer')
-rw-r--r--  lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer.h        |   6
-rw-r--r--  lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c | 231
-rw-r--r--  lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c  |   4
-rw-r--r--  lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c  |   7
-rw-r--r--  lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.c         |  80
-rw-r--r--  lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.h         |  12
-rw-r--r--  lib/mesa/src/gallium/auxiliary/pipebuffer/pb_slab.c          | 252
-rw-r--r--  lib/mesa/src/gallium/auxiliary/pipebuffer/pb_slab.h          | 155
8 files changed, 573 insertions, 174 deletions
diff --git a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer.h b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer.h
index 803c1d391..33c23068c 100644
--- a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer.h
+++ b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer.h
@@ -87,9 +87,9 @@ struct pb_desc
/**
- * Size. Regular (32bit) unsigned for now.
+ * 64-bit type for GPU buffer sizes and offsets.
*/
-typedef unsigned pb_size;
+typedef uint64_t pb_size;
/**
@@ -98,8 +98,8 @@ typedef unsigned pb_size;
struct pb_buffer
{
struct pipe_reference reference;
- unsigned size;
unsigned alignment;
+ pb_size size;
unsigned usage;
/**
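With pb_size widened from unsigned to uint64_t, any printf-style format that
previously printed a buffer size with %u must switch to the <inttypes.h>
PRIu64 macro, which is what the pb_buffer_fenced.c and pb_bufmgr_debug.c hunks
below do. A minimal standalone sketch of the idiom (not part of the commit):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pb_size;   /* mirrors the new typedef above */

int main(void)
{
   pb_size size = (pb_size)3 << 32;   /* a size that no longer fits in 32 bits */
   printf("buffer size = %"PRIu64" bytes\n", size);
   return 0;
}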
diff --git a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index 2678268e9..7717d784f 100644
--- a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -40,6 +40,7 @@
#include <unistd.h>
#include <sched.h>
#endif
+#include <inttypes.h>
#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
@@ -108,14 +109,14 @@ struct fenced_manager
*/
struct fenced_buffer
{
- /*
+ /**
* Immutable members.
*/
struct pb_buffer base;
struct fenced_manager *mgr;
- /*
+ /**
* Following members are mutable and protected by fenced_manager::mutex.
*/
@@ -205,10 +206,10 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
curr = fenced_mgr->unfenced.next;
next = curr->next;
- while(curr != &fenced_mgr->unfenced) {
+ while (curr != &fenced_mgr->unfenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
assert(!fenced_buf->fence);
- debug_printf("%10p %7u %8u %7s\n",
+ debug_printf("%10p %"PRIu64" %8u %7s\n",
(void *) fenced_buf,
fenced_buf->base.size,
p_atomic_read(&fenced_buf->base.reference.count),
@@ -219,12 +220,12 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
curr = fenced_mgr->fenced.next;
next = curr->next;
- while(curr != &fenced_mgr->fenced) {
+ while (curr != &fenced_mgr->fenced) {
int signaled;
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
assert(fenced_buf->buffer);
signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
- debug_printf("%10p %7u %8u %7s %10p %s\n",
+ debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
(void *) fenced_buf,
fenced_buf->base.size,
p_atomic_read(&fenced_buf->base.reference.count),
@@ -340,7 +341,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->fence);
- if(fenced_buf->fence) {
+ if (fenced_buf->fence) {
struct pipe_fence_handle *fence = NULL;
int finished;
boolean proceed;
@@ -355,8 +356,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
assert(pipe_is_referenced(&fenced_buf->base.reference));
- /*
- * Only proceed if the fence object didn't change in the meanwhile.
+ /* Only proceed if the fence object didn't change in the meanwhile.
* Otherwise assume the work has been already carried out by another
* thread that re-aquired the lock before us.
*/
@@ -364,14 +364,9 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
ops->fence_reference(ops, &fence, NULL);
- if(proceed && finished == 0) {
- /*
- * Remove from the fenced list
- */
-
- boolean destroyed;
-
- destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+ if (proceed && finished == 0) {
+ /* Remove from the fenced list. */
+ boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
/* TODO: remove consequents buffers with the same fence? */
@@ -405,36 +400,33 @@ fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
curr = fenced_mgr->fenced.next;
next = curr->next;
- while(curr != &fenced_mgr->fenced) {
+ while (curr != &fenced_mgr->fenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
- if(fenced_buf->fence != prev_fence) {
- int signaled;
+ if (fenced_buf->fence != prev_fence) {
+ int signaled;
- if (wait) {
- signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
+ if (wait) {
+ signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
- /*
- * Don't return just now. Instead preemptively check if the
- * following buffers' fences already expired, without further waits.
- */
- wait = FALSE;
- }
- else {
- signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
- }
+ /* Don't return just now. Instead preemptively check if the
+ * following buffers' fences already expired, without further waits.
+ */
+ wait = FALSE;
+ } else {
+ signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+ }
- if (signaled != 0) {
- return ret;
+ if (signaled != 0) {
+ return ret;
}
- prev_fence = fenced_buf->fence;
- }
- else {
+ prev_fence = fenced_buf->fence;
+ } else {
/* This buffer's fence object is identical to the previous buffer's
* fence object, so no need to check the fence again.
*/
- assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
+ assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
}
fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
@@ -462,22 +454,21 @@ fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
curr = fenced_mgr->unfenced.next;
next = curr->next;
- while(curr != &fenced_mgr->unfenced) {
+ while (curr != &fenced_mgr->unfenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
- /*
- * We can only move storage if the buffer is not mapped and not
+ /* We can only move storage if the buffer is not mapped and not
* validated.
*/
- if(fenced_buf->buffer &&
+ if (fenced_buf->buffer &&
!fenced_buf->mapcount &&
!fenced_buf->vl) {
enum pipe_error ret;
ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
- if(ret == PIPE_OK) {
+ if (ret == PIPE_OK) {
ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
- if(ret == PIPE_OK) {
+ if (ret == PIPE_OK) {
fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
return TRUE;
}
@@ -499,7 +490,7 @@ fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
- if(fenced_buf->data) {
+ if (fenced_buf->data) {
align_free(fenced_buf->data);
fenced_buf->data = NULL;
assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
@@ -516,14 +507,14 @@ fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
struct fenced_buffer *fenced_buf)
{
assert(!fenced_buf->data);
- if(fenced_buf->data)
+ if (fenced_buf->data)
return PIPE_OK;
if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
return PIPE_ERROR_OUT_OF_MEMORY;
fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
- if(!fenced_buf->data)
+ if (!fenced_buf->data)
return PIPE_ERROR_OUT_OF_MEMORY;
fenced_mgr->cpu_total_size += fenced_buf->size;
@@ -538,7 +529,7 @@ fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
- if(fenced_buf->buffer) {
+ if (fenced_buf->buffer) {
pb_reference(&fenced_buf->buffer, NULL);
}
}
@@ -575,41 +566,37 @@ fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
{
assert(!fenced_buf->buffer);
- /*
- * Check for signaled buffers before trying to allocate.
- */
+ /* Check for signaled buffers before trying to allocate. */
fenced_manager_check_signalled_locked(fenced_mgr, FALSE);
fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
- /*
- * Keep trying while there is some sort of progress:
+ /* Keep trying while there is some sort of progress:
* - fences are expiring,
* - or buffers are being being swapped out from GPU memory into CPU memory.
*/
- while(!fenced_buf->buffer &&
+ while (!fenced_buf->buffer &&
(fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
}
- if(!fenced_buf->buffer && wait) {
- /*
- * Same as before, but this time around, wait to free buffers if
+ if (!fenced_buf->buffer && wait) {
+ /* Same as before, but this time around, wait to free buffers if
* necessary.
*/
- while(!fenced_buf->buffer &&
+ while (!fenced_buf->buffer &&
(fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
}
}
- if(!fenced_buf->buffer) {
- if(0)
+ if (!fenced_buf->buffer) {
+ if (0)
fenced_manager_dump_locked(fenced_mgr);
- /* give up */
+ /* Give up. */
return PIPE_ERROR_OUT_OF_MEMORY;
}
@@ -686,18 +673,16 @@ fenced_buffer_map(struct pb_buffer *buf,
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
- /*
- * Serialize writes.
- */
- while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
+ /* Serialize writes. */
+ while ((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
((fenced_buf->flags & PB_USAGE_GPU_READ) &&
(flags & PB_USAGE_CPU_WRITE))) {
- /*
- * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
+ /* Don't wait for the GPU to finish accessing it,
+ * if blocking is forbidden.
*/
- if((flags & PB_USAGE_DONTBLOCK) &&
- ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
+ if ((flags & PB_USAGE_DONTBLOCK) &&
+ ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
goto done;
}
@@ -705,17 +690,15 @@ fenced_buffer_map(struct pb_buffer *buf,
break;
}
- /*
- * Wait for the GPU to finish accessing. This will release and re-acquire
+ /* Wait for the GPU to finish accessing. This will release and re-acquire
* the mutex, so all copies of mutable state must be discarded.
*/
fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
}
- if(fenced_buf->buffer) {
+ if (fenced_buf->buffer) {
map = pb_map(fenced_buf->buffer, flags, flush_ctx);
- }
- else {
+ } else {
assert(fenced_buf->data);
map = fenced_buf->data;
}
@@ -725,7 +708,7 @@ fenced_buffer_map(struct pb_buffer *buf,
fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
}
-done:
+ done:
pipe_mutex_unlock(fenced_mgr->mutex);
return map;
@@ -741,12 +724,12 @@ fenced_buffer_unmap(struct pb_buffer *buf)
pipe_mutex_lock(fenced_mgr->mutex);
assert(fenced_buf->mapcount);
- if(fenced_buf->mapcount) {
+ if (fenced_buf->mapcount) {
if (fenced_buf->buffer)
pb_unmap(fenced_buf->buffer);
--fenced_buf->mapcount;
- if(!fenced_buf->mapcount)
- fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
+ if (!fenced_buf->mapcount)
+ fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
}
pipe_mutex_unlock(fenced_mgr->mutex);
@@ -765,7 +748,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
pipe_mutex_lock(fenced_mgr->mutex);
if (!vl) {
- /* invalidate */
+ /* Invalidate. */
fenced_buf->vl = NULL;
fenced_buf->validation_flags = 0;
ret = PIPE_OK;
@@ -776,40 +759,37 @@ fenced_buffer_validate(struct pb_buffer *buf,
assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
flags &= PB_USAGE_GPU_READ_WRITE;
- /* Buffer cannot be validated in two different lists */
- if(fenced_buf->vl && fenced_buf->vl != vl) {
+ /* Buffer cannot be validated in two different lists. */
+ if (fenced_buf->vl && fenced_buf->vl != vl) {
ret = PIPE_ERROR_RETRY;
goto done;
}
- if(fenced_buf->vl == vl &&
+ if (fenced_buf->vl == vl &&
(fenced_buf->validation_flags & flags) == flags) {
- /* Nothing to do -- buffer already validated */
+ /* Nothing to do -- buffer already validated. */
ret = PIPE_OK;
goto done;
}
- /*
- * Create and update GPU storage.
- */
- if(!fenced_buf->buffer) {
+ /* Create and update GPU storage. */
+ if (!fenced_buf->buffer) {
assert(!fenced_buf->mapcount);
ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
- if(ret != PIPE_OK) {
+ if (ret != PIPE_OK) {
goto done;
}
ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
- if(ret != PIPE_OK) {
+ if (ret != PIPE_OK) {
fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
goto done;
}
- if(fenced_buf->mapcount) {
+ if (fenced_buf->mapcount) {
debug_printf("warning: validating a buffer while it is still mapped\n");
- }
- else {
+ } else {
fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
}
}
@@ -821,7 +801,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
fenced_buf->vl = vl;
fenced_buf->validation_flags |= flags;
-done:
+ done:
pipe_mutex_unlock(fenced_mgr->mutex);
return ret;
@@ -841,13 +821,12 @@ fenced_buffer_fence(struct pb_buffer *buf,
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
- if(fence != fenced_buf->fence) {
+ if (fence != fenced_buf->fence) {
assert(fenced_buf->vl);
assert(fenced_buf->validation_flags);
if (fenced_buf->fence) {
- boolean destroyed;
- destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+ MAYBE_UNUSED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
assert(!destroyed);
}
if (fence) {
@@ -876,16 +855,15 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
pipe_mutex_lock(fenced_mgr->mutex);
- /*
- * This should only be called when the buffer is validated. Typically
+ /* This should only be called when the buffer is validated. Typically
* when processing relocations.
*/
assert(fenced_buf->vl);
assert(fenced_buf->buffer);
- if(fenced_buf->buffer)
+ if (fenced_buf->buffer) {
pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
- else {
+ } else {
*base_buf = buf;
*offset = 0;
}
@@ -896,12 +874,12 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
static const struct pb_vtbl
fenced_buffer_vtbl = {
- fenced_buffer_destroy,
- fenced_buffer_map,
- fenced_buffer_unmap,
- fenced_buffer_validate,
- fenced_buffer_fence,
- fenced_buffer_get_base_buffer
+ fenced_buffer_destroy,
+ fenced_buffer_map,
+ fenced_buffer_unmap,
+ fenced_buffer_validate,
+ fenced_buffer_fence,
+ fenced_buffer_get_base_buffer
};
@@ -917,12 +895,11 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
struct fenced_buffer *fenced_buf;
enum pipe_error ret;
- /*
- * Don't stall the GPU, waste time evicting buffers, or waste memory
+ /* Don't stall the GPU, waste time evicting buffers, or waste memory
* trying to create a buffer that will most likely never fit into the
* graphics aperture.
*/
- if(size > fenced_mgr->max_buffer_size) {
+ if (size > fenced_mgr->max_buffer_size) {
goto no_buffer;
}
@@ -942,29 +919,21 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
pipe_mutex_lock(fenced_mgr->mutex);
- /*
- * Try to create GPU storage without stalling,
- */
+ /* Try to create GPU storage without stalling. */
ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
- /*
- * Attempt to use CPU memory to avoid stalling the GPU.
- */
- if(ret != PIPE_OK) {
+ /* Attempt to use CPU memory to avoid stalling the GPU. */
+ if (ret != PIPE_OK) {
ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
}
- /*
- * Create GPU storage, waiting for some to be available.
- */
- if(ret != PIPE_OK) {
+ /* Create GPU storage, waiting for some to be available. */
+ if (ret != PIPE_OK) {
ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
}
- /*
- * Give up.
- */
- if(ret != PIPE_OK) {
+ /* Give up. */
+ if (ret != PIPE_OK) {
goto no_storage;
}
@@ -976,10 +945,10 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
return &fenced_buf->base;
-no_storage:
+ no_storage:
pipe_mutex_unlock(fenced_mgr->mutex);
FREE(fenced_buf);
-no_buffer:
+ no_buffer:
return NULL;
}
@@ -990,12 +959,12 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
pipe_mutex_lock(fenced_mgr->mutex);
- while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
+ while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
assert(fenced_mgr->provider->flush);
- if(fenced_mgr->provider->flush)
+ if (fenced_mgr->provider->flush)
fenced_mgr->provider->flush(fenced_mgr->provider);
}
@@ -1007,25 +976,25 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
pipe_mutex_lock(fenced_mgr->mutex);
- /* Wait on outstanding fences */
+ /* Wait on outstanding fences. */
while (fenced_mgr->num_fenced) {
pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
pipe_mutex_lock(fenced_mgr->mutex);
- while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
+ while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}
#ifdef DEBUG
- /*assert(!fenced_mgr->num_unfenced);*/
+ /* assert(!fenced_mgr->num_unfenced); */
#endif
pipe_mutex_unlock(fenced_mgr->mutex);
pipe_mutex_destroy(fenced_mgr->mutex);
- if(fenced_mgr->provider)
+ if (fenced_mgr->provider)
fenced_mgr->provider->destroy(fenced_mgr->provider);
fenced_mgr->ops->destroy(fenced_mgr->ops);
diff --git a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
index 4dbf3ff97..250f739c8 100644
--- a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
+++ b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
@@ -210,7 +210,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
/* get a buffer from the cache */
buf = (struct pb_cache_buffer *)
pb_cache_reclaim_buffer(&mgr->cache, size, desc->alignment,
- desc->usage);
+ desc->usage, 0);
if (buf)
return &buf->base;
@@ -243,7 +243,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
buf->base.vtbl = &pb_cache_buffer_vtbl;
buf->mgr = mgr;
- pb_cache_init_entry(&mgr->cache, &buf->cache_entry, &buf->base);
+ pb_cache_init_entry(&mgr->cache, &buf->cache_entry, &buf->base, 0);
return &buf->base;
}
diff --git a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
index 3d3a7aba7..4e36866e0 100644
--- a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
+++ b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -41,6 +41,7 @@
#include "util/list.h"
#include "util/u_time.h"
#include "util/u_debug_stack.h"
+#include <inttypes.h>
#include "pb_buffer.h"
#include "pb_bufmgr.h"
@@ -190,7 +191,7 @@ pb_debug_buffer_check(struct pb_debug_buffer *buf)
underflow = !check_random_pattern(map, buf->underflow_size,
&min_ofs, &max_ofs);
if(underflow) {
- debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
+ debug_printf("buffer underflow (offset -%"PRIu64"%s to -%"PRIu64" bytes) detected\n",
buf->underflow_size - min_ofs,
min_ofs == 0 ? "+" : "",
buf->underflow_size - max_ofs);
@@ -200,7 +201,7 @@ pb_debug_buffer_check(struct pb_debug_buffer *buf)
buf->overflow_size,
&min_ofs, &max_ofs);
if(overflow) {
- debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
+ debug_printf("buffer overflow (size %"PRIu64" plus offset %"PRIu64" to %"PRIu64"%s bytes) detected\n",
buf->base.size,
min_ofs,
max_ofs,
@@ -349,7 +350,7 @@ pb_debug_manager_dump_locked(struct pb_debug_manager *mgr)
buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);
debug_printf("buffer = %p\n", (void *) buf);
- debug_printf(" .size = 0x%x\n", buf->base.size);
+ debug_printf(" .size = 0x%"PRIx64"\n", buf->base.size);
debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
curr = next;
diff --git a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.c b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.c
index ebd06b0e0..a1ca67883 100644
--- a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.c
+++ b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.c
@@ -38,22 +38,23 @@ static void
destroy_buffer_locked(struct pb_cache_entry *entry)
{
struct pb_cache *mgr = entry->mgr;
+ struct pb_buffer *buf = entry->buffer;
- assert(!pipe_is_referenced(&entry->buffer->reference));
+ assert(!pipe_is_referenced(&buf->reference));
if (entry->head.next) {
LIST_DEL(&entry->head);
assert(mgr->num_buffers);
--mgr->num_buffers;
- mgr->cache_size -= entry->buffer->size;
+ mgr->cache_size -= buf->size;
}
- entry->mgr->destroy_buffer(entry->buffer);
+ mgr->destroy_buffer(buf);
}
/**
* Free as many cache buffers from the list head as possible.
*/
static void
-release_expired_buffers_locked(struct pb_cache *mgr)
+release_expired_buffers_locked(struct list_head *cache)
{
struct list_head *curr, *next;
struct pb_cache_entry *entry;
@@ -61,9 +62,9 @@ release_expired_buffers_locked(struct pb_cache *mgr)
now = os_time_get();
- curr = mgr->cache.next;
+ curr = cache->next;
next = curr->next;
- while (curr != &mgr->cache) {
+ while (curr != cache) {
entry = LIST_ENTRY(struct pb_cache_entry, curr, head);
if (!os_time_timeout(entry->start, entry->end, now))
@@ -84,24 +85,28 @@ void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
struct pb_cache *mgr = entry->mgr;
+ struct list_head *cache = &mgr->buckets[entry->bucket_index];
+ struct pb_buffer *buf = entry->buffer;
+ unsigned i;
pipe_mutex_lock(mgr->mutex);
- assert(!pipe_is_referenced(&entry->buffer->reference));
+ assert(!pipe_is_referenced(&buf->reference));
- release_expired_buffers_locked(mgr);
+ for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
+ release_expired_buffers_locked(&mgr->buckets[i]);
/* Directly release any buffer that exceeds the limit. */
- if (mgr->cache_size + entry->buffer->size > mgr->max_cache_size) {
- entry->mgr->destroy_buffer(entry->buffer);
+ if (mgr->cache_size + buf->size > mgr->max_cache_size) {
+ mgr->destroy_buffer(buf);
pipe_mutex_unlock(mgr->mutex);
return;
}
entry->start = os_time_get();
entry->end = entry->start + mgr->usecs;
- LIST_ADDTAIL(&entry->head, &mgr->cache);
+ LIST_ADDTAIL(&entry->head, cache);
++mgr->num_buffers;
- mgr->cache_size += entry->buffer->size;
+ mgr->cache_size += buf->size;
pipe_mutex_unlock(mgr->mutex);
}
@@ -114,25 +119,24 @@ static int
pb_cache_is_buffer_compat(struct pb_cache_entry *entry,
pb_size size, unsigned alignment, unsigned usage)
{
+ struct pb_cache *mgr = entry->mgr;
struct pb_buffer *buf = entry->buffer;
- if (usage & entry->mgr->bypass_usage)
- return 0;
-
- if (buf->size < size)
+ if (!pb_check_usage(usage, buf->usage))
return 0;
/* be lenient with size */
- if (buf->size > (unsigned) (entry->mgr->size_factor * size))
+ if (buf->size < size ||
+ buf->size > (unsigned) (mgr->size_factor * size))
return 0;
- if (!pb_check_alignment(alignment, buf->alignment))
+ if (usage & mgr->bypass_usage)
return 0;
- if (!pb_check_usage(usage, buf->usage))
+ if (!pb_check_alignment(alignment, buf->alignment))
return 0;
- return entry->mgr->can_reclaim(buf) ? 1 : -1;
+ return mgr->can_reclaim(buf) ? 1 : -1;
}
/**
@@ -141,23 +145,25 @@ pb_cache_is_buffer_compat(struct pb_cache_entry *entry,
*/
struct pb_buffer *
pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
- unsigned alignment, unsigned usage)
+ unsigned alignment, unsigned usage,
+ unsigned bucket_index)
{
struct pb_cache_entry *entry;
struct pb_cache_entry *cur_entry;
struct list_head *cur, *next;
int64_t now;
int ret = 0;
+ struct list_head *cache = &mgr->buckets[bucket_index];
pipe_mutex_lock(mgr->mutex);
entry = NULL;
- cur = mgr->cache.next;
+ cur = cache->next;
next = cur->next;
/* search in the expired buffers, freeing them in the process */
now = os_time_get();
- while (cur != &mgr->cache) {
+ while (cur != cache) {
cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
if (!entry && (ret = pb_cache_is_buffer_compat(cur_entry, size,
@@ -179,7 +185,7 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
/* keep searching in the hot buffers */
if (!entry && ret != -1) {
- while (cur != &mgr->cache) {
+ while (cur != cache) {
cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
ret = pb_cache_is_buffer_compat(cur_entry, size, alignment, usage);
@@ -220,26 +226,32 @@ pb_cache_release_all_buffers(struct pb_cache *mgr)
{
struct list_head *curr, *next;
struct pb_cache_entry *buf;
+ unsigned i;
pipe_mutex_lock(mgr->mutex);
- curr = mgr->cache.next;
- next = curr->next;
- while (curr != &mgr->cache) {
- buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
- destroy_buffer_locked(buf);
- curr = next;
+ for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
+ struct list_head *cache = &mgr->buckets[i];
+
+ curr = cache->next;
next = curr->next;
+ while (curr != cache) {
+ buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
+ destroy_buffer_locked(buf);
+ curr = next;
+ next = curr->next;
+ }
}
pipe_mutex_unlock(mgr->mutex);
}
void
pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
- struct pb_buffer *buf)
+ struct pb_buffer *buf, unsigned bucket_index)
{
memset(entry, 0, sizeof(*entry));
entry->buffer = buf;
entry->mgr = mgr;
+ entry->bucket_index = bucket_index;
}
/**
@@ -263,7 +275,11 @@ pb_cache_init(struct pb_cache *mgr, uint usecs, float size_factor,
void (*destroy_buffer)(struct pb_buffer *buf),
bool (*can_reclaim)(struct pb_buffer *buf))
{
- LIST_INITHEAD(&mgr->cache);
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
+ LIST_INITHEAD(&mgr->buckets[i]);
+
pipe_mutex_init(mgr->mutex);
mgr->cache_size = 0;
mgr->max_cache_size = maximum_cache_size;
diff --git a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.h b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.h
index f0fa01226..7000fcd1c 100644
--- a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.h
+++ b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_cache.h
@@ -42,11 +42,16 @@ struct pb_cache_entry
struct pb_buffer *buffer; /**< Pointer to the structure this is part of. */
struct pb_cache *mgr;
int64_t start, end; /**< Caching time interval */
+ unsigned bucket_index;
};
struct pb_cache
{
- struct list_head cache;
+ /* The cache is divided into buckets for minimizing cache misses.
+ * The driver controls which buffer goes into which bucket.
+ */
+ struct list_head buckets[4];
+
pipe_mutex mutex;
uint64_t cache_size;
uint64_t max_cache_size;
@@ -61,10 +66,11 @@ struct pb_cache
void pb_cache_add_buffer(struct pb_cache_entry *entry);
struct pb_buffer *pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
- unsigned alignment, unsigned usage);
+ unsigned alignment, unsigned usage,
+ unsigned bucket_index);
void pb_cache_release_all_buffers(struct pb_cache *mgr);
void pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
- struct pb_buffer *buf);
+ struct pb_buffer *buf, unsigned bucket_index);
void pb_cache_init(struct pb_cache *mgr, uint usecs, float size_factor,
unsigned bypass_usage, uint64_t maximum_cache_size,
void (*destroy_buffer)(struct pb_buffer *buf),
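The new bucket_index threads a driver-chosen bucket through pb_cache_init_entry
and pb_cache_reclaim_buffer, so only buffers filed under the same bucket are
considered for reuse (pb_bufmgr_cache.c above simply passes 0). A hedged sketch
of one plausible driver-side scheme follows; the heap enum and helper are
hypothetical, and only the constraint that the index stays below
ARRAY_SIZE(buckets) == 4 comes from the header:

#include <stdbool.h>

enum my_heap { MY_HEAP_VRAM = 0, MY_HEAP_GTT = 1 };   /* hypothetical */

/* Hypothetical helper: 2 heaps x 2 visibility classes -> indices 0..3,
 * matching struct pb_cache::buckets[4].
 */
static unsigned
my_bucket_index(enum my_heap heap, bool cpu_visible)
{
   return (unsigned)heap * 2 + (cpu_visible ? 1u : 0u);
}

/* A buffer must be cached and reclaimed with the same index, e.g.:
 *
 *   unsigned bucket = my_bucket_index(heap, cpu_visible);
 *   buf = pb_cache_reclaim_buffer(&mgr->cache, size, alignment, usage, bucket);
 *   if (!buf) {
 *      ... allocate a fresh buffer ...
 *      pb_cache_init_entry(&mgr->cache, &buf->cache_entry, &buf->base, bucket);
 *   }
 */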
diff --git a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_slab.c b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_slab.c
new file mode 100644
index 000000000..79529dfe5
--- /dev/null
+++ b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_slab.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "pb_slab.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+/* All slab allocations from the same heap and with the same size belong
+ * to the same group.
+ */
+struct pb_slab_group
+{
+ /* Slabs with allocation candidates. Typically, slabs in this list should
+ * have some free entries.
+ *
+ * However, when the head becomes full we purposefully keep it around
+ * until the next allocation attempt, at which time we try a reclaim.
+ * The intention is to keep serving allocations from the same slab as long
+ * as possible for better locality.
+ *
+ * Due to a race in new slab allocation, additional slabs in this list
+ * can be fully allocated as well.
+ */
+ struct list_head slabs;
+};
+
+
+static void
+pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
+{
+ struct pb_slab *slab = entry->slab;
+
+ LIST_DEL(&entry->head); /* remove from reclaim list */
+ LIST_ADD(&entry->head, &slab->free);
+ slab->num_free++;
+
+ /* Add slab to the group's list if it isn't already linked. */
+ if (!slab->head.next) {
+ struct pb_slab_group *group = &slabs->groups[entry->group_index];
+ LIST_ADDTAIL(&slab->head, &group->slabs);
+ }
+
+ if (slab->num_free >= slab->num_entries) {
+ LIST_DEL(&slab->head);
+ slabs->slab_free(slabs->priv, slab);
+ }
+}
+
+static void
+pb_slabs_reclaim_locked(struct pb_slabs *slabs)
+{
+ while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+ struct pb_slab_entry *entry =
+ LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
+
+ if (!slabs->can_reclaim(slabs->priv, entry))
+ break;
+
+ pb_slab_reclaim(slabs, entry);
+ }
+}
+
+/* Allocate a slab entry of the given size from the given heap.
+ *
+ * This will try to re-use entries that have previously been freed. However,
+ * if no entries are free (or all free entries are still "in flight" as
+ * determined by the can_reclaim fallback function), a new slab will be
+ * requested via the slab_alloc callback.
+ *
+ * Note that slab_free can also be called by this function.
+ */
+struct pb_slab_entry *
+pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
+{
+ unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
+ unsigned group_index;
+ struct pb_slab_group *group;
+ struct pb_slab *slab;
+ struct pb_slab_entry *entry;
+
+ assert(order < slabs->min_order + slabs->num_orders);
+ assert(heap < slabs->num_heaps);
+
+ group_index = heap * slabs->num_orders + (order - slabs->min_order);
+ group = &slabs->groups[group_index];
+
+ pipe_mutex_lock(slabs->mutex);
+
+ /* If there is no candidate slab at all, or the first slab has no free
+ * entries, try reclaiming entries.
+ */
+ if (LIST_IS_EMPTY(&group->slabs) ||
+ LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
+ pb_slabs_reclaim_locked(slabs);
+
+ /* Remove slabs without free entries. */
+ while (!LIST_IS_EMPTY(&group->slabs)) {
+ slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
+ if (!LIST_IS_EMPTY(&slab->free))
+ break;
+
+ LIST_DEL(&slab->head);
+ }
+
+ if (LIST_IS_EMPTY(&group->slabs)) {
+ /* Drop the mutex temporarily to prevent a deadlock where the allocation
+ * calls back into slab functions (most likely to happen for
+ * pb_slab_reclaim if memory is low).
+ *
+ * There's a chance that racing threads will end up allocating multiple
+ * slabs for the same group, but that doesn't hurt correctness.
+ */
+ pipe_mutex_unlock(slabs->mutex);
+ slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
+ if (!slab)
+ return NULL;
+ pipe_mutex_lock(slabs->mutex);
+
+ LIST_ADD(&slab->head, &group->slabs);
+ }
+
+ entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
+ LIST_DEL(&entry->head);
+ slab->num_free--;
+
+ pipe_mutex_unlock(slabs->mutex);
+
+ return entry;
+}
+
+/* Free the given slab entry.
+ *
+ * The entry may still be in use e.g. by in-flight command submissions. The
+ * can_reclaim callback function will be called to determine whether the entry
+ * can be handed out again by pb_slab_alloc.
+ */
+void
+pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
+{
+ pipe_mutex_lock(slabs->mutex);
+ LIST_ADDTAIL(&entry->head, &slabs->reclaim);
+ pipe_mutex_unlock(slabs->mutex);
+}
+
+/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
+ *
+ * This may end up freeing some slabs and is therefore useful to try to reclaim
+ * some no longer used memory. However, calling this function is not strictly
+ * required since pb_slab_alloc will eventually do the same thing.
+ */
+void
+pb_slabs_reclaim(struct pb_slabs *slabs)
+{
+ pipe_mutex_lock(slabs->mutex);
+ pb_slabs_reclaim_locked(slabs);
+ pipe_mutex_unlock(slabs->mutex);
+}
+
+/* Initialize the slabs manager.
+ *
+ * The minimum and maximum size of slab entries are 2^min_order and
+ * 2^max_order, respectively.
+ *
+ * priv will be passed to the given callback functions.
+ */
+bool
+pb_slabs_init(struct pb_slabs *slabs,
+ unsigned min_order, unsigned max_order,
+ unsigned num_heaps,
+ void *priv,
+ slab_can_reclaim_fn *can_reclaim,
+ slab_alloc_fn *slab_alloc,
+ slab_free_fn *slab_free)
+{
+ unsigned num_groups;
+ unsigned i;
+
+ assert(min_order <= max_order);
+ assert(max_order < sizeof(unsigned) * 8 - 1);
+
+ slabs->min_order = min_order;
+ slabs->num_orders = max_order - min_order + 1;
+ slabs->num_heaps = num_heaps;
+
+ slabs->priv = priv;
+ slabs->can_reclaim = can_reclaim;
+ slabs->slab_alloc = slab_alloc;
+ slabs->slab_free = slab_free;
+
+ LIST_INITHEAD(&slabs->reclaim);
+
+ num_groups = slabs->num_orders * slabs->num_heaps;
+ slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
+ if (!slabs->groups)
+ return false;
+
+ for (i = 0; i < num_groups; ++i) {
+ struct pb_slab_group *group = &slabs->groups[i];
+ LIST_INITHEAD(&group->slabs);
+ }
+
+ pipe_mutex_init(slabs->mutex);
+
+ return true;
+}
+
+/* Shutdown the slab manager.
+ *
+ * This will free all allocated slabs and internal structures, even if some
+ * of the slab entries are still in flight (i.e. if can_reclaim would return
+ * false).
+ */
+void
+pb_slabs_deinit(struct pb_slabs *slabs)
+{
+ /* Reclaim all slab entries (even those that are still in flight). This
+ * implicitly calls slab_free for everything.
+ */
+ while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+ struct pb_slab_entry *entry =
+ LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
+ pb_slab_reclaim(slabs, entry);
+ }
+
+ FREE(slabs->groups);
+ pipe_mutex_destroy(slabs->mutex);
+}
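In pb_slab_alloc above, the (heap, size) pair is mapped to a group by rounding
the size up to a power of two. A small standalone sketch of that arithmetic
with arbitrarily chosen orders (the real code uses util_logbase2_ceil and MAX2):

#include <assert.h>
#include <stdio.h>

/* Same rounding as util_logbase2_ceil(): smallest n with (1 << n) >= size. */
static unsigned log2_ceil(unsigned size)
{
   unsigned n = 0;
   while ((1u << n) < size)
      n++;
   return n;
}

int main(void)
{
   /* Arbitrary example configuration: entries of 256..65536 bytes, 2 heaps. */
   unsigned min_order = 8, max_order = 16, num_heaps = 2;
   unsigned num_orders = max_order - min_order + 1;

   unsigned size = 3000, heap = 1;       /* a 3000-byte request from heap 1 */
   unsigned order = log2_ceil(size);     /* 12, i.e. rounded up to 4096 */
   if (order < min_order)
      order = min_order;
   unsigned group_index = heap * num_orders + (order - min_order);

   assert(order <= max_order && heap < num_heaps);
   printf("order = %u, entry size = %u, group_index = %u\n",
          order, 1u << order, group_index);
   /* prints: order = 12, entry size = 4096, group_index = 13 */
   return 0;
}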
diff --git a/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_slab.h b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_slab.h
new file mode 100644
index 000000000..e779d95e0
--- /dev/null
+++ b/lib/mesa/src/gallium/auxiliary/pipebuffer/pb_slab.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * \file
+ *
+ * Helper library for carving out smaller allocations (called "(slab) entries")
+ * from larger buffers (called "slabs").
+ *
+ * The library supports maintaining separate heaps (e.g. VRAM vs. GTT). The
+ * meaning of each heap is treated as opaque by this library.
+ *
+ * The library allows delaying the re-use of an entry, i.e. an entry may be
+ * freed by calling \ref pb_slab_free even while the corresponding buffer
+ * region is still in use by the GPU. A callback function is called to
+ * determine when it is safe to allocate the entry again; the user of this
+ * library is expected to maintain the required fences or similar.
+ */
+
+#ifndef PB_SLAB_H
+#define PB_SLAB_H
+
+#include "pb_buffer.h"
+#include "util/list.h"
+#include "os/os_thread.h"
+
+struct pb_slab;
+struct pb_slabs;
+struct pb_slab_group;
+
+/* Descriptor of a slab entry.
+ *
+ * The user of this utility library is expected to embed this in a larger
+ * structure that describes a buffer object.
+ */
+struct pb_slab_entry
+{
+ struct list_head head;
+ struct pb_slab *slab; /* the slab that contains this buffer */
+ unsigned group_index; /* index into pb_slabs::groups */
+};
+
+/* Descriptor of a slab from which many entries are carved out.
+ *
+ * The user of this utility library is expected to embed this in a larger
+ * structure that describes a buffer object.
+ */
+struct pb_slab
+{
+ struct list_head head;
+
+ struct list_head free; /* list of free pb_slab_entry structures */
+ unsigned num_free; /* number of entries in free list */
+ unsigned num_entries; /* total number of entries */
+};
+
+/* Callback function that is called when a new slab needs to be allocated
+ * for fulfilling allocation requests of the given size from the given heap.
+ *
+ * The callback must allocate a pb_slab structure and the desired number
+ * of entries. All entries that belong to the slab must be added to the free
+ * list. Entries' pb_slab_entry structures must be initialized with the given
+ * group_index.
+ *
+ * The callback may call pb_slab functions.
+ */
+typedef struct pb_slab *(slab_alloc_fn)(void *priv,
+ unsigned heap,
+ unsigned entry_size,
+ unsigned group_index);
+
+/* Callback function that is called when all entries of a slab have been freed.
+ *
+ * The callback must free the slab and all its entries. It must not call any of
+ * the pb_slab functions, or a deadlock (recursive mutex lock) may occur.
+ */
+typedef void (slab_free_fn)(void *priv, struct pb_slab *);
+
+/* Callback function to determine whether a given entry can already be reused.
+ */
+typedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);
+
+/* Manager of slab allocations. The user of this utility library should embed
+ * this in a structure somewhere and call pb_slab_init/deinit at init/shutdown
+ * time.
+ */
+struct pb_slabs
+{
+ pipe_mutex mutex;
+
+ unsigned min_order;
+ unsigned num_orders;
+ unsigned num_heaps;
+
+ /* One group per (heap, order) pair. */
+ struct pb_slab_group *groups;
+
+ /* List of entries waiting to be reclaimed, i.e. they have been passed to
+ * pb_slab_free, but may not be safe for re-use yet. The tail points at
+ * the most-recently freed entry.
+ */
+ struct list_head reclaim;
+
+ void *priv;
+ slab_can_reclaim_fn *can_reclaim;
+ slab_alloc_fn *slab_alloc;
+ slab_free_fn *slab_free;
+};
+
+struct pb_slab_entry *
+pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
+
+void
+pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry);
+
+void
+pb_slabs_reclaim(struct pb_slabs *slabs);
+
+bool
+pb_slabs_init(struct pb_slabs *slabs,
+ unsigned min_order, unsigned max_order,
+ unsigned num_heaps,
+ void *priv,
+ slab_can_reclaim_fn *can_reclaim,
+ slab_alloc_fn *slab_alloc,
+ slab_free_fn *slab_free);
+
+void
+pb_slabs_deinit(struct pb_slabs *slabs);
+
+#endif
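For reference, a hedged usage sketch of the API declared above, with fictitious
my_buffer/my_slab driver types and an arbitrary entry count. The real consumers
are Mesa's winsys buffer managers; this is not copied from them:

#include <stdbool.h>
#include "pb_slab.h"
#include "util/u_memory.h"

/* Hypothetical driver-side types embedding the library structs. */
struct my_buffer {
   struct pb_slab_entry entry;
   bool gpu_idle;                 /* set by the driver's fence tracking */
};

struct my_slab {
   struct pb_slab base;
   struct my_buffer *buffers;     /* num_entries backing objects */
   /* ... the GPU allocation backing this slab would live here ... */
};

static bool my_can_reclaim(void *priv, struct pb_slab_entry *entry)
{
   /* The entry may be handed out again once the GPU is done with it. */
   return ((struct my_buffer *)entry)->gpu_idle;
}

static struct pb_slab *my_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size, unsigned group_index)
{
   /* A real driver would allocate heap-specific GPU memory of
    * num_entries * entry_size bytes here; this sketch only sets up the
    * bookkeeping required by slab_alloc_fn.
    */
   unsigned num_entries = 64;     /* arbitrary */
   struct my_slab *slab = CALLOC_STRUCT(my_slab);

   if (!slab)
      return NULL;
   slab->buffers = CALLOC(num_entries, sizeof(*slab->buffers));
   if (!slab->buffers) {
      FREE(slab);
      return NULL;
   }

   /* Every entry starts on the slab's free list, initialized with the
    * given group_index, as the slab_alloc_fn contract requires.
    */
   LIST_INITHEAD(&slab->base.free);
   slab->base.num_entries = slab->base.num_free = num_entries;
   for (unsigned i = 0; i < num_entries; i++) {
      slab->buffers[i].entry.slab = &slab->base;
      slab->buffers[i].entry.group_index = group_index;
      LIST_ADDTAIL(&slab->buffers[i].entry.head, &slab->base.free);
   }
   return &slab->base;
}

static void my_slab_free(void *priv, struct pb_slab *pslab)
{
   struct my_slab *slab = (struct my_slab *)pslab;
   FREE(slab->buffers);
   FREE(slab);
}

/* Init/alloc/free flow:
 *
 *   struct pb_slabs slabs;
 *   pb_slabs_init(&slabs, 8, 16, 2, NULL,
 *                 my_can_reclaim, my_slab_alloc, my_slab_free);
 *   struct pb_slab_entry *e = pb_slab_alloc(&slabs, 3000, 0);
 *   ...
 *   pb_slab_free(&slabs, e);
 *   pb_slabs_deinit(&slabs);
 */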