author    Jonathan Gray <jsg@cvs.openbsd.org>  2015-11-22 02:46:45 +0000
committer Jonathan Gray <jsg@cvs.openbsd.org>  2015-11-22 02:46:45 +0000
commit    3e40341f9dcd7c1bbc9afb8ddb812304820396cf (patch)
tree      274b3f522afe1da16ab2b5347758c908bc23fac4 /lib/mesa/src/gallium/winsys/amdgpu/drm
parent    7b644ad52b574bec410d557155d666ac17fdf51a (diff)
import Mesa 11.0.6
Diffstat (limited to 'lib/mesa/src/gallium/winsys/amdgpu/drm')
-rw-r--r--  lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c      | 1290
-rw-r--r--  lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h      |   97
-rw-r--r--  lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c      | 1385
-rw-r--r--  lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h      |  157
-rw-r--r--  lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c |  877
-rw-r--r--  lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c  |  343
-rw-r--r--  lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h  |   30
7 files changed, 918 insertions, 3261 deletions
diff --git a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 6bdcce53d..fe55dc310 100644
--- a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -36,41 +36,50 @@
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
-#include <inttypes.h>
-/* Set to 1 for verbose output showing committed sparse buffer ranges. */
-#define DEBUG_SPARSE_COMMITS 0
+static const struct pb_vtbl amdgpu_winsys_bo_vtbl;
-struct amdgpu_sparse_backing_chunk {
- uint32_t begin, end;
-};
+static inline struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
+{
+ assert(bo->vtbl == &amdgpu_winsys_bo_vtbl);
+ return (struct amdgpu_winsys_bo *)bo;
+}
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
- uint64_t size,
- unsigned alignment,
- enum radeon_bo_domain domain,
- enum radeon_bo_flag flags);
+struct amdgpu_bomgr {
+ struct pb_manager base;
+ struct amdgpu_winsys *rws;
+};
-static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
- enum radeon_bo_usage usage)
+static struct amdgpu_winsys *get_winsys(struct pb_manager *mgr)
{
- struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
- struct amdgpu_winsys *ws = bo->ws;
- int64_t abs_timeout;
+ return ((struct amdgpu_bomgr*)mgr)->rws;
+}
- if (timeout == 0) {
- if (p_atomic_read(&bo->num_active_ioctls))
- return false;
+static struct amdgpu_winsys_bo *get_amdgpu_winsys_bo(struct pb_buffer *_buf)
+{
+ struct amdgpu_winsys_bo *bo = NULL;
+ if (_buf->vtbl == &amdgpu_winsys_bo_vtbl) {
+ bo = amdgpu_winsys_bo(_buf);
} else {
- abs_timeout = os_time_get_absolute_timeout(timeout);
+ struct pb_buffer *base_buf;
+ pb_size offset;
+ pb_get_base_buffer(_buf, &base_buf, &offset);
- /* Wait if any ioctl is being submitted with this buffer. */
- if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
- return false;
+ if (base_buf->vtbl == &amdgpu_winsys_bo_vtbl)
+ bo = amdgpu_winsys_bo(base_buf);
}
+ return bo;
+}
+
+static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
+ enum radeon_bo_usage usage)
+{
+ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
+ struct amdgpu_winsys *ws = bo->rws;
+ int i;
+
if (bo->is_shared) {
/* We can't use user fences for shared buffers, because user fences
* are local to this process only. If we want to wait for all buffer
@@ -87,137 +96,91 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
}
if (timeout == 0) {
- unsigned idle_fences;
- bool buffer_idle;
-
- mtx_lock(&ws->bo_fence_lock);
-
- for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
- if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
- break;
- }
-
- /* Release the idle fences to avoid checking them again later. */
- for (unsigned i = 0; i < idle_fences; ++i)
- amdgpu_fence_reference(&bo->fences[i], NULL);
-
- memmove(&bo->fences[0], &bo->fences[idle_fences],
- (bo->num_fences - idle_fences) * sizeof(*bo->fences));
- bo->num_fences -= idle_fences;
-
- buffer_idle = !bo->num_fences;
- mtx_unlock(&ws->bo_fence_lock);
+ /* Timeout == 0 is quite simple. */
+ pipe_mutex_lock(ws->bo_fence_lock);
+ for (i = 0; i < RING_LAST; i++)
+ if (bo->fence[i]) {
+ if (amdgpu_fence_wait(bo->fence[i], 0, false)) {
+ /* Release the idle fence to avoid checking it again later. */
+ amdgpu_fence_reference(&bo->fence[i], NULL);
+ } else {
+ pipe_mutex_unlock(ws->bo_fence_lock);
+ return false;
+ }
+ }
+ pipe_mutex_unlock(ws->bo_fence_lock);
+ return true;
- return buffer_idle;
} else {
+ struct pipe_fence_handle *fence[RING_LAST] = {};
+ bool fence_idle[RING_LAST] = {};
bool buffer_idle = true;
-
- mtx_lock(&ws->bo_fence_lock);
- while (bo->num_fences && buffer_idle) {
- struct pipe_fence_handle *fence = NULL;
- bool fence_idle = false;
-
- amdgpu_fence_reference(&fence, bo->fences[0]);
-
- /* Wait for the fence. */
- mtx_unlock(&ws->bo_fence_lock);
- if (amdgpu_fence_wait(fence, abs_timeout, true))
- fence_idle = true;
- else
- buffer_idle = false;
- mtx_lock(&ws->bo_fence_lock);
-
- /* Release an idle fence to avoid checking it again later, keeping in
- * mind that the fence array may have been modified by other threads.
- */
- if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
- amdgpu_fence_reference(&bo->fences[0], NULL);
- memmove(&bo->fences[0], &bo->fences[1],
- (bo->num_fences - 1) * sizeof(*bo->fences));
- bo->num_fences--;
+ int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
+
+ /* Take references to all fences, so that we can wait for them
+ * without the lock. */
+ pipe_mutex_lock(ws->bo_fence_lock);
+ for (i = 0; i < RING_LAST; i++)
+ amdgpu_fence_reference(&fence[i], bo->fence[i]);
+ pipe_mutex_unlock(ws->bo_fence_lock);
+
+ /* Now wait for the fences. */
+ for (i = 0; i < RING_LAST; i++) {
+ if (fence[i]) {
+ if (amdgpu_fence_wait(fence[i], abs_timeout, true))
+ fence_idle[i] = true;
+ else
+ buffer_idle = false;
}
+ }
+
+ /* Release idle fences to avoid checking them again later. */
+ pipe_mutex_lock(ws->bo_fence_lock);
+ for (i = 0; i < RING_LAST; i++) {
+ if (fence[i] == bo->fence[i] && fence_idle[i])
+ amdgpu_fence_reference(&bo->fence[i], NULL);
- amdgpu_fence_reference(&fence, NULL);
+ amdgpu_fence_reference(&fence[i], NULL);
}
- mtx_unlock(&ws->bo_fence_lock);
+ pipe_mutex_unlock(ws->bo_fence_lock);
return buffer_idle;
}
}
static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
- struct pb_buffer *buf)
+ struct radeon_winsys_cs_handle *buf)
{
return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}
-static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
-{
- for (unsigned i = 0; i < bo->num_fences; ++i)
- amdgpu_fence_reference(&bo->fences[i], NULL);
-
- FREE(bo->fences);
- bo->num_fences = 0;
- bo->max_fences = 0;
-}
-
-void amdgpu_bo_destroy(struct pb_buffer *_buf)
+static void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
-
- assert(bo->bo && "must not be called for slab entries");
-
- mtx_lock(&bo->ws->global_bo_list_lock);
- LIST_DEL(&bo->u.real.global_list_item);
- bo->ws->num_buffers--;
- mtx_unlock(&bo->ws->global_bo_list_lock);
+ int i;
amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
- amdgpu_va_range_free(bo->u.real.va_handle);
+ amdgpu_va_range_free(bo->va_handle);
amdgpu_bo_free(bo->bo);
- amdgpu_bo_remove_fences(bo);
+ for (i = 0; i < RING_LAST; i++)
+ amdgpu_fence_reference(&bo->fence[i], NULL);
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
+ bo->rws->allocated_vram -= align(bo->base.size, bo->rws->gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
-
- if (bo->u.real.map_count >= 1) {
- if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->ws->mapped_vram -= bo->base.size;
- else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->ws->mapped_gtt -= bo->base.size;
- bo->ws->num_mapped_buffers--;
- }
-
+ bo->rws->allocated_gtt -= align(bo->base.size, bo->rws->gart_page_size);
FREE(bo);
}
-static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
-{
- struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
-
- assert(bo->bo); /* slab buffers have a separate vtbl */
-
- if (bo->u.real.use_reusable_pool)
- pb_cache_add_buffer(&bo->u.real.cache_entry);
- else
- amdgpu_bo_destroy(_buf);
-}
-
-static void *amdgpu_bo_map(struct pb_buffer *buf,
+static void *amdgpu_bo_map(struct radeon_winsys_cs_handle *buf,
struct radeon_winsys_cs *rcs,
enum pipe_transfer_usage usage)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
- struct amdgpu_winsys_bo *real;
struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
int r;
void *cpu = NULL;
- uint64_t offset = 0;
-
- assert(!bo->sparse);
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
@@ -263,160 +226,114 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
* (neither one is changing it).
*
* Only check whether the buffer is being used for write. */
- if (cs) {
- if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
- RADEON_USAGE_WRITE)) {
- cs->flush_cs(cs->flush_data, 0, NULL);
- } else {
- /* Try to avoid busy-waiting in amdgpu_bo_wait. */
- if (p_atomic_read(&bo->num_active_ioctls))
- amdgpu_cs_sync_flush(rcs);
- }
+ if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
+ RADEON_USAGE_WRITE)) {
+ cs->flush_cs(cs->flush_data, 0, NULL);
}
-
amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
- if (cs) {
- if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, 0, NULL);
- } else {
- /* Try to avoid busy-waiting in amdgpu_bo_wait. */
- if (p_atomic_read(&bo->num_active_ioctls))
- amdgpu_cs_sync_flush(rcs);
- }
- }
+ if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
+ cs->flush_cs(cs->flush_data, 0, NULL);
amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
}
- bo->ws->buffer_wait_time += os_time_get_nano() - time;
+ bo->rws->buffer_wait_time += os_time_get_nano() - time;
}
}
/* If the buffer is created from user memory, return the user pointer. */
if (bo->user_ptr)
- return bo->user_ptr;
-
- if (bo->bo) {
- real = bo;
- } else {
- real = bo->u.slab.real;
- offset = bo->va - real->va;
- }
+ return bo->user_ptr;
- r = amdgpu_bo_cpu_map(real->bo, &cpu);
- if (r) {
- /* Clear the cache and try again. */
- pb_cache_release_all_buffers(&real->ws->bo_cache);
- r = amdgpu_bo_cpu_map(real->bo, &cpu);
- if (r)
- return NULL;
- }
-
- if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
- if (real->initial_domain & RADEON_DOMAIN_VRAM)
- real->ws->mapped_vram += real->base.size;
- else if (real->initial_domain & RADEON_DOMAIN_GTT)
- real->ws->mapped_gtt += real->base.size;
- real->ws->num_mapped_buffers++;
- }
- return (uint8_t*)cpu + offset;
+ r = amdgpu_bo_cpu_map(bo->bo, &cpu);
+ return r ? NULL : cpu;
}
-static void amdgpu_bo_unmap(struct pb_buffer *buf)
+static void amdgpu_bo_unmap(struct radeon_winsys_cs_handle *buf)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
- struct amdgpu_winsys_bo *real;
- assert(!bo->sparse);
-
- if (bo->user_ptr)
- return;
+ amdgpu_bo_cpu_unmap(bo->bo);
+}
- real = bo->bo ? bo : bo->u.slab.real;
+static void amdgpu_bo_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ *base_buf = buf;
+ *offset = 0;
+}
- if (p_atomic_dec_zero(&real->u.real.map_count)) {
- if (real->initial_domain & RADEON_DOMAIN_VRAM)
- real->ws->mapped_vram -= real->base.size;
- else if (real->initial_domain & RADEON_DOMAIN_GTT)
- real->ws->mapped_gtt -= real->base.size;
- real->ws->num_mapped_buffers--;
- }
+static enum pipe_error amdgpu_bo_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ /* Always pinned */
+ return PIPE_OK;
+}
- amdgpu_bo_cpu_unmap(real->bo);
+static void amdgpu_bo_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
}
static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
- amdgpu_bo_destroy_or_cache
- /* other functions are never called */
+ amdgpu_bo_destroy,
+ NULL, /* never called */
+ NULL, /* never called */
+ amdgpu_bo_validate,
+ amdgpu_bo_fence,
+ amdgpu_bo_get_base_buffer,
};
-static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
-{
- struct amdgpu_winsys *ws = bo->ws;
-
- assert(bo->bo);
-
- mtx_lock(&ws->global_bo_list_lock);
- LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
- ws->num_buffers++;
- mtx_unlock(&ws->global_bo_list_lock);
-}
-
-static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
- uint64_t size,
- unsigned alignment,
- unsigned usage,
- enum radeon_bo_domain initial_domain,
- unsigned flags,
- unsigned pb_cache_bucket)
+static struct pb_buffer *amdgpu_bomgr_create_bo(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *desc)
{
+ struct amdgpu_winsys *rws = get_winsys(_mgr);
+ struct amdgpu_bo_desc *rdesc = (struct amdgpu_bo_desc*)desc;
struct amdgpu_bo_alloc_request request = {0};
amdgpu_bo_handle buf_handle;
uint64_t va = 0;
struct amdgpu_winsys_bo *bo;
amdgpu_va_handle va_handle;
- unsigned va_gap_size;
int r;
- assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
+ assert(rdesc->initial_domain & RADEON_DOMAIN_VRAM_GTT);
bo = CALLOC_STRUCT(amdgpu_winsys_bo);
if (!bo) {
return NULL;
}
- pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
- pb_cache_bucket);
request.alloc_size = size;
- request.phys_alignment = alignment;
+ request.phys_alignment = desc->alignment;
- if (initial_domain & RADEON_DOMAIN_VRAM)
+ if (rdesc->initial_domain & RADEON_DOMAIN_VRAM) {
request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
- if (initial_domain & RADEON_DOMAIN_GTT)
+ if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
+ request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+ if (rdesc->initial_domain & RADEON_DOMAIN_GTT) {
request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
+ if (rdesc->flags & RADEON_FLAG_GTT_WC)
+ request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ }
- if (flags & RADEON_FLAG_CPU_ACCESS)
- request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (flags & RADEON_FLAG_NO_CPU_ACCESS)
- request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- if (flags & RADEON_FLAG_GTT_WC)
- request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-
- r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
+ r = amdgpu_bo_alloc(rws->dev, &request, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
- fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);
- fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
- fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
+ fprintf(stderr, "amdgpu: size : %d bytes\n", size);
+ fprintf(stderr, "amdgpu: alignment : %d bytes\n", desc->alignment);
+ fprintf(stderr, "amdgpu: domains : %d\n", rdesc->initial_domain);
goto error_bo_alloc;
}
- va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
- r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
- size + va_gap_size, alignment, 0, &va, &va_handle, 0);
+ r = amdgpu_va_range_alloc(rws->dev, amdgpu_gpu_va_range_general,
+ size, desc->alignment, 0, &va, &va_handle, 0);
if (r)
goto error_va_alloc;
@@ -425,25 +342,23 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
goto error_va_map;
pipe_reference_init(&bo->base.reference, 1);
- bo->base.alignment = alignment;
- bo->base.usage = usage;
+ bo->base.alignment = desc->alignment;
+ bo->base.usage = desc->usage;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
- bo->ws = ws;
+ bo->rws = rws;
bo->bo = buf_handle;
bo->va = va;
- bo->u.real.va_handle = va_handle;
- bo->initial_domain = initial_domain;
- bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
-
- if (initial_domain & RADEON_DOMAIN_VRAM)
- ws->allocated_vram += align64(size, ws->info.gart_page_size);
- else if (initial_domain & RADEON_DOMAIN_GTT)
- ws->allocated_gtt += align64(size, ws->info.gart_page_size);
+ bo->va_handle = va_handle;
+ bo->initial_domain = rdesc->initial_domain;
+ bo->unique_id = __sync_fetch_and_add(&rws->next_bo_unique_id, 1);
- amdgpu_add_buffer_to_global_list(bo);
+ if (rdesc->initial_domain & RADEON_DOMAIN_VRAM)
+ rws->allocated_vram += align(size, rws->gart_page_size);
+ else if (rdesc->initial_domain & RADEON_DOMAIN_GTT)
+ rws->allocated_gtt += align(size, rws->gart_page_size);
- return bo;
+ return &bo->base;
error_va_map:
amdgpu_va_range_free(va_handle);
@@ -456,581 +371,48 @@ error_bo_alloc:
return NULL;
}
-bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
+static void amdgpu_bomgr_flush(struct pb_manager *mgr)
{
- struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
-
- if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
- return false;
- }
-
- return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
-}
-
-bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
-{
- struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
- bo = container_of(entry, bo, u.slab.entry);
-
- return amdgpu_bo_can_reclaim(&bo->base);
+ /* NOP */
}
-static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
+/* This is for the cache bufmgr. */
+static boolean amdgpu_bomgr_is_buffer_busy(struct pb_manager *_mgr,
+ struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
- assert(!bo->bo);
-
- pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
-}
-
-static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
- amdgpu_bo_slab_destroy
- /* other functions are never called */
-};
-
-struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
- unsigned entry_size,
- unsigned group_index)
-{
- struct amdgpu_winsys *ws = priv;
- struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
- enum radeon_bo_domain domains;
- enum radeon_bo_flag flags = 0;
- uint32_t base_id;
-
- if (!slab)
- return NULL;
-
- if (heap & 1)
- flags |= RADEON_FLAG_GTT_WC;
- if (heap & 2)
- flags |= RADEON_FLAG_CPU_ACCESS;
-
- switch (heap >> 2) {
- case 0:
- domains = RADEON_DOMAIN_VRAM;
- break;
- default:
- case 1:
- domains = RADEON_DOMAIN_VRAM_GTT;
- break;
- case 2:
- domains = RADEON_DOMAIN_GTT;
- break;
- }
-
- slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
- 64 * 1024, 64 * 1024,
- domains, flags));
- if (!slab->buffer)
- goto fail;
-
- assert(slab->buffer->bo);
-
- slab->base.num_entries = slab->buffer->base.size / entry_size;
- slab->base.num_free = slab->base.num_entries;
- slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
- if (!slab->entries)
- goto fail_buffer;
-
- LIST_INITHEAD(&slab->base.free);
-
- base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
-
- for (unsigned i = 0; i < slab->base.num_entries; ++i) {
- struct amdgpu_winsys_bo *bo = &slab->entries[i];
-
- bo->base.alignment = entry_size;
- bo->base.usage = slab->buffer->base.usage;
- bo->base.size = entry_size;
- bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
- bo->ws = ws;
- bo->va = slab->buffer->va + i * entry_size;
- bo->initial_domain = domains;
- bo->unique_id = base_id + i;
- bo->u.slab.entry.slab = &slab->base;
- bo->u.slab.entry.group_index = group_index;
- bo->u.slab.real = slab->buffer;
-
- LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
- }
-
- return &slab->base;
-
-fail_buffer:
- amdgpu_winsys_bo_reference(&slab->buffer, NULL);
-fail:
- FREE(slab);
- return NULL;
-}
-
-void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
-{
- struct amdgpu_slab *slab = amdgpu_slab(pslab);
-
- for (unsigned i = 0; i < slab->base.num_entries; ++i)
- amdgpu_bo_remove_fences(&slab->entries[i]);
-
- FREE(slab->entries);
- amdgpu_winsys_bo_reference(&slab->buffer, NULL);
- FREE(slab);
-}
-
-#if DEBUG_SPARSE_COMMITS
-static void
-sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
-{
- fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
- "Commitments:\n",
- __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
-
- struct amdgpu_sparse_backing *span_backing = NULL;
- uint32_t span_first_backing_page = 0;
- uint32_t span_first_va_page = 0;
- uint32_t va_page = 0;
-
- for (;;) {
- struct amdgpu_sparse_backing *backing = 0;
- uint32_t backing_page = 0;
-
- if (va_page < bo->u.sparse.num_va_pages) {
- backing = bo->u.sparse.commitments[va_page].backing;
- backing_page = bo->u.sparse.commitments[va_page].page;
- }
-
- if (span_backing &&
- (backing != span_backing ||
- backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
- fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
- span_first_va_page, va_page - 1, span_backing,
- span_first_backing_page,
- span_first_backing_page + (va_page - span_first_va_page) - 1);
-
- span_backing = NULL;
- }
-
- if (va_page >= bo->u.sparse.num_va_pages)
- break;
-
- if (backing && !span_backing) {
- span_backing = backing;
- span_first_backing_page = backing_page;
- span_first_va_page = va_page;
- }
-
- va_page++;
- }
-
- fprintf(stderr, "Backing:\n");
-
- list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
- fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
- for (unsigned i = 0; i < backing->num_chunks; ++i)
- fprintf(stderr, " %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
- }
-}
-#endif
-
-/*
- * Attempt to allocate the given number of backing pages. Fewer pages may be
- * allocated (depending on the fragmentation of existing backing buffers),
- * which will be reflected by a change to *pnum_pages.
- */
-static struct amdgpu_sparse_backing *
-sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
-{
- struct amdgpu_sparse_backing *best_backing;
- unsigned best_idx;
- uint32_t best_num_pages;
-
- best_backing = NULL;
- best_idx = 0;
- best_num_pages = 0;
-
- /* This is a very simple and inefficient best-fit algorithm. */
- list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
- for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
- uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
- if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
- (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
- best_backing = backing;
- best_idx = idx;
- best_num_pages = cur_num_pages;
- }
- }
- }
-
- /* Allocate a new backing buffer if necessary. */
- if (!best_backing) {
- struct pb_buffer *buf;
- uint64_t size;
- uint32_t pages;
-
- best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
- if (!best_backing)
- return NULL;
-
- best_backing->max_chunks = 4;
- best_backing->chunks = CALLOC(best_backing->max_chunks,
- sizeof(*best_backing->chunks));
- if (!best_backing->chunks) {
- FREE(best_backing);
- return NULL;
- }
-
- assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));
-
- size = MIN3(bo->base.size / 16,
- 8 * 1024 * 1024,
- bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
- size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
-
- buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
- bo->initial_domain,
- bo->u.sparse.flags | RADEON_FLAG_HANDLE);
- if (!buf) {
- FREE(best_backing->chunks);
- FREE(best_backing);
- return NULL;
- }
-
- /* We might have gotten a bigger buffer than requested via caching. */
- pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
-
- best_backing->bo = amdgpu_winsys_bo(buf);
- best_backing->num_chunks = 1;
- best_backing->chunks[0].begin = 0;
- best_backing->chunks[0].end = pages;
-
- list_add(&best_backing->list, &bo->u.sparse.backing);
- bo->u.sparse.num_backing_pages += pages;
-
- best_idx = 0;
- best_num_pages = pages;
- }
-
- *pnum_pages = MIN2(*pnum_pages, best_num_pages);
- *pstart_page = best_backing->chunks[best_idx].begin;
- best_backing->chunks[best_idx].begin += *pnum_pages;
-
- if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
- memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
- sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
- best_backing->num_chunks--;
- }
-
- return best_backing;
-}
-
-static void
-sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
- struct amdgpu_sparse_backing *backing)
-{
- struct amdgpu_winsys *ws = backing->bo->ws;
-
- bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
-
- mtx_lock(&ws->bo_fence_lock);
- amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
- mtx_unlock(&ws->bo_fence_lock);
-
- list_del(&backing->list);
- amdgpu_winsys_bo_reference(&backing->bo, NULL);
- FREE(backing->chunks);
- FREE(backing);
-}
-
-/*
- * Return a range of pages from the given backing buffer back into the
- * free structure.
- */
-static bool
-sparse_backing_free(struct amdgpu_winsys_bo *bo,
- struct amdgpu_sparse_backing *backing,
- uint32_t start_page, uint32_t num_pages)
-{
- uint32_t end_page = start_page + num_pages;
- unsigned low = 0;
- unsigned high = backing->num_chunks;
-
- /* Find the first chunk with begin >= start_page. */
- while (low < high) {
- unsigned mid = low + (high - low) / 2;
-
- if (backing->chunks[mid].begin >= start_page)
- high = mid;
- else
- low = mid + 1;
+ if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
+ return TRUE;
}
- assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
- assert(low == 0 || backing->chunks[low - 1].end <= start_page);
-
- if (low > 0 && backing->chunks[low - 1].end == start_page) {
- backing->chunks[low - 1].end = end_page;
-
- if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
- backing->chunks[low - 1].end = backing->chunks[low].end;
- memmove(&backing->chunks[low], &backing->chunks[low + 1],
- sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
- backing->num_chunks--;
- }
- } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
- backing->chunks[low].begin = start_page;
- } else {
- if (backing->num_chunks >= backing->max_chunks) {
- unsigned new_max_chunks = 2 * backing->max_chunks;
- struct amdgpu_sparse_backing_chunk *new_chunks =
- REALLOC(backing->chunks,
- sizeof(*backing->chunks) * backing->max_chunks,
- sizeof(*backing->chunks) * new_max_chunks);
- if (!new_chunks)
- return false;
-
- backing->max_chunks = new_max_chunks;
- backing->chunks = new_chunks;
- }
-
- memmove(&backing->chunks[low + 1], &backing->chunks[low],
- sizeof(*backing->chunks) * (backing->num_chunks - low));
- backing->chunks[low].begin = start_page;
- backing->chunks[low].end = end_page;
- backing->num_chunks++;
+ if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0, RADEON_USAGE_READWRITE)) {
+ return TRUE;
}
- if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
- backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
- sparse_free_backing_buffer(bo, backing);
-
- return true;
+ return FALSE;
}
-static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
+static void amdgpu_bomgr_destroy(struct pb_manager *mgr)
{
- struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
- int r;
-
- assert(!bo->bo && bo->sparse);
-
- r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
- (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
- bo->va, 0, AMDGPU_VA_OP_CLEAR);
- if (r) {
- fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
- }
-
- while (!list_empty(&bo->u.sparse.backing)) {
- struct amdgpu_sparse_backing *dummy = NULL;
- sparse_free_backing_buffer(bo,
- container_of(bo->u.sparse.backing.next,
- dummy, list));
- }
-
- amdgpu_va_range_free(bo->u.sparse.va_handle);
- mtx_destroy(&bo->u.sparse.commit_lock);
- FREE(bo->u.sparse.commitments);
- FREE(bo);
+ FREE(mgr);
}
-static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
- amdgpu_bo_sparse_destroy
- /* other functions are never called */
-};
-
-static struct pb_buffer *
-amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
- enum radeon_bo_domain domain,
- enum radeon_bo_flag flags)
+struct pb_manager *amdgpu_bomgr_create(struct amdgpu_winsys *rws)
{
- struct amdgpu_winsys_bo *bo;
- uint64_t map_size;
- uint64_t va_gap_size;
- int r;
+ struct amdgpu_bomgr *mgr;
- /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
- * that exceed this limit. This is not really a restriction: we don't have
- * that much virtual address space anyway.
- */
- if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
+ mgr = CALLOC_STRUCT(amdgpu_bomgr);
+ if (!mgr)
return NULL;
- bo = CALLOC_STRUCT(amdgpu_winsys_bo);
- if (!bo)
- return NULL;
+ mgr->base.destroy = amdgpu_bomgr_destroy;
+ mgr->base.create_buffer = amdgpu_bomgr_create_bo;
+ mgr->base.flush = amdgpu_bomgr_flush;
+ mgr->base.is_buffer_busy = amdgpu_bomgr_is_buffer_busy;
- pipe_reference_init(&bo->base.reference, 1);
- bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
- bo->base.size = size;
- bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
- bo->ws = ws;
- bo->initial_domain = domain;
- bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
- bo->sparse = true;
- bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;
-
- bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
- bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
- sizeof(*bo->u.sparse.commitments));
- if (!bo->u.sparse.commitments)
- goto error_alloc_commitments;
-
- mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
- LIST_INITHEAD(&bo->u.sparse.backing);
-
- /* For simplicity, we always map a multiple of the page size. */
- map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
- va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
- r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
- map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
- 0, &bo->va, &bo->u.sparse.va_handle, 0);
- if (r)
- goto error_va_alloc;
-
- r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
- AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
- if (r)
- goto error_va_map;
-
- return &bo->base;
-
-error_va_map:
- amdgpu_va_range_free(bo->u.sparse.va_handle);
-error_va_alloc:
- mtx_destroy(&bo->u.sparse.commit_lock);
- FREE(bo->u.sparse.commitments);
-error_alloc_commitments:
- FREE(bo);
- return NULL;
-}
-
-static bool
-amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
- bool commit)
-{
- struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
- struct amdgpu_sparse_commitment *comm;
- uint32_t va_page, end_va_page;
- bool ok = true;
- int r;
-
- assert(bo->sparse);
- assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
- assert(offset <= bo->base.size);
- assert(size <= bo->base.size - offset);
- assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);
-
- comm = bo->u.sparse.commitments;
- va_page = offset / RADEON_SPARSE_PAGE_SIZE;
- end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
-
- mtx_lock(&bo->u.sparse.commit_lock);
-
-#if DEBUG_SPARSE_COMMITS
- sparse_dump(bo, __func__);
-#endif
-
- if (commit) {
- while (va_page < end_va_page) {
- uint32_t span_va_page;
-
- /* Skip pages that are already committed. */
- if (comm[va_page].backing) {
- va_page++;
- continue;
- }
-
- /* Determine length of uncommitted span. */
- span_va_page = va_page;
- while (va_page < end_va_page && !comm[va_page].backing)
- va_page++;
-
- /* Fill the uncommitted span with chunks of backing memory. */
- while (span_va_page < va_page) {
- struct amdgpu_sparse_backing *backing;
- uint32_t backing_start, backing_size;
-
- backing_size = va_page - span_va_page;
- backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
- if (!backing) {
- ok = false;
- goto out;
- }
-
- r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
- (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
- (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
- bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
- AMDGPU_VM_PAGE_READABLE |
- AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE,
- AMDGPU_VA_OP_REPLACE);
- if (r) {
- ok = sparse_backing_free(bo, backing, backing_start, backing_size);
- assert(ok && "sufficient memory should already be allocated");
-
- ok = false;
- goto out;
- }
-
- while (backing_size) {
- comm[span_va_page].backing = backing;
- comm[span_va_page].page = backing_start;
- span_va_page++;
- backing_start++;
- backing_size--;
- }
- }
- }
- } else {
- r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
- (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
- bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
- AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
- if (r) {
- ok = false;
- goto out;
- }
-
- while (va_page < end_va_page) {
- struct amdgpu_sparse_backing *backing;
- uint32_t backing_start;
- uint32_t span_pages;
-
- /* Skip pages that are already uncommitted. */
- if (!comm[va_page].backing) {
- va_page++;
- continue;
- }
-
- /* Group contiguous spans of pages. */
- backing = comm[va_page].backing;
- backing_start = comm[va_page].page;
- comm[va_page].backing = NULL;
-
- span_pages = 1;
- va_page++;
-
- while (va_page < end_va_page &&
- comm[va_page].backing == backing &&
- comm[va_page].page == backing_start + span_pages) {
- comm[va_page].backing = NULL;
- va_page++;
- span_pages++;
- }
-
- if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
- /* Couldn't allocate tracking data structures, so we have to leak */
- fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
- ok = false;
- }
- }
- }
-out:
-
- mtx_unlock(&bo->u.sparse.commit_lock);
-
- return ok;
+ mgr->rws = rws;
+ return &mgr->base;
}
static unsigned eg_tile_split(unsigned tile_split)
@@ -1062,209 +444,152 @@ static unsigned eg_tile_split_rev(unsigned eg_tile_split)
}
}
-static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
- struct radeon_bo_metadata *md)
+static void amdgpu_bo_get_tiling(struct pb_buffer *_buf,
+ enum radeon_bo_layout *microtiled,
+ enum radeon_bo_layout *macrotiled,
+ unsigned *bankw, unsigned *bankh,
+ unsigned *tile_split,
+ unsigned *stencil_tile_split,
+ unsigned *mtilea,
+ bool *scanout)
{
- struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
+ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
struct amdgpu_bo_info info = {0};
- uint64_t tiling_flags;
+ uint32_t tiling_flags;
int r;
- assert(bo->bo && "must not be called for slab entries");
-
r = amdgpu_bo_query_info(bo->bo, &info);
if (r)
return;
tiling_flags = info.metadata.tiling_info;
- if (bo->ws->info.chip_class >= GFX9) {
- md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
- } else {
- md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
- md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
-
- if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
- md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
- else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
- md->u.legacy.microtile = RADEON_LAYOUT_TILED;
-
- md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
- md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
- md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
- md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
- md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
- }
-
- md->size_metadata = info.metadata.size_metadata;
- memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
-}
-
-static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
- struct radeon_bo_metadata *md)
-{
- struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
+ *microtiled = RADEON_LAYOUT_LINEAR;
+ *macrotiled = RADEON_LAYOUT_LINEAR;
+
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
+ *macrotiled = RADEON_LAYOUT_TILED;
+ else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
+ *microtiled = RADEON_LAYOUT_TILED;
+
+ if (bankw && tile_split && mtilea && tile_split) {
+ *bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ *bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ *tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
+ *mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ }
+ if (scanout)
+ *scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
+}
+
+static void amdgpu_bo_set_tiling(struct pb_buffer *_buf,
+ struct radeon_winsys_cs *rcs,
+ enum radeon_bo_layout microtiled,
+ enum radeon_bo_layout macrotiled,
+ unsigned pipe_config,
+ unsigned bankw, unsigned bankh,
+ unsigned tile_split,
+ unsigned stencil_tile_split,
+ unsigned mtilea, unsigned num_banks,
+ uint32_t pitch,
+ bool scanout)
+{
+ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
struct amdgpu_bo_metadata metadata = {0};
- uint64_t tiling_flags = 0;
+ uint32_t tiling_flags = 0;
- assert(bo->bo && "must not be called for slab entries");
-
- if (bo->ws->info.chip_class >= GFX9) {
- tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
- } else {
- if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
- tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
- else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
- tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
- else
- tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
-
- tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
- tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
- tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
- if (md->u.legacy.tile_split)
- tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
- tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
- tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);
-
- if (md->u.legacy.scanout)
- tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
- else
- tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
- }
+ if (macrotiled == RADEON_LAYOUT_TILED)
+ tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
+ else if (microtiled == RADEON_LAYOUT_TILED)
+ tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
+ else
+ tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
+
+ tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, pipe_config);
+ tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(bankw));
+ tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(bankh));
+ if (tile_split)
+ tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(tile_split));
+ tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(mtilea));
+ tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(num_banks)-1);
+
+ if (scanout)
+ tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
+ else
+ tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
metadata.tiling_info = tiling_flags;
- metadata.size_metadata = md->size_metadata;
- memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));
amdgpu_bo_set_metadata(bo->bo, &metadata);
}
+static struct radeon_winsys_cs_handle *amdgpu_get_cs_handle(struct pb_buffer *_buf)
+{
+ /* return a direct pointer to amdgpu_winsys_bo. */
+ return (struct radeon_winsys_cs_handle*)get_amdgpu_winsys_bo(_buf);
+}
+
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
- uint64_t size,
+ unsigned size,
unsigned alignment,
+ boolean use_reusable_pool,
enum radeon_bo_domain domain,
enum radeon_bo_flag flags)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
- struct amdgpu_winsys_bo *bo;
- unsigned usage = 0, pb_cache_bucket;
-
- /* Sub-allocate small buffers from slabs. */
- if (!(flags & (RADEON_FLAG_HANDLE | RADEON_FLAG_SPARSE)) &&
- size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
- alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
- struct pb_slab_entry *entry;
- unsigned heap = 0;
-
- if (flags & RADEON_FLAG_GTT_WC)
- heap |= 1;
- if (flags & RADEON_FLAG_CPU_ACCESS)
- heap |= 2;
- if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
- goto no_slab;
-
- switch (domain) {
- case RADEON_DOMAIN_VRAM:
- heap |= 0 * 4;
- break;
- case RADEON_DOMAIN_VRAM_GTT:
- heap |= 1 * 4;
- break;
- case RADEON_DOMAIN_GTT:
- heap |= 2 * 4;
- break;
- default:
- goto no_slab;
- }
-
- entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
- if (!entry) {
- /* Clear the cache and try again. */
- pb_cache_release_all_buffers(&ws->bo_cache);
-
- entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
- }
- if (!entry)
- return NULL;
-
- bo = NULL;
- bo = container_of(entry, bo, u.slab.entry);
-
- pipe_reference_init(&bo->base.reference, 1);
-
- return &bo->base;
- }
-no_slab:
-
- if (flags & RADEON_FLAG_SPARSE) {
- assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
- assert(!(flags & RADEON_FLAG_CPU_ACCESS));
-
- flags |= RADEON_FLAG_NO_CPU_ACCESS;
-
- return amdgpu_bo_sparse_create(ws, size, domain, flags);
+ struct amdgpu_bo_desc desc;
+ struct pb_manager *provider;
+ struct pb_buffer *buffer;
+
+ /* Don't use VRAM if the GPU doesn't have much. This is only the initial
+ * domain. The kernel is free to move the buffer if it wants to.
+ *
+ * 64MB means no VRAM by todays standards.
+ */
+ if (domain & RADEON_DOMAIN_VRAM && ws->info.vram_size <= 64*1024*1024) {
+ domain = RADEON_DOMAIN_GTT;
+ flags = RADEON_FLAG_GTT_WC;
}
- /* This flag is irrelevant for the cache. */
- flags &= ~RADEON_FLAG_HANDLE;
+ memset(&desc, 0, sizeof(desc));
+ desc.base.alignment = alignment;
/* Align size to page size. This is the minimum alignment for normal
* BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
* like constant/uniform buffers, can benefit from better and more reuse.
*/
- size = align64(size, ws->info.gart_page_size);
- alignment = align(alignment, ws->info.gart_page_size);
+ size = align(size, ws->gart_page_size);
/* Only set one usage bit each for domains and flags, or the cache manager
* might consider different sets of domains / flags compatible
*/
if (domain == RADEON_DOMAIN_VRAM_GTT)
- usage = 1 << 2;
+ desc.base.usage = 1 << 2;
else
- usage = domain >> 1;
- assert(flags < sizeof(usage) * 8 - 3);
- usage |= 1 << (flags + 3);
-
- /* Determine the pb_cache bucket for minimizing pb_cache misses. */
- pb_cache_bucket = 0;
- if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
- pb_cache_bucket += 1;
- if (flags == RADEON_FLAG_GTT_WC) /* WC */
- pb_cache_bucket += 2;
- assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
-
- /* Get a buffer from the cache. */
- bo = (struct amdgpu_winsys_bo*)
- pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
- pb_cache_bucket);
- if (bo)
- return &bo->base;
-
- /* Create a new one. */
- bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
- pb_cache_bucket);
- if (!bo) {
- /* Clear the cache and try again. */
- pb_slabs_reclaim(&ws->bo_slabs);
- pb_cache_release_all_buffers(&ws->bo_cache);
- bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
- pb_cache_bucket);
- if (!bo)
- return NULL;
- }
+ desc.base.usage = domain >> 1;
+ assert(flags < sizeof(desc.base.usage) * 8 - 3);
+ desc.base.usage |= 1 << (flags + 3);
- bo->u.real.use_reusable_pool = true;
- return &bo->base;
+ desc.initial_domain = domain;
+ desc.flags = flags;
+
+ /* Assign a buffer manager. */
+ if (use_reusable_pool)
+ provider = ws->cman;
+ else
+ provider = ws->kman;
+
+ buffer = provider->create_buffer(provider, size, &desc.base);
+ if (!buffer)
+ return NULL;
+
+ return (struct pb_buffer*)buffer;
}
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
struct winsys_handle *whandle,
- unsigned *stride,
- unsigned *offset)
+ unsigned *stride)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_winsys_bo *bo;
@@ -1319,27 +644,24 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = info.phys_alignment;
+ bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
bo->bo = result.buf_handle;
bo->base.size = result.alloc_size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
- bo->ws = ws;
+ bo->rws = ws;
bo->va = va;
- bo->u.real.va_handle = va_handle;
+ bo->va_handle = va_handle;
bo->initial_domain = initial;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
bo->is_shared = true;
if (stride)
*stride = whandle->stride;
- if (offset)
- *offset = whandle->offset;
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
+ ws->allocated_vram += align(bo->base.size, ws->gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
-
- amdgpu_add_buffer_to_global_list(bo);
+ ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);
return &bo->base;
@@ -1354,21 +676,16 @@ error:
return NULL;
}
-static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
- unsigned stride, unsigned offset,
- unsigned slice_size,
- struct winsys_handle *whandle)
+static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
+ unsigned stride,
+ struct winsys_handle *whandle)
{
- struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
+ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(buffer);
enum amdgpu_bo_handle_type type;
int r;
- if (!bo->bo) {
- offset += bo->va - bo->u.slab.real->va;
- bo = bo->u.slab.real;
- }
-
- bo->u.real.use_reusable_pool = false;
+ if ((void*)bo != (void*)buffer)
+ pb_cache_manager_remove_buffer(buffer);
switch (whandle->type) {
case DRM_API_HANDLE_TYPE_SHARED:
@@ -1381,22 +698,20 @@ static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
type = amdgpu_bo_handle_type_kms;
break;
default:
- return false;
+ return FALSE;
}
r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
if (r)
- return false;
+ return FALSE;
whandle->stride = stride;
- whandle->offset = offset;
- whandle->offset += slice_size * whandle->layer;
bo->is_shared = true;
- return true;
+ return TRUE;
}
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
- void *pointer, uint64_t size)
+ void *pointer, unsigned size)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
amdgpu_bo_handle buf_handle;
@@ -1422,18 +737,17 @@ static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
pipe_reference_init(&bo->base.reference, 1);
bo->bo = buf_handle;
bo->base.alignment = 0;
+ bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
- bo->ws = ws;
+ bo->rws = ws;
bo->user_ptr = pointer;
bo->va = va;
- bo->u.real.va_handle = va_handle;
+ bo->va_handle = va_handle;
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
- ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
-
- amdgpu_add_buffer_to_global_list(bo);
+ ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);
return (struct pb_buffer*)bo;
@@ -1448,29 +762,23 @@ error:
return NULL;
}
-static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
-{
- return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
-}
-
-static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
+static uint64_t amdgpu_bo_get_va(struct radeon_winsys_cs_handle *buf)
{
return ((struct amdgpu_winsys_bo*)buf)->va;
}
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_bomgr_init_functions(struct amdgpu_winsys *ws)
{
- ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
- ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
+ ws->base.buffer_get_cs_handle = amdgpu_get_cs_handle;
+ ws->base.buffer_set_tiling = amdgpu_bo_set_tiling;
+ ws->base.buffer_get_tiling = amdgpu_bo_get_tiling;
ws->base.buffer_map = amdgpu_bo_map;
ws->base.buffer_unmap = amdgpu_bo_unmap;
ws->base.buffer_wait = amdgpu_bo_wait;
ws->base.buffer_create = amdgpu_bo_create;
ws->base.buffer_from_handle = amdgpu_bo_from_handle;
ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
- ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
ws->base.buffer_get_handle = amdgpu_bo_get_handle;
- ws->base.buffer_commit = amdgpu_bo_sparse_commit;
ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}
diff --git a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
index 1311344b8..3739fd136 100644
--- a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
+++ b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
@@ -34,116 +34,41 @@
#define AMDGPU_BO_H
#include "amdgpu_winsys.h"
+#include "pipebuffer/pb_bufmgr.h"
-#include "pipebuffer/pb_slab.h"
+struct amdgpu_bo_desc {
+ struct pb_desc base;
-struct amdgpu_sparse_backing_chunk;
-
-/*
- * Sub-allocation information for a real buffer used as backing memory of a
- * sparse buffer.
- */
-struct amdgpu_sparse_backing {
- struct list_head list;
-
- struct amdgpu_winsys_bo *bo;
-
- /* Sorted list of free chunks. */
- struct amdgpu_sparse_backing_chunk *chunks;
- uint32_t max_chunks;
- uint32_t num_chunks;
-};
-
-struct amdgpu_sparse_commitment {
- struct amdgpu_sparse_backing *backing;
- uint32_t page;
+ enum radeon_bo_domain initial_domain;
+ unsigned flags;
};
struct amdgpu_winsys_bo {
struct pb_buffer base;
- union {
- struct {
- struct pb_cache_entry cache_entry;
-
- amdgpu_va_handle va_handle;
- int map_count;
- bool use_reusable_pool;
-
- struct list_head global_list_item;
- } real;
- struct {
- struct pb_slab_entry entry;
- struct amdgpu_winsys_bo *real;
- } slab;
- struct {
- mtx_t commit_lock;
- amdgpu_va_handle va_handle;
- enum radeon_bo_flag flags;
-
- uint32_t num_va_pages;
- uint32_t num_backing_pages;
-
- struct list_head backing;
-
- /* Commitment information for each page of the virtual memory area. */
- struct amdgpu_sparse_commitment *commitments;
- } sparse;
- } u;
- struct amdgpu_winsys *ws;
+ struct amdgpu_winsys *rws;
void *user_ptr; /* from buffer_from_ptr */
- amdgpu_bo_handle bo; /* NULL for slab entries and sparse buffers */
- bool sparse;
+ amdgpu_bo_handle bo;
uint32_t unique_id;
+ amdgpu_va_handle va_handle;
uint64_t va;
enum radeon_bo_domain initial_domain;
/* how many command streams is this bo referenced in? */
int num_cs_references;
- /* how many command streams, which are being emitted in a separate
- * thread, is this bo referenced in? */
- volatile int num_active_ioctls;
-
/* whether buffer_get_handle or buffer_from_handle was called,
* it can only transition from false to true
*/
volatile int is_shared; /* bool (int for atomicity) */
/* Fences for buffer synchronization. */
- unsigned num_fences;
- unsigned max_fences;
- struct pipe_fence_handle **fences;
-};
-
-struct amdgpu_slab {
- struct pb_slab base;
- struct amdgpu_winsys_bo *buffer;
- struct amdgpu_winsys_bo *entries;
+ struct pipe_fence_handle *fence[RING_LAST];
};
-bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf);
-void amdgpu_bo_destroy(struct pb_buffer *_buf);
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws);
-
-bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
-struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
- unsigned entry_size,
- unsigned group_index);
-void amdgpu_bo_slab_free(void *priv, struct pb_slab *slab);
-
-static inline
-struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
-{
- return (struct amdgpu_winsys_bo *)bo;
-}
-
-static inline
-struct amdgpu_slab *amdgpu_slab(struct pb_slab *slab)
-{
- return (struct amdgpu_slab *)slab;
-}
+struct pb_manager *amdgpu_bomgr_create(struct amdgpu_winsys *rws);
+void amdgpu_bomgr_init_functions(struct amdgpu_winsys *ws);
static inline
void amdgpu_winsys_bo_reference(struct amdgpu_winsys_bo **dst,
diff --git a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 6295c61dd..0f42298c2 100644
--- a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -35,9 +35,6 @@
#include <stdio.h>
#include <amdgpu_drm.h>
-#include "amd/common/sid.h"
-
-DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
/* FENCES */
@@ -53,7 +50,6 @@ amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
fence->fence.ip_type = ip_type;
fence->fence.ip_instance = ip_instance;
fence->fence.ring = ring;
- fence->submission_in_progress = true;
p_atomic_inc(&ctx->refcount);
return (struct pipe_fence_handle *)fence;
}
@@ -66,7 +62,6 @@ static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
rfence->fence.fence = request->seq_no;
rfence->user_fence_cpu_address = user_fence_cpu_address;
- rfence->submission_in_progress = false;
}
static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
@@ -74,7 +69,6 @@ static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
rfence->signalled = true;
- rfence->submission_in_progress = false;
}
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
@@ -94,25 +88,11 @@ bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
else
abs_timeout = os_time_get_absolute_timeout(timeout);
- /* The fence might not have a number assigned if its IB is being
- * submitted in the other thread right now. Wait until the submission
- * is done. */
- if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
- abs_timeout))
- return false;
-
user_fence_cpu = rfence->user_fence_cpu_address;
- if (user_fence_cpu) {
- if (*user_fence_cpu >= rfence->fence.fence) {
- rfence->signalled = true;
- return true;
- }
-
- /* No timeout, just query: no need for the ioctl. */
- if (!absolute && !timeout)
- return false;
+ if (user_fence_cpu && *user_fence_cpu >= rfence->fence.fence) {
+ rfence->signalled = true;
+ return true;
}
-
/* Now use the libdrm query. */
r = amdgpu_cs_query_fence_status(&rfence->fence,
abs_timeout,
@@ -120,7 +100,7 @@ bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
&expired);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
- return false;
+ return FALSE;
}
if (expired) {
@@ -139,31 +119,6 @@ static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
return amdgpu_fence_wait(fence, timeout, false);
}
-static struct pipe_fence_handle *
-amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
-{
- struct amdgpu_cs *cs = amdgpu_cs(rcs);
- struct pipe_fence_handle *fence = NULL;
-
- if (debug_get_option_noop())
- return NULL;
-
- if (cs->next_fence) {
- amdgpu_fence_reference(&fence, cs->next_fence);
- return fence;
- }
-
- fence = amdgpu_fence_create(cs->ctx,
- cs->csc->request.ip_type,
- cs->csc->request.ip_instance,
- cs->csc->request.ring);
- if (!fence)
- return NULL;
-
- amdgpu_fence_reference(&cs->next_fence, fence);
- return fence;
-}
-
/* CONTEXTS */
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
@@ -173,47 +128,41 @@ static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
struct amdgpu_bo_alloc_request alloc_buffer = {};
amdgpu_bo_handle buf_handle;
- if (!ctx)
- return NULL;
-
ctx->ws = amdgpu_winsys(ws);
ctx->refcount = 1;
- ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;
r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
- goto error_create;
+ FREE(ctx);
+ return NULL;
}
- alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
- alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
+ alloc_buffer.alloc_size = 4 * 1024;
+ alloc_buffer.phys_alignment = 4 *1024;
alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
- goto error_user_fence_alloc;
+ amdgpu_cs_ctx_free(ctx->ctx);
+ FREE(ctx);
+ return NULL;
}
r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
- goto error_user_fence_map;
+ amdgpu_bo_free(buf_handle);
+ amdgpu_cs_ctx_free(ctx->ctx);
+ FREE(ctx);
+ return NULL;
}
memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
ctx->user_fence_bo = buf_handle;
return (struct radeon_winsys_ctx*)ctx;
-
-error_user_fence_map:
- amdgpu_bo_free(buf_handle);
-error_user_fence_alloc:
- amdgpu_cs_ctx_free(ctx->ctx);
-error_create:
- FREE(ctx);
- return NULL;
}
static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
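The two versions of amdgpu_ctx_create above differ mostly in error handling: the removed code unwinds through goto labels, while the added code repeats the cleanup calls at each failing step. A compact sketch of the label-based idiom, using hypothetical resource helpers rather than the real amdgpu_cs_ctx_create/amdgpu_bo_alloc calls:

#include <stdlib.h>

struct ctx { void *dev_handle; void *fence_buf; };

/* Hypothetical allocators standing in for the real libdrm calls. */
static void *acquire_device(void)    { return malloc(1); }
static void *alloc_fence_bo(void)    { return malloc(1); }
static void  release_device(void *p) { free(p); }

static struct ctx *ctx_create(void)
{
   struct ctx *ctx = calloc(1, sizeof(*ctx));
   if (!ctx)
      return NULL;

   ctx->dev_handle = acquire_device();
   if (!ctx->dev_handle)
      goto error_device;

   ctx->fence_buf = alloc_fence_bo();
   if (!ctx->fence_buf)
      goto error_fence;

   return ctx;

   /* Cleanup runs in reverse order of acquisition; each label releases
    * everything acquired before the step that failed. */
error_fence:
   release_device(ctx->dev_handle);
error_device:
   free(ctx);
   return NULL;
}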
@@ -228,13 +177,6 @@ amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
uint32_t result, hangs;
int r;
- /* Return a failure due to a rejected command submission. */
- if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
- return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
- PIPE_INNOCENT_CONTEXT_RESET;
- }
-
- /* Return a failure due to a GPU hang. */
r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
@@ -256,445 +198,56 @@ amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
/* COMMAND SUBMISSION */
-static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
-{
- return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
- cs->request.ip_type != AMDGPU_HW_IP_VCE;
-}
-
-static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
-{
- return cs->ctx->ws->info.chip_class >= CIK &&
- cs->ring_type == RING_GFX;
-}
-
-static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
-{
- if (ring_type == RING_GFX)
- return 4; /* for chaining */
-
- return 0;
-}
-
-int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
-{
- unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
- int i = cs->buffer_indices_hashlist[hash];
- struct amdgpu_cs_buffer *buffers;
- int num_buffers;
-
- if (bo->bo) {
- buffers = cs->real_buffers;
- num_buffers = cs->num_real_buffers;
- } else if (!bo->sparse) {
- buffers = cs->slab_buffers;
- num_buffers = cs->num_slab_buffers;
- } else {
- buffers = cs->sparse_buffers;
- num_buffers = cs->num_sparse_buffers;
- }
-
- /* not found or found */
- if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
- return i;
-
- /* Hash collision, look for the BO in the list of buffers linearly. */
- for (i = num_buffers - 1; i >= 0; i--) {
- if (buffers[i].bo == bo) {
- /* Put this buffer in the hash list.
- * This will prevent additional hash collisions if there are
- * several consecutive lookup_buffer calls for the same buffer.
- *
- * Example: Assuming buffers A,B,C collide in the hash list,
- * the following sequence of buffers:
- * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
- * will collide here: ^ and here: ^,
- * meaning that we should get very few collisions in the end. */
- cs->buffer_indices_hashlist[hash] = i;
- return i;
- }
- }
- return -1;
-}
-
-static int
-amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
-{
- struct amdgpu_cs_buffer *buffer;
- int idx;
-
- /* New buffer, check if the backing array is large enough. */
- if (cs->num_real_buffers >= cs->max_real_buffers) {
- unsigned new_max =
- MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
- struct amdgpu_cs_buffer *new_buffers;
-
- new_buffers = MALLOC(new_max * sizeof(*new_buffers));
-
- if (!new_buffers) {
- fprintf(stderr, "amdgpu_do_add_buffer: allocation failed\n");
- FREE(new_buffers);
- return -1;
- }
-
- memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));
-
- FREE(cs->real_buffers);
-
- cs->max_real_buffers = new_max;
- cs->real_buffers = new_buffers;
- }
-
- idx = cs->num_real_buffers;
- buffer = &cs->real_buffers[idx];
-
- memset(buffer, 0, sizeof(*buffer));
- amdgpu_winsys_bo_reference(&buffer->bo, bo);
- p_atomic_inc(&bo->num_cs_references);
- cs->num_real_buffers++;
-
- return idx;
-}
-
-static int
-amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
-{
- struct amdgpu_cs_context *cs = acs->csc;
- unsigned hash;
- int idx = amdgpu_lookup_buffer(cs, bo);
-
- if (idx >= 0)
- return idx;
-
- idx = amdgpu_do_add_real_buffer(cs, bo);
-
- hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
- cs->buffer_indices_hashlist[hash] = idx;
-
- if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- acs->main.base.used_vram += bo->base.size;
- else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- acs->main.base.used_gart += bo->base.size;
-
- return idx;
-}
-
-static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
- struct amdgpu_winsys_bo *bo)
-{
- struct amdgpu_cs_context *cs = acs->csc;
- struct amdgpu_cs_buffer *buffer;
- unsigned hash;
- int idx = amdgpu_lookup_buffer(cs, bo);
- int real_idx;
-
- if (idx >= 0)
- return idx;
-
- real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);
- if (real_idx < 0)
- return -1;
-
- /* New buffer, check if the backing array is large enough. */
- if (cs->num_slab_buffers >= cs->max_slab_buffers) {
- unsigned new_max =
- MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
- struct amdgpu_cs_buffer *new_buffers;
-
- new_buffers = REALLOC(cs->slab_buffers,
- cs->max_slab_buffers * sizeof(*new_buffers),
- new_max * sizeof(*new_buffers));
- if (!new_buffers) {
- fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
- return -1;
- }
-
- cs->max_slab_buffers = new_max;
- cs->slab_buffers = new_buffers;
- }
-
- idx = cs->num_slab_buffers;
- buffer = &cs->slab_buffers[idx];
-
- memset(buffer, 0, sizeof(*buffer));
- amdgpu_winsys_bo_reference(&buffer->bo, bo);
- buffer->u.slab.real_idx = real_idx;
- p_atomic_inc(&bo->num_cs_references);
- cs->num_slab_buffers++;
-
- hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
- cs->buffer_indices_hashlist[hash] = idx;
-
- return idx;
-}
-
-static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
- struct amdgpu_winsys_bo *bo)
-{
- struct amdgpu_cs_context *cs = acs->csc;
- struct amdgpu_cs_buffer *buffer;
- unsigned hash;
- int idx = amdgpu_lookup_buffer(cs, bo);
-
- if (idx >= 0)
- return idx;
-
- /* New buffer, check if the backing array is large enough. */
- if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
- unsigned new_max =
- MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
- struct amdgpu_cs_buffer *new_buffers;
-
- new_buffers = REALLOC(cs->sparse_buffers,
- cs->max_sparse_buffers * sizeof(*new_buffers),
- new_max * sizeof(*new_buffers));
- if (!new_buffers) {
- fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");
- return -1;
- }
-
- cs->max_sparse_buffers = new_max;
- cs->sparse_buffers = new_buffers;
- }
-
- idx = cs->num_sparse_buffers;
- buffer = &cs->sparse_buffers[idx];
-
- memset(buffer, 0, sizeof(*buffer));
- amdgpu_winsys_bo_reference(&buffer->bo, bo);
- p_atomic_inc(&bo->num_cs_references);
- cs->num_sparse_buffers++;
-
- hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
- cs->buffer_indices_hashlist[hash] = idx;
-
- /* We delay adding the backing buffers until we really have to. However,
- * we cannot delay accounting for memory use.
- */
- mtx_lock(&bo->u.sparse.commit_lock);
-
- list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
- if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- acs->main.base.used_vram += backing->bo->base.size;
- else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- acs->main.base.used_gart += backing->bo->base.size;
- }
-
- mtx_unlock(&bo->u.sparse.commit_lock);
-
- return idx;
-}
-
-static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
- struct pb_buffer *buf,
- enum radeon_bo_usage usage,
- enum radeon_bo_domain domains,
- enum radeon_bo_priority priority)
-{
- /* Don't use the "domains" parameter. Amdgpu doesn't support changing
- * the buffer placement during command submission.
- */
- struct amdgpu_cs *acs = amdgpu_cs(rcs);
- struct amdgpu_cs_context *cs = acs->csc;
- struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
- struct amdgpu_cs_buffer *buffer;
- int index;
-
- /* Fast exit for no-op calls.
- * This is very effective with suballocators and linear uploaders that
- * are outside of the winsys.
- */
- if (bo == cs->last_added_bo &&
- (usage & cs->last_added_bo_usage) == usage &&
- (1ull << priority) & cs->last_added_bo_priority_usage)
- return cs->last_added_bo_index;
-
- if (!bo->sparse) {
- if (!bo->bo) {
- index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
- if (index < 0)
- return 0;
-
- buffer = &cs->slab_buffers[index];
- buffer->usage |= usage;
-
- usage &= ~RADEON_USAGE_SYNCHRONIZED;
- index = buffer->u.slab.real_idx;
- } else {
- index = amdgpu_lookup_or_add_real_buffer(acs, bo);
- if (index < 0)
- return 0;
- }
-
- buffer = &cs->real_buffers[index];
- } else {
- index = amdgpu_lookup_or_add_sparse_buffer(acs, bo);
- if (index < 0)
- return 0;
-
- buffer = &cs->sparse_buffers[index];
- }
-
- buffer->u.real.priority_usage |= 1llu << priority;
- buffer->usage |= usage;
-
- cs->last_added_bo = bo;
- cs->last_added_bo_index = index;
- cs->last_added_bo_usage = buffer->usage;
- cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
- return index;
-}
-
-static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
-{
- struct pb_buffer *pb;
- uint8_t *mapped;
- unsigned buffer_size;
-
- /* Always create a buffer that is at least as large as the maximum seen IB
- * size, aligned to a power of two (and multiplied by 4 to reduce internal
- * fragmentation if chaining is not available). Limit to 512k dwords, which
- * is the largest power of two that fits into the size field of the
- * INDIRECT_BUFFER packet.
- */
- if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
- buffer_size = 4 *util_next_power_of_two(ib->max_ib_size);
- else
- buffer_size = 4 *util_next_power_of_two(4 * ib->max_ib_size);
-
- buffer_size = MIN2(buffer_size, 4 * 512 * 1024);
-
- switch (ib->ib_type) {
- case IB_CONST_PREAMBLE:
- buffer_size = MAX2(buffer_size, 4 * 1024);
- break;
- case IB_CONST:
- buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
- break;
- case IB_MAIN:
- buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
- break;
- default:
- unreachable("unhandled IB type");
- }
-
- pb = ws->base.buffer_create(&ws->base, buffer_size,
- ws->info.gart_page_size,
- RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS);
- if (!pb)
- return false;
-
- mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
- if (!mapped) {
- pb_reference(&pb, NULL);
- return false;
- }
-
- pb_reference(&ib->big_ib_buffer, pb);
- pb_reference(&pb, NULL);
-
- ib->ib_mapped = mapped;
- ib->used_ib_space = 0;
-
- return true;
-}
-
-static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
+static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
{
- switch (ib_type) {
- case IB_MAIN:
- /* Smaller submits means the GPU gets busy sooner and there is less
- * waiting for buffers and fences. Proof:
- * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
- */
- return 20 * 1024;
- case IB_CONST_PREAMBLE:
- case IB_CONST:
- /* There isn't really any reason to limit CE IB size beyond the natural
- * limit implied by the main IB, except perhaps GTT size. Just return
- * an extremely large value that we never get anywhere close to.
- */
- return 16 * 1024 * 1024;
- default:
- unreachable("bad ib_type");
- }
-}
+ /* The maximum size is 4MB - 1B, which is unaligned.
+ * Use aligned size 4MB - 16B. */
+ const unsigned max_ib_size = (1024 * 1024 - 16) * 4;
+ const unsigned min_ib_size = 24 * 1024 * 4;
-static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
- enum ib_type ib_type)
-{
- struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
- /* Small IBs are better than big IBs, because the GPU goes idle quicker
- * and there is less waiting for buffers and fences. Proof:
- * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
- */
- struct amdgpu_ib *ib = NULL;
- struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
- unsigned ib_size = 0;
-
- switch (ib_type) {
- case IB_CONST_PREAMBLE:
- ib = &cs->const_preamble_ib;
- ib_size = 256 * 4;
- break;
- case IB_CONST:
- ib = &cs->const_ib;
- ib_size = 8 * 1024 * 4;
- break;
- case IB_MAIN:
- ib = &cs->main;
- ib_size = 4 * 1024 * 4;
- break;
- default:
- unreachable("unhandled IB type");
- }
-
- if (!amdgpu_cs_has_chaining(cs)) {
- ib_size = MAX2(ib_size,
- 4 * MIN2(util_next_power_of_two(ib->max_ib_size),
- amdgpu_ib_max_submit_dwords(ib_type)));
- }
-
- ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;
-
- ib->base.prev_dw = 0;
- ib->base.num_prev = 0;
- ib->base.current.cdw = 0;
- ib->base.current.buf = NULL;
+ cs->base.cdw = 0;
+ cs->base.buf = NULL;
/* Allocate a new buffer for IBs if the current buffer is all used. */
- if (!ib->big_ib_buffer ||
- ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
- if (!amdgpu_ib_new_buffer(aws, ib))
+ if (!cs->big_ib_buffer ||
+ cs->used_ib_space + min_ib_size > cs->big_ib_buffer->size) {
+ struct radeon_winsys *ws = &cs->ctx->ws->base;
+ struct radeon_winsys_cs_handle *winsys_bo;
+
+ pb_reference(&cs->big_ib_buffer, NULL);
+ cs->big_ib_winsys_buffer = NULL;
+ cs->ib_mapped = NULL;
+ cs->used_ib_space = 0;
+
+ cs->big_ib_buffer = ws->buffer_create(ws, max_ib_size,
+ 4096, true,
+ RADEON_DOMAIN_GTT,
+ RADEON_FLAG_CPU_ACCESS);
+ if (!cs->big_ib_buffer)
return false;
- }
- info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
- ib->used_ib_space;
- info->size = 0;
- ib->ptr_ib_size = &info->size;
+ winsys_bo = ws->buffer_get_cs_handle(cs->big_ib_buffer);
- amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
- RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
+ cs->ib_mapped = ws->buffer_map(winsys_bo, NULL, PIPE_TRANSFER_WRITE);
+ if (!cs->ib_mapped) {
+ pb_reference(&cs->big_ib_buffer, NULL);
+ return false;
+ }
- ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
+ cs->big_ib_winsys_buffer = (struct amdgpu_winsys_bo*)winsys_bo;
+ }
- ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
- ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
+ cs->ib.ib_mc_address = cs->big_ib_winsys_buffer->va + cs->used_ib_space;
+ cs->base.buf = (uint32_t*)(cs->ib_mapped + cs->used_ib_space);
+ cs->base.max_dw = (cs->big_ib_buffer->size - cs->used_ib_space) / 4;
return true;
}
-static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
+static boolean amdgpu_init_cs_context(struct amdgpu_cs *cs,
+ enum ring_type ring_type)
{
- *ib->ptr_ib_size |= ib->base.current.cdw;
- ib->used_ib_space += ib->base.current.cdw * 4;
- ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
-}
+ int i;
-static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
- enum ring_type ring_type)
-{
switch (ring_type) {
case RING_DMA:
cs->request.ip_type = AMDGPU_HW_IP_DMA;
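amdgpu_get_new_ib in the hunk above hands out IB space from one large mapped buffer and only allocates a replacement once the remaining room drops below a minimum. A rough sketch of that bump-allocation scheme, with plain malloc standing in for the winsys GTT buffer and the sizing constants mirroring max_ib_size and min_ib_size above:

#include <stdint.h>
#include <stdlib.h>

#define POOL_SIZE ((1024u * 1024 - 16) * 4)  /* mirrors max_ib_size above */
#define MIN_CHUNK (24u * 1024 * 4)           /* mirrors min_ib_size above */

struct ib_pool {
   uint8_t *buf;     /* stands in for the mapped big_ib_buffer */
   unsigned size;
   unsigned used;    /* bump pointer: bytes handed out so far */
};

/* Returns a pointer where the next IB can be written, or NULL on failure. */
static uint8_t *get_new_ib(struct ib_pool *pool, unsigned *avail)
{
   if (!pool->buf || pool->used + MIN_CHUNK > pool->size) {
      /* Current pool exhausted: drop it and start a fresh one. */
      free(pool->buf);
      pool->buf = malloc(POOL_SIZE);
      if (!pool->buf)
         return NULL;
      pool->size = POOL_SIZE;
      pool->used = 0;
   }
   *avail = pool->size - pool->used;
   return pool->buf + pool->used;
}

/* After the IB is submitted, account for the space it consumed. */
static void ib_pool_commit(struct ib_pool *pool, unsigned bytes_used)
{
   pool->used += bytes_used;
}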
@@ -718,57 +271,62 @@ static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
break;
}
- memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
- cs->last_added_bo = NULL;
-
cs->request.number_of_ibs = 1;
- cs->request.ibs = &cs->ib[IB_MAIN];
+ cs->request.ibs = &cs->ib;
- cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
- cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
- AMDGPU_IB_FLAG_PREAMBLE;
+ cs->max_num_buffers = 512;
+ cs->buffers = (struct amdgpu_cs_buffer*)
+ CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
+ if (!cs->buffers) {
+ return FALSE;
+ }
- return true;
+ cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
+ if (!cs->handles) {
+ FREE(cs->buffers);
+ return FALSE;
+ }
+
+ cs->flags = CALLOC(1, cs->max_num_buffers);
+ if (!cs->flags) {
+ FREE(cs->handles);
+ FREE(cs->buffers);
+ return FALSE;
+ }
+
+ for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
+ cs->buffer_indices_hashlist[i] = -1;
+ }
+ return TRUE;
}
-static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
+static void amdgpu_cs_context_cleanup(struct amdgpu_cs *cs)
{
unsigned i;
- for (i = 0; i < cs->num_real_buffers; i++) {
- p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
- amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
- }
- for (i = 0; i < cs->num_slab_buffers; i++) {
- p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
- amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
- }
- for (i = 0; i < cs->num_sparse_buffers; i++) {
- p_atomic_dec(&cs->sparse_buffers[i].bo->num_cs_references);
- amdgpu_winsys_bo_reference(&cs->sparse_buffers[i].bo, NULL);
+ for (i = 0; i < cs->num_buffers; i++) {
+ p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
+ amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
+ cs->handles[i] = NULL;
+ cs->flags[i] = 0;
}
- for (i = 0; i < cs->num_fence_dependencies; i++)
- amdgpu_fence_reference(&cs->fence_dependencies[i], NULL);
- cs->num_real_buffers = 0;
- cs->num_slab_buffers = 0;
- cs->num_sparse_buffers = 0;
- cs->num_fence_dependencies = 0;
- amdgpu_fence_reference(&cs->fence, NULL);
+ cs->num_buffers = 0;
+ cs->used_gart = 0;
+ cs->used_vram = 0;
- memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
- cs->last_added_bo = NULL;
+ for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
+ cs->buffer_indices_hashlist[i] = -1;
+ }
}
-static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
+static void amdgpu_destroy_cs_context(struct amdgpu_cs *cs)
{
amdgpu_cs_context_cleanup(cs);
FREE(cs->flags);
- FREE(cs->real_buffers);
+ FREE(cs->buffers);
FREE(cs->handles);
- FREE(cs->slab_buffers);
- FREE(cs->sparse_buffers);
- FREE(cs->fence_dependencies);
+ FREE(cs->request.dependencies);
}
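Both versions keep the per-CS buffer list in flat arrays that are grown on demand — the removed code geometrically (at least +16, roughly 1.3x), the added code in fixed steps of ten. A small sketch of the geometric variant with an explicit failure check, using hypothetical types:

#include <stdlib.h>

struct buffer_entry { void *bo; unsigned usage; };

struct buffer_list {
   struct buffer_entry *entries;
   unsigned num;
   unsigned max;
};

/* Append one entry, growing the backing array by ~1.3x (at least +16)
 * when it is full.  Returns the new index or -1 on allocation failure. */
static int buffer_list_add(struct buffer_list *list, void *bo, unsigned usage)
{
   if (list->num >= list->max) {
      unsigned new_max = list->max + 16;
      if (new_max < (unsigned)(list->max * 1.3))
         new_max = (unsigned)(list->max * 1.3);

      struct buffer_entry *grown =
         realloc(list->entries, new_max * sizeof(*grown));
      if (!grown)
         return -1;   /* keep the old array intact on failure */

      list->entries = grown;
      list->max = new_max;
   }

   list->entries[list->num].bo = bo;
   list->entries[list->num].usage = usage;
   return (int)list->num++;
}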
@@ -777,7 +335,8 @@ amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
enum ring_type ring_type,
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
- void *flush_ctx)
+ void *flush_ctx,
+ struct radeon_winsys_cs_handle *trace_buf)
{
struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
struct amdgpu_cs *cs;
@@ -787,655 +346,338 @@ amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
return NULL;
}
- util_queue_fence_init(&cs->flush_completed);
-
cs->ctx = ctx;
cs->flush_cs = flush;
cs->flush_data = flush_ctx;
- cs->ring_type = ring_type;
+ cs->base.ring_type = ring_type;
- cs->main.ib_type = IB_MAIN;
- cs->const_ib.ib_type = IB_CONST;
- cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;
-
- if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
- FREE(cs);
- return NULL;
- }
-
- if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
- amdgpu_destroy_cs_context(&cs->csc1);
+ if (!amdgpu_init_cs_context(cs, ring_type)) {
FREE(cs);
return NULL;
}
- /* Set the first submission context as current. */
- cs->csc = &cs->csc1;
- cs->cst = &cs->csc2;
-
- if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
- amdgpu_destroy_cs_context(&cs->csc2);
- amdgpu_destroy_cs_context(&cs->csc1);
+ if (!amdgpu_get_new_ib(cs)) {
+ amdgpu_destroy_cs_context(cs);
FREE(cs);
return NULL;
}
p_atomic_inc(&ctx->ws->num_cs);
- return &cs->main.base;
+ return &cs->base;
}
-static struct radeon_winsys_cs *
-amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
-{
- struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
- struct amdgpu_winsys *ws = cs->ctx->ws;
-
- /* only one const IB can be added */
- if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
- return NULL;
-
- if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
- return NULL;
+#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
- cs->csc->request.number_of_ibs = 2;
- cs->csc->request.ibs = &cs->csc->ib[IB_CONST];
-
- cs->cst->request.number_of_ibs = 2;
- cs->cst->request.ibs = &cs->cst->ib[IB_CONST];
-
- return &cs->const_ib.base;
-}
-
-static struct radeon_winsys_cs *
-amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
+int amdgpu_get_reloc(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
{
- struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
- struct amdgpu_winsys *ws = cs->ctx->ws;
-
- /* only one const preamble IB can be added and only when the const IB has
- * also been mapped */
- if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
- cs->const_preamble_ib.ib_mapped)
- return NULL;
-
- if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
- return NULL;
-
- cs->csc->request.number_of_ibs = 3;
- cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];
-
- cs->cst->request.number_of_ibs = 3;
- cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];
+ unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
+ int i = cs->buffer_indices_hashlist[hash];
- return &cs->const_preamble_ib.base;
-}
+ /* not found or found */
+ if (i == -1 || cs->buffers[i].bo == bo)
+ return i;
-static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
-{
- return true;
+ /* Hash collision, look for the BO in the list of relocs linearly. */
+ for (i = cs->num_buffers - 1; i >= 0; i--) {
+ if (cs->buffers[i].bo == bo) {
+ /* Put this reloc in the hash list.
+ * This will prevent additional hash collisions if there are
+ * several consecutive get_reloc calls for the same buffer.
+ *
+ * Example: Assuming buffers A,B,C collide in the hash list,
+ * the following sequence of relocs:
+ * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
+ * will collide here: ^ and here: ^,
+ * meaning that we should get very few collisions in the end. */
+ cs->buffer_indices_hashlist[hash] = i;
+ return i;
+ }
+ }
+ return -1;
}
-static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
+static unsigned amdgpu_add_reloc(struct amdgpu_cs *cs,
+ struct amdgpu_winsys_bo *bo,
+ enum radeon_bo_usage usage,
+ enum radeon_bo_domain domains,
+ unsigned priority,
+ enum radeon_bo_domain *added_domains)
{
- struct amdgpu_ib *ib = amdgpu_ib(rcs);
- struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
- unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
- uint64_t va;
- uint32_t *new_ptr_ib_size;
+ struct amdgpu_cs_buffer *reloc;
+ unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
+ int i = -1;
- assert(rcs->current.cdw <= rcs->current.max_dw);
+ priority = MIN2(priority, 15);
+ *added_domains = 0;
- if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
- return false;
+ i = amdgpu_get_reloc(cs, bo);
- ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
-
- if (rcs->current.max_dw - rcs->current.cdw >= dw)
- return true;
-
- if (!amdgpu_cs_has_chaining(cs))
- return false;
-
- /* Allocate a new chunk */
- if (rcs->num_prev >= rcs->max_prev) {
- unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
- struct radeon_winsys_cs_chunk *new_prev;
-
- new_prev = REALLOC(rcs->prev,
- sizeof(*new_prev) * rcs->max_prev,
- sizeof(*new_prev) * new_max_prev);
- if (!new_prev)
- return false;
-
- rcs->prev = new_prev;
- rcs->max_prev = new_max_prev;
+ if (i >= 0) {
+ reloc = &cs->buffers[i];
+ reloc->usage |= usage;
+ *added_domains = domains & ~reloc->domains;
+ reloc->domains |= domains;
+ cs->flags[i] = MAX2(cs->flags[i], priority);
+ return i;
}
- if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib))
- return false;
-
- assert(ib->used_ib_space == 0);
- va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;
-
- /* This space was originally reserved. */
- rcs->current.max_dw += 4;
- assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);
+ /* New relocation, check if the backing array is large enough. */
+ if (cs->num_buffers >= cs->max_num_buffers) {
+ uint32_t size;
+ cs->max_num_buffers += 10;
- /* Pad with NOPs and add INDIRECT_BUFFER packet */
- while ((rcs->current.cdw & 7) != 4)
- radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
+ size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
+ cs->buffers = realloc(cs->buffers, size);
- radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
- : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
- radeon_emit(rcs, va);
- radeon_emit(rcs, va >> 32);
- new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw];
- radeon_emit(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));
+ size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
+ cs->handles = realloc(cs->handles, size);
- assert((rcs->current.cdw & 7) == 0);
- assert(rcs->current.cdw <= rcs->current.max_dw);
-
- *ib->ptr_ib_size |= rcs->current.cdw;
- ib->ptr_ib_size = new_ptr_ib_size;
-
- /* Hook up the new chunk */
- rcs->prev[rcs->num_prev].buf = rcs->current.buf;
- rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
- rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
- rcs->num_prev++;
-
- ib->base.prev_dw += ib->base.current.cdw;
- ib->base.current.cdw = 0;
-
- ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
- ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
-
- amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
- RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
-
- return true;
-}
-
-static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
- struct radeon_bo_list_item *list)
-{
- struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
- int i;
-
- if (list) {
- for (i = 0; i < cs->num_real_buffers; i++) {
- list[i].bo_size = cs->real_buffers[i].bo->base.size;
- list[i].vm_address = cs->real_buffers[i].bo->va;
- list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
- }
- }
- return cs->num_real_buffers;
-}
-
-DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
-
-static void amdgpu_add_fence_dependency(struct amdgpu_cs *acs,
- struct amdgpu_cs_buffer *buffer)
-{
- struct amdgpu_cs_context *cs = acs->csc;
- struct amdgpu_winsys_bo *bo = buffer->bo;
- unsigned new_num_fences = 0;
-
- for (unsigned j = 0; j < bo->num_fences; ++j) {
- struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
- unsigned idx;
-
- if (bo_fence->ctx == acs->ctx &&
- bo_fence->fence.ip_type == cs->request.ip_type &&
- bo_fence->fence.ip_instance == cs->request.ip_instance &&
- bo_fence->fence.ring == cs->request.ring)
- continue;
-
- if (amdgpu_fence_wait((void *)bo_fence, 0, false))
- continue;
-
- amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
- new_num_fences++;
-
- if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
- continue;
-
- idx = cs->num_fence_dependencies++;
- if (idx >= cs->max_fence_dependencies) {
- unsigned size;
- const unsigned increment = 8;
-
- cs->max_fence_dependencies = idx + increment;
- size = cs->max_fence_dependencies * sizeof(cs->fence_dependencies[0]);
- cs->fence_dependencies = realloc(cs->fence_dependencies, size);
- /* Clear the newly-allocated elements. */
- memset(cs->fence_dependencies + idx, 0,
- increment * sizeof(cs->fence_dependencies[0]));
- }
-
- amdgpu_fence_reference(&cs->fence_dependencies[idx],
- (struct pipe_fence_handle*)bo_fence);
+ cs->flags = realloc(cs->flags, cs->max_num_buffers);
}
- for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
- amdgpu_fence_reference(&bo->fences[j], NULL);
+ /* Initialize the new relocation. */
+ cs->buffers[cs->num_buffers].bo = NULL;
+ amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
+ cs->handles[cs->num_buffers] = bo->bo;
+ cs->flags[cs->num_buffers] = priority;
+ p_atomic_inc(&bo->num_cs_references);
+ reloc = &cs->buffers[cs->num_buffers];
+ reloc->bo = bo;
+ reloc->usage = usage;
+ reloc->domains = domains;
+
+ cs->buffer_indices_hashlist[hash] = cs->num_buffers;
- bo->num_fences = new_num_fences;
+ *added_domains = domains;
+ return cs->num_buffers++;
}
-/* Add the given list of fences to the buffer's fence list.
- *
- * Must be called with the winsys bo_fence_lock held.
- */
-void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
- unsigned num_fences,
- struct pipe_fence_handle **fences)
+static unsigned amdgpu_cs_add_reloc(struct radeon_winsys_cs *rcs,
+ struct radeon_winsys_cs_handle *buf,
+ enum radeon_bo_usage usage,
+ enum radeon_bo_domain domains,
+ enum radeon_bo_priority priority)
{
- if (bo->num_fences + num_fences > bo->max_fences) {
- unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
- struct pipe_fence_handle **new_fences =
- REALLOC(bo->fences,
- bo->num_fences * sizeof(*new_fences),
- new_max_fences * sizeof(*new_fences));
- if (likely(new_fences)) {
- bo->fences = new_fences;
- bo->max_fences = new_max_fences;
- } else {
- unsigned drop;
-
- fprintf(stderr, "amdgpu_add_fences: allocation failure, dropping fence(s)\n");
- if (!bo->num_fences)
- return;
+ /* Don't use the "domains" parameter. Amdgpu doesn't support changing
+ * the buffer placement during command submission.
+ */
+ struct amdgpu_cs *cs = amdgpu_cs(rcs);
+ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
+ enum radeon_bo_domain added_domains;
+ unsigned index = amdgpu_add_reloc(cs, bo, usage, bo->initial_domain,
+ priority, &added_domains);
- bo->num_fences--; /* prefer to keep the most recent fence if possible */
- amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
+ if (added_domains & RADEON_DOMAIN_GTT)
+ cs->used_gart += bo->base.size;
+ if (added_domains & RADEON_DOMAIN_VRAM)
+ cs->used_vram += bo->base.size;
- drop = bo->num_fences + num_fences - bo->max_fences;
- num_fences -= drop;
- fences += drop;
- }
- }
-
- for (unsigned i = 0; i < num_fences; ++i) {
- bo->fences[bo->num_fences] = NULL;
- amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);
- bo->num_fences++;
- }
+ return index;
}
-static void amdgpu_add_fence_dependencies_list(struct amdgpu_cs *acs,
- struct pipe_fence_handle *fence,
- unsigned num_buffers,
- struct amdgpu_cs_buffer *buffers)
+static int amdgpu_cs_get_reloc(struct radeon_winsys_cs *rcs,
+ struct radeon_winsys_cs_handle *buf)
{
- for (unsigned i = 0; i < num_buffers; i++) {
- struct amdgpu_cs_buffer *buffer = &buffers[i];
- struct amdgpu_winsys_bo *bo = buffer->bo;
+ struct amdgpu_cs *cs = amdgpu_cs(rcs);
- amdgpu_add_fence_dependency(acs, buffer);
- p_atomic_inc(&bo->num_active_ioctls);
- amdgpu_add_fences(bo, 1, &fence);
- }
+ return amdgpu_get_reloc(cs, (struct amdgpu_winsys_bo*)buf);
}
-/* Since the kernel driver doesn't synchronize execution between different
- * rings automatically, we have to add fence dependencies manually.
- */
-static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
+static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
- struct amdgpu_cs_context *cs = acs->csc;
-
- cs->num_fence_dependencies = 0;
-
- amdgpu_add_fence_dependencies_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
- amdgpu_add_fence_dependencies_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
- amdgpu_add_fence_dependencies_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);
+ return TRUE;
}
-/* Add backing of sparse buffers to the buffer list.
- *
- * This is done late, during submission, to keep the buffer list short before
- * submit, and to avoid managing fences for the backing buffers.
- */
-static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
+static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
- for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
- struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
- struct amdgpu_winsys_bo *bo = buffer->bo;
-
- mtx_lock(&bo->u.sparse.commit_lock);
-
- list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
- /* We can directly add the buffer here, because we know that each
- * backing buffer occurs only once.
- */
- int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
- if (idx < 0) {
- fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
- mtx_unlock(&bo->u.sparse.commit_lock);
- return false;
- }
-
- cs->real_buffers[idx].usage = buffer->usage & ~RADEON_USAGE_SYNCHRONIZED;
- cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;
- p_atomic_inc(&backing->bo->num_active_ioctls);
- }
-
- mtx_unlock(&bo->u.sparse.commit_lock);
- }
+ struct amdgpu_cs *cs = amdgpu_cs(rcs);
+ boolean status =
+ (cs->used_gart + gtt) < cs->ctx->ws->info.gart_size * 0.7 &&
+ (cs->used_vram + vram) < cs->ctx->ws->info.vram_size * 0.7;
- return true;
+ return status;
}
-void amdgpu_cs_submit_ib(void *job, int thread_index)
+static void amdgpu_cs_do_submission(struct amdgpu_cs *cs,
+ struct pipe_fence_handle **out_fence)
{
- struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
- struct amdgpu_winsys *ws = acs->ctx->ws;
- struct amdgpu_cs_context *cs = acs->cst;
- int i, r;
- struct amdgpu_cs_fence *dependencies = NULL;
-
- /* Set dependencies (input fences). */
- if (cs->num_fence_dependencies) {
- dependencies = alloca(sizeof(dependencies[0]) *
- cs->num_fence_dependencies);
- unsigned num = 0;
-
- for (i = 0; i < cs->num_fence_dependencies; i++) {
- struct amdgpu_fence *fence =
- (struct amdgpu_fence*)cs->fence_dependencies[i];
-
- /* Past fences can't be unsubmitted because we have only 1 CS thread. */
- assert(!fence->submission_in_progress);
- memcpy(&dependencies[num++], &fence->fence, sizeof(dependencies[0]));
- }
- cs->request.dependencies = dependencies;
- cs->request.number_of_dependencies = num;
- } else {
- cs->request.dependencies = NULL;
- cs->request.number_of_dependencies = 0;
- }
-
- /* Set the output fence. */
- cs->request.fence_info.handle = NULL;
- if (amdgpu_cs_has_user_fence(cs)) {
- cs->request.fence_info.handle = acs->ctx->user_fence_bo;
- cs->request.fence_info.offset = acs->ring_type;
- }
-
- /* Create the buffer list.
- * Use a buffer list containing all allocated buffers if requested.
- */
- if (debug_get_option_all_bos()) {
- struct amdgpu_winsys_bo *bo;
- amdgpu_bo_handle *handles;
- unsigned num = 0;
-
- mtx_lock(&ws->global_bo_list_lock);
-
- handles = malloc(sizeof(handles[0]) * ws->num_buffers);
- if (!handles) {
- mtx_unlock(&ws->global_bo_list_lock);
- amdgpu_cs_context_cleanup(cs);
- cs->error_code = -ENOMEM;
- return;
- }
-
- LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
- assert(num < ws->num_buffers);
- handles[num++] = bo->bo;
- }
-
- r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
- handles, NULL,
- &cs->request.resources);
- free(handles);
- mtx_unlock(&ws->global_bo_list_lock);
- } else {
- if (!amdgpu_add_sparse_backing_buffers(cs)) {
- r = -ENOMEM;
- goto bo_list_error;
- }
-
- if (cs->max_real_submit < cs->num_real_buffers) {
- FREE(cs->handles);
- FREE(cs->flags);
-
- cs->handles = MALLOC(sizeof(*cs->handles) * cs->num_real_buffers);
- cs->flags = MALLOC(sizeof(*cs->flags) * cs->num_real_buffers);
+ struct amdgpu_winsys *ws = cs->ctx->ws;
+ struct pipe_fence_handle *fence;
+ int i, j, r;
- if (!cs->handles || !cs->flags) {
- cs->max_real_submit = 0;
- r = -ENOMEM;
- goto bo_list_error;
+ /* Create a fence. */
+ fence = amdgpu_fence_create(cs->ctx,
+ cs->request.ip_type,
+ cs->request.ip_instance,
+ cs->request.ring);
+ if (out_fence)
+ amdgpu_fence_reference(out_fence, fence);
+
+ cs->request.number_of_dependencies = 0;
+
+ /* Since the kernel driver doesn't synchronize execution between different
+ * rings automatically, we have to add fence dependencies manually. */
+ pipe_mutex_lock(ws->bo_fence_lock);
+ for (i = 0; i < cs->num_buffers; i++) {
+ for (j = 0; j < RING_LAST; j++) {
+ struct amdgpu_cs_fence *dep;
+ unsigned idx;
+
+ struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
+ if (!bo_fence)
+ continue;
+
+ if (bo_fence->ctx == cs->ctx &&
+ bo_fence->fence.ip_type == cs->request.ip_type &&
+ bo_fence->fence.ip_instance == cs->request.ip_instance &&
+ bo_fence->fence.ring == cs->request.ring)
+ continue;
+
+ if (amdgpu_fence_wait((void *)bo_fence, 0, false))
+ continue;
+
+ idx = cs->request.number_of_dependencies++;
+ if (idx >= cs->max_dependencies) {
+ unsigned size;
+
+ cs->max_dependencies = idx + 8;
+ size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
+ cs->request.dependencies = realloc(cs->request.dependencies, size);
}
- }
- for (i = 0; i < cs->num_real_buffers; ++i) {
- struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
-
- assert(buffer->u.real.priority_usage != 0);
-
- cs->handles[i] = buffer->bo->bo;
- cs->flags[i] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
+ dep = &cs->request.dependencies[idx];
+ memcpy(dep, &bo_fence->fence, sizeof(*dep));
}
-
- r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
- cs->handles, cs->flags,
- &cs->request.resources);
}
-bo_list_error:
- if (r) {
- fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
- cs->request.resources = NULL;
- amdgpu_fence_signalled(cs->fence);
- cs->error_code = r;
- goto cleanup;
+ cs->request.fence_info.handle = NULL;
+ if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE) {
+ cs->request.fence_info.handle = cs->ctx->user_fence_bo;
+ cs->request.fence_info.offset = cs->base.ring_type;
}
- if (acs->ctx->num_rejected_cs)
- r = -ECANCELED;
- else
- r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
-
- cs->error_code = r;
+ r = amdgpu_cs_submit(cs->ctx->ctx, 0, &cs->request, 1);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
- else if (r == -ECANCELED)
- fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
else
fprintf(stderr, "amdgpu: The CS has been rejected, "
- "see dmesg for more information (%i).\n", r);
-
- amdgpu_fence_signalled(cs->fence);
+ "see dmesg for more information.\n");
- acs->ctx->num_rejected_cs++;
- ws->num_total_rejected_cs++;
+ amdgpu_fence_signalled(fence);
} else {
/* Success. */
uint64_t *user_fence = NULL;
- if (amdgpu_cs_has_user_fence(cs))
- user_fence = acs->ctx->user_fence_cpu_address_base +
+ if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE)
+ user_fence = cs->ctx->user_fence_cpu_address_base +
cs->request.fence_info.offset;
- amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
- }
-
- /* Cleanup. */
- if (cs->request.resources)
- amdgpu_bo_list_destroy(cs->request.resources);
+ amdgpu_fence_submitted(fence, &cs->request, user_fence);
-cleanup:
- for (i = 0; i < cs->num_real_buffers; i++)
- p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
- for (i = 0; i < cs->num_slab_buffers; i++)
- p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
- for (i = 0; i < cs->num_sparse_buffers; i++)
- p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);
-
- amdgpu_cs_context_cleanup(cs);
+ for (i = 0; i < cs->num_buffers; i++)
+ amdgpu_fence_reference(&cs->buffers[i].bo->fence[cs->base.ring_type],
+ fence);
+ }
+ pipe_mutex_unlock(ws->bo_fence_lock);
+ amdgpu_fence_reference(&fence, NULL);
}
-/* Make sure the previous submission is completed. */
-void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
+static void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
- struct amdgpu_cs *cs = amdgpu_cs(rcs);
-
- /* Wait for any pending ioctl of this CS to complete. */
- util_queue_fence_wait(&cs->flush_completed);
+ /* no-op */
}
-static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
- unsigned flags,
- struct pipe_fence_handle **fence)
+DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
+
+static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
+ unsigned flags,
+ struct pipe_fence_handle **fence,
+ uint32_t cs_trace_id)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys *ws = cs->ctx->ws;
- int error_code = 0;
-
- rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);
- switch (cs->ring_type) {
+ switch (cs->base.ring_type) {
case RING_DMA:
/* pad DMA ring to 8 DWs */
if (ws->info.chip_class <= SI) {
- while (rcs->current.cdw & 7)
- radeon_emit(rcs, 0xf0000000); /* NOP packet */
+ while (rcs->cdw & 7)
+ OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
} else {
- while (rcs->current.cdw & 7)
- radeon_emit(rcs, 0x00000000); /* NOP packet */
+ while (rcs->cdw & 7)
+ OUT_CS(&cs->base, 0x00000000); /* NOP packet */
}
break;
case RING_GFX:
- /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
- if (ws->info.gfx_ib_pad_with_type2) {
- while (rcs->current.cdw & 7)
- radeon_emit(rcs, 0x80000000); /* type2 nop packet */
+ /* pad DMA ring to 8 DWs to meet CP fetch alignment requirements
+ * r6xx, requires at least 4 dw alignment to avoid a hw bug.
+ */
+ if (ws->info.chip_class <= SI) {
+ while (rcs->cdw & 7)
+ OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
} else {
- while (rcs->current.cdw & 7)
- radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
+ while (rcs->cdw & 7)
+ OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
}
-
- /* Also pad the const IB. */
- if (cs->const_ib.ib_mapped)
- while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
- radeon_emit(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
-
- if (cs->const_preamble_ib.ib_mapped)
- while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
- radeon_emit(&cs->const_preamble_ib.base, 0xffff1000);
break;
case RING_UVD:
- while (rcs->current.cdw & 15)
- radeon_emit(rcs, 0x80000000); /* type2 nop packet */
+ while (rcs->cdw & 15)
+ OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
break;
default:
break;
}
- if (rcs->current.cdw > rcs->current.max_dw) {
+ if (rcs->cdw > rcs->max_dw) {
fprintf(stderr, "amdgpu: command stream overflowed\n");
}
+ amdgpu_cs_add_reloc(rcs, (void*)cs->big_ib_winsys_buffer,
+ RADEON_USAGE_READ, 0, RADEON_PRIO_MIN);
+
/* If the CS is not empty or overflowed.... */
- if (likely(radeon_emitted(&cs->main.base, 0) &&
- cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
- !debug_get_option_noop())) {
- struct amdgpu_cs_context *cur = cs->csc;
-
- /* Set IB sizes. */
- amdgpu_ib_finalize(&cs->main);
-
- if (cs->const_ib.ib_mapped)
- amdgpu_ib_finalize(&cs->const_ib);
-
- if (cs->const_preamble_ib.ib_mapped)
- amdgpu_ib_finalize(&cs->const_preamble_ib);
-
- /* Create a fence. */
- amdgpu_fence_reference(&cur->fence, NULL);
- if (cs->next_fence) {
- /* just move the reference */
- cur->fence = cs->next_fence;
- cs->next_fence = NULL;
- } else {
- cur->fence = amdgpu_fence_create(cs->ctx,
- cur->request.ip_type,
- cur->request.ip_instance,
- cur->request.ring);
- }
- if (fence)
- amdgpu_fence_reference(fence, cur->fence);
-
- amdgpu_cs_sync_flush(rcs);
-
- /* Prepare buffers.
- *
- * This fence must be held until the submission is queued to ensure
- * that the order of fence dependency updates matches the order of
- * submissions.
- */
- mtx_lock(&ws->bo_fence_lock);
- amdgpu_add_fence_dependencies(cs);
-
- /* Swap command streams. "cst" is going to be submitted. */
- cs->csc = cs->cst;
- cs->cst = cur;
-
- /* Submit. */
- util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
- amdgpu_cs_submit_ib, NULL);
- /* The submission has been queued, unlock the fence now. */
- mtx_unlock(&ws->bo_fence_lock);
-
- if (!(flags & RADEON_FLUSH_ASYNC)) {
- amdgpu_cs_sync_flush(rcs);
- error_code = cur->error_code;
+ if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw && !debug_get_option_noop()) {
+ int r;
+
+ r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
+ cs->handles, cs->flags,
+ &cs->request.resources);
+
+ if (r) {
+ fprintf(stderr, "amdgpu: resource list creation failed (%d)\n", r);
+ cs->request.resources = NULL;
+ goto cleanup;
}
- } else {
- amdgpu_cs_context_cleanup(cs->csc);
- }
- amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
- if (cs->const_ib.ib_mapped)
- amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
- if (cs->const_preamble_ib.ib_mapped)
- amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);
+ cs->ib.size = cs->base.cdw;
+ cs->used_ib_space += cs->base.cdw * 4;
+
+ amdgpu_cs_do_submission(cs, fence);
- cs->main.base.used_gart = 0;
- cs->main.base.used_vram = 0;
+ /* Cleanup. */
+ if (cs->request.resources)
+ amdgpu_bo_list_destroy(cs->request.resources);
+ }
- if (cs->ring_type == RING_GFX)
- ws->num_gfx_IBs++;
- else if (cs->ring_type == RING_DMA)
- ws->num_sdma_IBs++;
+cleanup:
+ amdgpu_cs_context_cleanup(cs);
+ amdgpu_get_new_ib(cs);
- return error_code;
+ ws->num_cs_flushes++;
}
static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
- amdgpu_cs_sync_flush(rcs);
- util_queue_fence_destroy(&cs->flush_completed);
+ amdgpu_destroy_cs_context(cs);
p_atomic_dec(&cs->ctx->ws->num_cs);
- pb_reference(&cs->main.big_ib_buffer, NULL);
- FREE(cs->main.base.prev);
- pb_reference(&cs->const_ib.big_ib_buffer, NULL);
- FREE(cs->const_ib.base.prev);
- pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
- FREE(cs->const_preamble_ib.base.prev);
- amdgpu_destroy_cs_context(&cs->csc1);
- amdgpu_destroy_cs_context(&cs->csc2);
- amdgpu_fence_reference(&cs->next_fence, NULL);
+ pb_reference(&cs->big_ib_buffer, NULL);
FREE(cs);
}
-static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
- struct pb_buffer *_buf,
- enum radeon_bo_usage usage)
+static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
+ struct radeon_winsys_cs_handle *_buf,
+ enum radeon_bo_usage usage)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
@@ -1449,15 +691,12 @@ void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
ws->base.ctx_destroy = amdgpu_ctx_destroy;
ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
ws->base.cs_create = amdgpu_cs_create;
- ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
- ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
ws->base.cs_destroy = amdgpu_cs_destroy;
- ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
+ ws->base.cs_add_reloc = amdgpu_cs_add_reloc;
+ ws->base.cs_get_reloc = amdgpu_cs_get_reloc;
ws->base.cs_validate = amdgpu_cs_validate;
- ws->base.cs_check_space = amdgpu_cs_check_space;
- ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
+ ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
ws->base.cs_flush = amdgpu_cs_flush;
- ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
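Both the removed amdgpu_lookup_buffer and the added amdgpu_get_reloc above use the same lookup trick: hash the BO's unique id into a small index cache and fall back to a linear scan only on a collision, re-priming the cache with the hit. A self-contained sketch of that lookup over hypothetical structures:

#include <string.h>

#define HASHLIST_SIZE 512            /* must be a power of two */

struct tracked_bo { unsigned unique_id; };

struct bo_list {
   struct tracked_bo *bos[1024];
   int num_bos;
   int hashlist[HASHLIST_SIZE];      /* -1 means "slot empty" */
};

static void bo_list_init(struct bo_list *list)
{
   memset(list, 0, sizeof(*list));
   memset(list->hashlist, -1, sizeof(list->hashlist));
}

/* Returns the index of bo in the list, or -1 if it is not there. */
static int bo_list_lookup(struct bo_list *list, struct tracked_bo *bo)
{
   unsigned hash = bo->unique_id & (HASHLIST_SIZE - 1);
   int i = list->hashlist[hash];

   /* Fast path: slot empty (-1) or the cached index already points at bo. */
   if (i < 0 || (i < list->num_bos && list->bos[i] == bo))
      return i;

   /* Collision: scan linearly and re-prime the cache so that repeated
    * lookups of the same bo stay on the fast path. */
   for (i = list->num_bos - 1; i >= 0; i--) {
      if (list->bos[i] == bo) {
         list->hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}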
diff --git a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
index d83c1e0fe..12c6b624b 100644
--- a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
+++ b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
@@ -41,103 +41,46 @@ struct amdgpu_ctx {
amdgpu_bo_handle user_fence_bo;
uint64_t *user_fence_cpu_address_base;
int refcount;
- unsigned initial_num_total_rejected_cs;
- unsigned num_rejected_cs;
};
struct amdgpu_cs_buffer {
struct amdgpu_winsys_bo *bo;
- union {
- struct {
- uint64_t priority_usage;
- } real;
- struct {
- uint32_t real_idx; /* index of underlying real BO */
- } slab;
- } u;
enum radeon_bo_usage usage;
+ enum radeon_bo_domain domains;
};
-enum ib_type {
- IB_CONST_PREAMBLE = 0,
- IB_CONST = 1, /* the const IB must be first */
- IB_MAIN = 2,
- IB_NUM
-};
-struct amdgpu_ib {
+struct amdgpu_cs {
struct radeon_winsys_cs base;
+ struct amdgpu_ctx *ctx;
+
+ /* Flush CS. */
+ void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
+ void *flush_data;
/* A buffer out of which new IBs are allocated. */
- struct pb_buffer *big_ib_buffer;
- uint8_t *ib_mapped;
- unsigned used_ib_space;
- unsigned max_ib_size;
- uint32_t *ptr_ib_size;
- enum ib_type ib_type;
-};
+ struct pb_buffer *big_ib_buffer; /* for holding the reference */
+ struct amdgpu_winsys_bo *big_ib_winsys_buffer;
+ uint8_t *ib_mapped;
+ unsigned used_ib_space;
-struct amdgpu_cs_context {
+ /* amdgpu_cs_submit parameters */
struct amdgpu_cs_request request;
- struct amdgpu_cs_ib_info ib[IB_NUM];
-
- /* Buffers. */
- unsigned max_real_buffers;
- unsigned num_real_buffers;
- struct amdgpu_cs_buffer *real_buffers;
+ struct amdgpu_cs_ib_info ib;
- unsigned max_real_submit;
+ /* Relocs. */
+ unsigned max_num_buffers;
+ unsigned num_buffers;
amdgpu_bo_handle *handles;
uint8_t *flags;
+ struct amdgpu_cs_buffer *buffers;
- unsigned num_slab_buffers;
- unsigned max_slab_buffers;
- struct amdgpu_cs_buffer *slab_buffers;
+ int buffer_indices_hashlist[512];
- unsigned num_sparse_buffers;
- unsigned max_sparse_buffers;
- struct amdgpu_cs_buffer *sparse_buffers;
+ uint64_t used_vram;
+ uint64_t used_gart;
- int buffer_indices_hashlist[4096];
-
- struct amdgpu_winsys_bo *last_added_bo;
- unsigned last_added_bo_index;
- unsigned last_added_bo_usage;
- uint64_t last_added_bo_priority_usage;
-
- struct pipe_fence_handle **fence_dependencies;
- unsigned num_fence_dependencies;
- unsigned max_fence_dependencies;
-
- struct pipe_fence_handle *fence;
-
- /* the error returned from cs_flush for non-async submissions */
- int error_code;
-};
-
-struct amdgpu_cs {
- struct amdgpu_ib main; /* must be first because this is inherited */
- struct amdgpu_ib const_ib; /* optional constant engine IB */
- struct amdgpu_ib const_preamble_ib;
- struct amdgpu_ctx *ctx;
- enum ring_type ring_type;
-
- /* We flip between these two CS. While one is being consumed
- * by the kernel in another thread, the other one is being filled
- * by the pipe driver. */
- struct amdgpu_cs_context csc1;
- struct amdgpu_cs_context csc2;
- /* The currently-used CS. */
- struct amdgpu_cs_context *csc;
- /* The CS being currently-owned by the other thread. */
- struct amdgpu_cs_context *cst;
-
- /* Flush CS. */
- void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
- void *flush_data;
-
- struct util_queue_fence flush_completed;
- struct pipe_fence_handle *next_fence;
+ unsigned max_dependencies;
};
struct amdgpu_fence {
@@ -147,9 +90,6 @@ struct amdgpu_fence {
struct amdgpu_cs_fence fence;
uint64_t *user_fence_cpu_address;
- /* If the fence is unknown due to an IB still being submitted
- * in the other thread. */
- volatile int submission_in_progress; /* bool (int for atomicity) */
volatile int signalled; /* bool (int for atomicity) */
};
@@ -175,71 +115,41 @@ static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
*rdst = rsrc;
}
-int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo);
-
-static inline struct amdgpu_ib *
-amdgpu_ib(struct radeon_winsys_cs *base)
-{
- return (struct amdgpu_ib *)base;
-}
+int amdgpu_get_reloc(struct amdgpu_cs *csc, struct amdgpu_winsys_bo *bo);
static inline struct amdgpu_cs *
amdgpu_cs(struct radeon_winsys_cs *base)
{
- assert(amdgpu_ib(base)->ib_type == IB_MAIN);
return (struct amdgpu_cs*)base;
}
-#define get_container(member_ptr, container_type, container_member) \
- (container_type *)((char *)(member_ptr) - offsetof(container_type, container_member))
-
-static inline struct amdgpu_cs *
-amdgpu_cs_from_ib(struct amdgpu_ib *ib)
-{
- switch (ib->ib_type) {
- case IB_MAIN:
- return get_container(ib, struct amdgpu_cs, main);
- case IB_CONST:
- return get_container(ib, struct amdgpu_cs, const_ib);
- case IB_CONST_PREAMBLE:
- return get_container(ib, struct amdgpu_cs, const_preamble_ib);
- default:
- unreachable("bad ib_type");
- }
-}
-
-static inline bool
+static inline boolean
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo)
{
int num_refs = bo->num_cs_references;
- return num_refs == bo->ws->num_cs ||
- (num_refs && amdgpu_lookup_buffer(cs->csc, bo) != -1);
+ return num_refs == bo->rws->num_cs ||
+ (num_refs && amdgpu_get_reloc(cs, bo) != -1);
}
-static inline bool
+static inline boolean
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo,
enum radeon_bo_usage usage)
{
int index;
- struct amdgpu_cs_buffer *buffer;
if (!bo->num_cs_references)
- return false;
+ return FALSE;
- index = amdgpu_lookup_buffer(cs->csc, bo);
+ index = amdgpu_get_reloc(cs, bo);
if (index == -1)
- return false;
-
- buffer = bo->bo ? &cs->csc->real_buffers[index] :
- bo->sparse ? &cs->csc->sparse_buffers[index] :
- &cs->csc->slab_buffers[index];
+ return FALSE;
- return (buffer->usage & usage) != 0;
+ return (cs->buffers[index].usage & usage) != 0;
}
-static inline bool
+static inline boolean
amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
{
return bo->num_cs_references != 0;
@@ -247,11 +157,6 @@ amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
bool absolute);
-void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
- unsigned num_fences,
- struct pipe_fence_handle **fences);
-void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs);
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
-void amdgpu_cs_submit_ib(void *job, int thread_index);
#endif
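The casts in the header above (amdgpu_cs(), and amdgpu_ib() on the removed side) rely on the base struct being the first member of the containing struct, so a pointer to the base can be converted straight back to the derived type. A minimal illustration with made-up names:

#include <assert.h>
#include <stddef.h>

struct base_cs {               /* stands in for radeon_winsys_cs */
   unsigned cdw;
   unsigned max_dw;
};

struct derived_cs {            /* stands in for amdgpu_cs */
   struct base_cs base;        /* must stay the first member for the cast below */
   int ring_type;
};

/* Downcast: the first-member rule guarantees base sits at offset 0,
 * so the two pointer values are interchangeable. */
static inline struct derived_cs *derived_cs(struct base_cs *b)
{
   return (struct derived_cs *)b;
}

int main(void)
{
   struct derived_cs cs = { .base = { 0, 16 }, .ring_type = 1 };
   struct base_cs *b = &cs.base;

   assert((void *)b == (void *)&cs);              /* base lives at offset 0 */
   assert(derived_cs(b)->ring_type == 1);
   assert(offsetof(struct derived_cs, base) == 0);
   return 0;
}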
diff --git a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c
index 4d532e397..358df3810 100644
--- a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c
+++ b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c
@@ -30,25 +30,36 @@
*/
#include "amdgpu_winsys.h"
-#include "util/u_format.h"
+
+#ifndef NO_ENTRIES
+#define NO_ENTRIES 32
+#endif
+
+#ifndef NO_MACRO_ENTRIES
+#define NO_MACRO_ENTRIES 16
+#endif
#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
#endif
-#ifndef CIASICIDGFXENGINE_ARCTICISLAND
-#define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
-#endif
-static int amdgpu_surface_sanity(const struct pipe_resource *tex)
+static int amdgpu_surface_sanity(const struct radeon_surf *surf)
{
+ unsigned type = RADEON_SURF_GET(surf->flags, TYPE);
+
+ if (!(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX))
+ return -EINVAL;
+
/* all dimension must be at least 1 ! */
- if (!tex->width0 || !tex->height0 || !tex->depth0 ||
- !tex->array_size)
+ if (!surf->npix_x || !surf->npix_y || !surf->npix_z ||
+ !surf->array_size)
return -EINVAL;
- switch (tex->nr_samples) {
- case 0:
+ if (!surf->blk_w || !surf->blk_h || !surf->blk_d)
+ return -EINVAL;
+
+ switch (surf->nsamples) {
case 1:
case 2:
case 4:
@@ -58,28 +69,26 @@ static int amdgpu_surface_sanity(const struct pipe_resource *tex)
return -EINVAL;
}
- switch (tex->target) {
- case PIPE_TEXTURE_1D:
- if (tex->height0 > 1)
+ switch (type) {
+ case RADEON_SURF_TYPE_1D:
+ if (surf->npix_y > 1)
return -EINVAL;
/* fall through */
- case PIPE_TEXTURE_2D:
- case PIPE_TEXTURE_RECT:
- if (tex->depth0 > 1 || tex->array_size > 1)
+ case RADEON_SURF_TYPE_2D:
+ case RADEON_SURF_TYPE_CUBEMAP:
+ if (surf->npix_z > 1 || surf->array_size > 1)
return -EINVAL;
break;
- case PIPE_TEXTURE_3D:
- if (tex->array_size > 1)
+ case RADEON_SURF_TYPE_3D:
+ if (surf->array_size > 1)
return -EINVAL;
break;
- case PIPE_TEXTURE_1D_ARRAY:
- if (tex->height0 > 1)
+ case RADEON_SURF_TYPE_1D_ARRAY:
+ if (surf->npix_y > 1)
return -EINVAL;
/* fall through */
- case PIPE_TEXTURE_CUBE:
- case PIPE_TEXTURE_2D_ARRAY:
- case PIPE_TEXTURE_CUBE_ARRAY:
- if (tex->depth0 > 1)
+ case RADEON_SURF_TYPE_2D_ARRAY:
+ if (surf->npix_z > 1)
return -EINVAL;
break;
default:
@@ -99,6 +108,26 @@ static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT * pInpu
return ADDR_OK;
}
+/**
+ * This returns the number of banks for the surface.
+ * Possible values: 2, 4, 8, 16.
+ */
+static uint32_t cik_num_banks(struct amdgpu_winsys *ws,
+ struct radeon_surf *surf)
+{
+ unsigned index, tileb;
+
+ tileb = 8 * 8 * surf->bpe;
+ tileb = MIN2(surf->tile_split, tileb);
+
+ for (index = 0; tileb > 64; index++) {
+ tileb >>= 1;
+ }
+ assert(index < 16);
+
+ return 2 << ((ws->amdinfo.gb_macro_tile_mode[index] >> 6) & 0x3);
+}
+
ADDR_HANDLE amdgpu_addr_create(struct amdgpu_winsys *ws)
{
ADDR_CREATE_INPUT addrCreateInput = {0};
@@ -110,41 +139,29 @@ ADDR_HANDLE amdgpu_addr_create(struct amdgpu_winsys *ws)
addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
+ regValue.noOfBanks = ws->amdinfo.mc_arb_ramcfg & 0x3;
regValue.gbAddrConfig = ws->amdinfo.gb_addr_cfg;
- createFlags.value = 0;
-
- if (ws->info.chip_class >= GFX9) {
- addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
- regValue.blockVarSizeLog2 = 0;
- } else {
- regValue.noOfBanks = ws->amdinfo.mc_arb_ramcfg & 0x3;
- regValue.noOfRanks = (ws->amdinfo.mc_arb_ramcfg & 0x4) >> 2;
-
- regValue.backendDisables = ws->amdinfo.enabled_rb_pipes_mask;
- regValue.pTileConfig = ws->amdinfo.gb_tile_mode;
- regValue.noOfEntries = ARRAY_SIZE(ws->amdinfo.gb_tile_mode);
- if (ws->info.chip_class == SI) {
- regValue.pMacroTileConfig = NULL;
- regValue.noOfMacroEntries = 0;
- } else {
- regValue.pMacroTileConfig = ws->amdinfo.gb_macro_tile_mode;
- regValue.noOfMacroEntries = ARRAY_SIZE(ws->amdinfo.gb_macro_tile_mode);
- }
+ regValue.noOfRanks = (ws->amdinfo.mc_arb_ramcfg & 0x4) >> 2;
- createFlags.useTileIndex = 1;
- createFlags.useHtileSliceAlign = 1;
+ regValue.backendDisables = ws->amdinfo.backend_disable[0];
+ regValue.pTileConfig = ws->amdinfo.gb_tile_mode;
+ regValue.noOfEntries = sizeof(ws->amdinfo.gb_tile_mode) /
+ sizeof(ws->amdinfo.gb_tile_mode[0]);
+ regValue.pMacroTileConfig = ws->amdinfo.gb_macro_tile_mode;
+ regValue.noOfMacroEntries = sizeof(ws->amdinfo.gb_macro_tile_mode) /
+ sizeof(ws->amdinfo.gb_macro_tile_mode[0]);
- addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
- addrCreateInput.chipFamily = ws->family;
- addrCreateInput.chipRevision = ws->rev_id;
- }
+ createFlags.value = 0;
+ createFlags.useTileIndex = 1;
+ createFlags.degradeBaseLevel = 1;
+ addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
addrCreateInput.chipFamily = ws->family;
addrCreateInput.chipRevision = ws->rev_id;
+ addrCreateInput.createFlags = createFlags;
addrCreateInput.callbacks.allocSysMem = allocSysMem;
addrCreateInput.callbacks.freeSysMem = freeSysMem;
addrCreateInput.callbacks.debugPrint = 0;
- addrCreateInput.createFlags = createFlags;
addrCreateInput.regValue = regValue;
addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
@@ -154,38 +171,33 @@ ADDR_HANDLE amdgpu_addr_create(struct amdgpu_winsys *ws)
return addrCreateOutput.hLib;
}
-static int gfx6_compute_level(struct amdgpu_winsys *ws,
- const struct pipe_resource *tex,
- struct radeon_surf *surf, bool is_stencil,
- unsigned level, bool compressed,
- ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
- ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
- ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
- ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
- ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
- ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
+static int compute_level(struct amdgpu_winsys *ws,
+ struct radeon_surf *surf, bool is_stencil,
+ unsigned level, unsigned type, bool compressed,
+ ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
+ ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut)
{
- struct legacy_surf_level *surf_level;
+ struct radeon_surf_level *surf_level;
ADDR_E_RETURNCODE ret;
AddrSurfInfoIn->mipLevel = level;
- AddrSurfInfoIn->width = u_minify(tex->width0, level);
- AddrSurfInfoIn->height = u_minify(tex->height0, level);
+ AddrSurfInfoIn->width = u_minify(surf->npix_x, level);
+ AddrSurfInfoIn->height = u_minify(surf->npix_y, level);
- if (tex->target == PIPE_TEXTURE_3D)
- AddrSurfInfoIn->numSlices = u_minify(tex->depth0, level);
- else if (tex->target == PIPE_TEXTURE_CUBE)
+ if (type == RADEON_SURF_TYPE_3D)
+ AddrSurfInfoIn->numSlices = u_minify(surf->npix_z, level);
+ else if (type == RADEON_SURF_TYPE_CUBEMAP)
AddrSurfInfoIn->numSlices = 6;
else
- AddrSurfInfoIn->numSlices = tex->array_size;
+ AddrSurfInfoIn->numSlices = surf->array_size;
if (level > 0) {
/* Set the base level pitch. This is needed for calculation
* of non-zero levels. */
if (is_stencil)
- AddrSurfInfoIn->basePitch = surf->u.legacy.stencil_level[0].nblk_x;
+ AddrSurfInfoIn->basePitch = surf->stencil_level[0].nblk_x;
else
- AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
+ AddrSurfInfoIn->basePitch = surf->level[0].nblk_x;
/* Convert blocks to pixels for compressed formats. */
if (compressed)
@@ -199,13 +211,24 @@ static int gfx6_compute_level(struct amdgpu_winsys *ws,
return ret;
}
- surf_level = is_stencil ? &surf->u.legacy.stencil_level[level] : &surf->u.legacy.level[level];
- surf_level->offset = align64(surf->surf_size, AddrSurfInfoOut->baseAlign);
+ surf_level = is_stencil ? &surf->stencil_level[level] : &surf->level[level];
+ surf_level->offset = align(surf->bo_size, AddrSurfInfoOut->baseAlign);
surf_level->slice_size = AddrSurfInfoOut->sliceSize;
+ surf_level->pitch_bytes = AddrSurfInfoOut->pitch * (is_stencil ? 1 : surf->bpe);
+ surf_level->npix_x = u_minify(surf->npix_x, level);
+ surf_level->npix_y = u_minify(surf->npix_y, level);
+ surf_level->npix_z = u_minify(surf->npix_z, level);
surf_level->nblk_x = AddrSurfInfoOut->pitch;
surf_level->nblk_y = AddrSurfInfoOut->height;
+ if (type == RADEON_SURF_TYPE_3D)
+ surf_level->nblk_z = AddrSurfInfoOut->depth;
+ else
+ surf_level->nblk_z = 1;
switch (AddrSurfInfoOut->tileMode) {
+ case ADDR_TM_LINEAR_GENERAL:
+ surf_level->mode = RADEON_SURF_MODE_LINEAR;
+ break;
case ADDR_TM_LINEAR_ALIGNED:
surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
break;
@@ -220,144 +243,53 @@ static int gfx6_compute_level(struct amdgpu_winsys *ws,
}
if (is_stencil)
- surf->u.legacy.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
+ surf->stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
else
- surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
-
- surf->surf_size = surf_level->offset + AddrSurfInfoOut->surfSize;
-
- /* Clear DCC fields at the beginning. */
- surf_level->dcc_offset = 0;
-
- /* The previous level's flag tells us if we can use DCC for this level. */
- if (AddrSurfInfoIn->flags.dccCompatible &&
- (level == 0 || AddrDccOut->subLvlCompressible)) {
- AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
- AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
- AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
- AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
- AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
-
- ret = AddrComputeDccInfo(ws->addrlib,
- AddrDccIn,
- AddrDccOut);
-
- if (ret == ADDR_OK) {
- surf_level->dcc_offset = surf->dcc_size;
- surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
- surf->num_dcc_levels = level + 1;
- surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
- surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);
- }
- }
-
- /* TC-compatible HTILE. */
- if (!is_stencil &&
- AddrSurfInfoIn->flags.depth &&
- AddrSurfInfoIn->flags.tcCompatible &&
- surf_level->mode == RADEON_SURF_MODE_2D &&
- level == 0) {
- AddrHtileIn->flags.tcCompatible = 1;
- AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
- AddrHtileIn->height = AddrSurfInfoOut->height;
- AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
- AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
- AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
- AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
- AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
- AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
-
- ret = AddrComputeHtileInfo(ws->addrlib,
- AddrHtileIn,
- AddrHtileOut);
-
- if (ret == ADDR_OK) {
- surf->htile_size = AddrHtileOut->htileBytes;
- surf->htile_alignment = AddrHtileOut->baseAlign;
- }
- }
+ surf->tiling_index[level] = AddrSurfInfoOut->tileIndex;
+ surf->bo_size = surf_level->offset + AddrSurfInfoOut->surfSize;
return 0;
}
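
The offset/bo_size bookkeeping at the end of compute_level is easier to see with concrete numbers. The sketch below re-runs the same accumulation with invented per-level sizes and a 64 KiB base alignment; none of the values come from addrlib.

/* Illustrative only: the same offset accumulation with made-up sizes. */
#include <stdint.h>
#include <stdio.h>

static uint64_t align_u64(uint64_t v, uint64_t a)
{
   return (v + a - 1) & ~(a - 1);    /* 'a' must be a power of two */
}

int main(void)
{
   uint64_t base_align = 65536;                  /* pretend baseAlign */
   uint64_t surf_size[2] = { 1048576, 262144 };  /* pretend surfSize per level */
   uint64_t bo_size = 0;

   for (int level = 0; level < 2; level++) {
      uint64_t offset = align_u64(bo_size, base_align);
      printf("level %d: offset = %llu\n", level, (unsigned long long)offset);
      bo_size = offset + surf_size[level];       /* matches compute_level */
   }
   printf("bo_size = %llu\n", (unsigned long long)bo_size);   /* 1310720 */
   return 0;
}

Each level is appended at the next baseAlign boundary, and surf->bo_size always ends up as the last level's offset plus its surfSize.
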
-#define G_009910_MICRO_TILE_MODE(x) (((x) >> 0) & 0x03)
-#define G_009910_MICRO_TILE_MODE_NEW(x) (((x) >> 22) & 0x07)
-
-static void gfx6_set_micro_tile_mode(struct radeon_surf *surf,
- struct radeon_info *info)
-{
- uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
-
- if (info->chip_class >= CIK)
- surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
- else
- surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
-}
-
-static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
-{
- unsigned index, tileb;
-
- tileb = 8 * 8 * surf->bpe;
- tileb = MIN2(surf->u.legacy.tile_split, tileb);
-
- for (index = 0; tileb > 64; index++)
- tileb >>= 1;
-
- assert(index < 16);
- return index;
-}
-
-static int gfx6_surface_init(struct radeon_winsys *rws,
- const struct pipe_resource *tex,
- unsigned flags, unsigned bpe,
- enum radeon_surf_mode mode,
- struct radeon_surf *surf)
+static int amdgpu_surface_init(struct radeon_winsys *rws,
+ struct radeon_surf *surf)
{
struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
- unsigned level;
+ unsigned level, mode, type;
bool compressed;
ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
- ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
- ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
- ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
- ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
ADDR_TILEINFO AddrTileInfoIn = {0};
ADDR_TILEINFO AddrTileInfoOut = {0};
int r;
- r = amdgpu_surface_sanity(tex);
+ r = amdgpu_surface_sanity(surf);
if (r)
return r;
AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
- AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
- AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
- AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
- AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
- surf->blk_w = util_format_get_blockwidth(tex->format);
- surf->blk_h = util_format_get_blockheight(tex->format);
- surf->bpe = bpe;
- surf->flags = flags;
-
+ type = RADEON_SURF_GET(surf->flags, TYPE);
+ mode = RADEON_SURF_GET(surf->flags, MODE);
compressed = surf->blk_w == 4 && surf->blk_h == 4;
/* MSAA and FMASK require 2D tiling. */
- if (tex->nr_samples > 1 ||
- (flags & RADEON_SURF_FMASK))
+ if (surf->nsamples > 1 ||
+ (surf->flags & RADEON_SURF_FMASK))
mode = RADEON_SURF_MODE_2D;
/* DB doesn't support linear layouts. */
- if (flags & (RADEON_SURF_Z_OR_SBUFFER) &&
+ if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) &&
mode < RADEON_SURF_MODE_1D)
mode = RADEON_SURF_MODE_1D;
/* Set the requested tiling mode. */
switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_GENERAL;
+ break;
case RADEON_SURF_MODE_LINEAR_ALIGNED:
AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
break;
@@ -374,7 +306,7 @@ static int gfx6_surface_init(struct radeon_winsys *rws,
/* The format must be set correctly for the allocation of compressed
* textures to work. In other cases, setting the bpp is sufficient. */
if (compressed) {
- switch (bpe) {
+ switch (surf->bpe) {
case 8:
AddrSurfInfoIn.format = ADDR_FMT_BC1;
break;
@@ -386,79 +318,43 @@ static int gfx6_surface_init(struct radeon_winsys *rws,
}
}
else {
- AddrDccIn.bpp = AddrSurfInfoIn.bpp = bpe * 8;
+ AddrSurfInfoIn.bpp = surf->bpe * 8;
}
- AddrDccIn.numSamples = AddrSurfInfoIn.numSamples =
- tex->nr_samples ? tex->nr_samples : 1;
+ AddrSurfInfoIn.numSamples = surf->nsamples;
AddrSurfInfoIn.tileIndex = -1;
/* Set the micro tile type. */
- if (flags & RADEON_SURF_SCANOUT)
+ if (surf->flags & RADEON_SURF_SCANOUT)
AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
- else if (flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_FMASK))
+ else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
else
AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
- AddrSurfInfoIn.flags.color = !(flags & RADEON_SURF_Z_OR_SBUFFER);
- AddrSurfInfoIn.flags.depth = (flags & RADEON_SURF_ZBUFFER) != 0;
- AddrSurfInfoIn.flags.cube = tex->target == PIPE_TEXTURE_CUBE;
- AddrSurfInfoIn.flags.fmask = (flags & RADEON_SURF_FMASK) != 0;
- AddrSurfInfoIn.flags.display = (flags & RADEON_SURF_SCANOUT) != 0;
- AddrSurfInfoIn.flags.pow2Pad = tex->last_level > 0;
- AddrSurfInfoIn.flags.tcCompatible = (flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
-
- /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
- * requested, because TC-compatible HTILE requires 2D tiling.
- */
- AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
- !AddrSurfInfoIn.flags.fmask &&
- tex->nr_samples <= 1 &&
- (flags & RADEON_SURF_OPTIMIZE_FOR_SPACE);
-
- /* DCC notes:
- * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
- * with samples >= 4.
- * - Mipmapped array textures have low performance (discovered by a closed
- * driver team).
- */
- AddrSurfInfoIn.flags.dccCompatible = ws->info.chip_class >= VI &&
- !(flags & RADEON_SURF_Z_OR_SBUFFER) &&
- !(flags & RADEON_SURF_DISABLE_DCC) &&
- !compressed && AddrDccIn.numSamples <= 1 &&
- ((tex->array_size == 1 && tex->depth0 == 1) ||
- tex->last_level == 0);
-
- AddrSurfInfoIn.flags.noStencil = (flags & RADEON_SURF_SBUFFER) == 0;
- AddrSurfInfoIn.flags.compressZ = AddrSurfInfoIn.flags.depth;
-
- /* noStencil = 0 can result in a depth part that is incompatible with
- * mipmapped texturing. So set noStencil = 1 when mipmaps are requested (in
- * this case, we may end up setting stencil_adjusted).
- *
- * TODO: update addrlib to a newer version, remove this, and
- * use flags.matchStencilTileCfg = 1 as an alternative fix.
- */
- if (tex->last_level > 0)
- AddrSurfInfoIn.flags.noStencil = 1;
+ AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
+ AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
+ AddrSurfInfoIn.flags.stencil = (surf->flags & RADEON_SURF_SBUFFER) != 0;
+ AddrSurfInfoIn.flags.cube = type == RADEON_SURF_TYPE_CUBEMAP;
+ AddrSurfInfoIn.flags.display = (surf->flags & RADEON_SURF_SCANOUT) != 0;
+ AddrSurfInfoIn.flags.pow2Pad = surf->last_level > 0;
+ AddrSurfInfoIn.flags.degrade4Space = 1;
+
+ /* This disables incorrect calculations (hacks) in addrlib. */
+ AddrSurfInfoIn.flags.noStencil = 1;
/* Set preferred macrotile parameters. This is usually required
* for shared resources. This is for 2D tiling only. */
if (AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 &&
- surf->u.legacy.bankw && surf->u.legacy.bankh &&
- surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
- assert(!(flags & RADEON_SURF_FMASK));
-
+ surf->bankw && surf->bankh && surf->mtilea && surf->tile_split) {
/* If any of these parameters are incorrect, the calculation
* will fail. */
- AddrTileInfoIn.banks = surf->u.legacy.num_banks;
- AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
- AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
- AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
- AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
- AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
- AddrSurfInfoIn.flags.opt4Space = 0;
+ AddrTileInfoIn.banks = cik_num_banks(ws, surf);
+ AddrTileInfoIn.bankWidth = surf->bankw;
+ AddrTileInfoIn.bankHeight = surf->bankh;
+ AddrTileInfoIn.macroAspectRatio = surf->mtilea;
+ AddrTileInfoIn.tileSplitBytes = surf->tile_split;
+ AddrSurfInfoIn.flags.degrade4Space = 0;
AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
/* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
@@ -469,531 +365,74 @@ static int gfx6_surface_init(struct radeon_winsys *rws,
* For now, just figure it out here.
* Note that only 2D_TILE_THIN1 is handled here.
*/
- assert(!(flags & RADEON_SURF_Z_OR_SBUFFER));
+ assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
- if (ws->info.chip_class == SI) {
- if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
- if (bpe == 2)
- AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
- else
- AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
- } else {
- if (bpe == 1)
- AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
- else if (bpe == 2)
- AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
- else if (bpe == 4)
- AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
- else
- AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
- }
- } else {
- /* CIK - VI */
- if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
- AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
- else
- AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
-
- /* Addrlib doesn't set this if tileIndex is forced like above. */
- AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
- }
+ if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
+ AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
+ else
+ AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
}
- surf->num_dcc_levels = 0;
- surf->surf_size = 0;
- surf->dcc_size = 0;
- surf->dcc_alignment = 1;
- surf->htile_size = 0;
- surf->htile_alignment = 1;
+ surf->bo_size = 0;
/* Calculate texture layout information. */
- for (level = 0; level <= tex->last_level; level++) {
- r = gfx6_compute_level(ws, tex, surf, false, level, compressed,
- &AddrSurfInfoIn, &AddrSurfInfoOut,
- &AddrDccIn, &AddrDccOut, &AddrHtileIn, &AddrHtileOut);
+ for (level = 0; level <= surf->last_level; level++) {
+ r = compute_level(ws, surf, false, level, type, compressed,
+ &AddrSurfInfoIn, &AddrSurfInfoOut);
if (r)
return r;
if (level == 0) {
- surf->surf_alignment = AddrSurfInfoOut.baseAlign;
- surf->u.legacy.pipe_config = AddrSurfInfoOut.pTileInfo->pipeConfig - 1;
- gfx6_set_micro_tile_mode(surf, &ws->info);
+ surf->bo_alignment = AddrSurfInfoOut.baseAlign;
+ surf->pipe_config = AddrSurfInfoOut.pTileInfo->pipeConfig - 1;
/* For 2D modes only. */
if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
- surf->u.legacy.bankw = AddrSurfInfoOut.pTileInfo->bankWidth;
- surf->u.legacy.bankh = AddrSurfInfoOut.pTileInfo->bankHeight;
- surf->u.legacy.mtilea = AddrSurfInfoOut.pTileInfo->macroAspectRatio;
- surf->u.legacy.tile_split = AddrSurfInfoOut.pTileInfo->tileSplitBytes;
- surf->u.legacy.num_banks = AddrSurfInfoOut.pTileInfo->banks;
- surf->u.legacy.macro_tile_index = AddrSurfInfoOut.macroModeIndex;
- } else {
- surf->u.legacy.macro_tile_index = 0;
+ surf->bankw = AddrSurfInfoOut.pTileInfo->bankWidth;
+ surf->bankh = AddrSurfInfoOut.pTileInfo->bankHeight;
+ surf->mtilea = AddrSurfInfoOut.pTileInfo->macroAspectRatio;
+ surf->tile_split = AddrSurfInfoOut.pTileInfo->tileSplitBytes;
+ surf->num_banks = AddrSurfInfoOut.pTileInfo->banks;
}
}
}
/* Calculate texture layout information for stencil. */
- if (flags & RADEON_SURF_SBUFFER) {
+ if (surf->flags & RADEON_SURF_SBUFFER) {
AddrSurfInfoIn.bpp = 8;
- AddrSurfInfoIn.flags.depth = 0;
- AddrSurfInfoIn.flags.stencil = 1;
- AddrSurfInfoIn.flags.tcCompatible = 0;
/* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
- AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
+ AddrTileInfoIn.tileSplitBytes = surf->stencil_tile_split;
- for (level = 0; level <= tex->last_level; level++) {
- r = gfx6_compute_level(ws, tex, surf, true, level, compressed,
- &AddrSurfInfoIn, &AddrSurfInfoOut,
- &AddrDccIn, &AddrDccOut,
- NULL, NULL);
+ for (level = 0; level <= surf->last_level; level++) {
+ r = compute_level(ws, surf, true, level, type, compressed,
+ &AddrSurfInfoIn, &AddrSurfInfoOut);
if (r)
return r;
- /* DB uses the depth pitch for both stencil and depth. */
- if (surf->u.legacy.stencil_level[level].nblk_x != surf->u.legacy.level[level].nblk_x)
- surf->u.legacy.stencil_adjusted = true;
-
if (level == 0) {
+ surf->stencil_offset = surf->stencil_level[0].offset;
+
/* For 2D modes only. */
if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
- surf->u.legacy.stencil_tile_split =
+ surf->stencil_tile_split =
AddrSurfInfoOut.pTileInfo->tileSplitBytes;
}
}
}
}
- /* Recalculate the whole DCC miptree size including disabled levels.
- * This is what addrlib does, but calling addrlib would be a lot more
- * complicated.
- */
- if (surf->dcc_size && tex->last_level > 0) {
- surf->dcc_size = align64(surf->surf_size >> 8,
- ws->info.pipe_interleave_bytes *
- ws->info.num_tile_pipes);
- }
-
- /* Make sure HTILE covers the whole miptree, because the shader reads
- * TC-compatible HTILE even for levels where it's disabled by DB.
- */
- if (surf->htile_size && tex->last_level)
- surf->htile_size *= 2;
-
- surf->is_linear = surf->u.legacy.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
- return 0;
-}
-
-/* This is only called when expecting a tiled layout. */
-static int
-gfx9_get_preferred_swizzle_mode(struct amdgpu_winsys *ws,
- ADDR2_COMPUTE_SURFACE_INFO_INPUT *in,
- bool is_fmask, AddrSwizzleMode *swizzle_mode)
-{
- ADDR_E_RETURNCODE ret;
- ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
- ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
-
- sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
- sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
-
- sin.flags = in->flags;
- sin.resourceType = in->resourceType;
- sin.format = in->format;
- sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
- /* TODO: We could allow some of these: */
- sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
- sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
- sin.forbiddenBlock.linear = 1; /* don't allow linear swizzle modes */
- sin.bpp = in->bpp;
- sin.width = in->width;
- sin.height = in->height;
- sin.numSlices = in->numSlices;
- sin.numMipLevels = in->numMipLevels;
- sin.numSamples = in->numSamples;
- sin.numFrags = in->numFrags;
-
- if (is_fmask) {
- sin.flags.color = 0;
- sin.flags.fmask = 1;
- }
-
- ret = Addr2GetPreferredSurfaceSetting(ws->addrlib, &sin, &sout);
- if (ret != ADDR_OK)
- return ret;
-
- *swizzle_mode = sout.swizzleMode;
- return 0;
-}
-
-static int gfx9_compute_miptree(struct amdgpu_winsys *ws,
- struct radeon_surf *surf, bool compressed,
- ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
-{
- ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {};
- ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
- ADDR_E_RETURNCODE ret;
-
- out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
- out.pMipInfo = mip_info;
-
- ret = Addr2ComputeSurfaceInfo(ws->addrlib, in, &out);
- if (ret != ADDR_OK)
- return ret;
-
- if (in->flags.stencil) {
- surf->u.gfx9.stencil.swizzle_mode = in->swizzleMode;
- surf->u.gfx9.stencil.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
- out.mipChainPitch - 1;
- surf->surf_alignment = MAX2(surf->surf_alignment, out.baseAlign);
- surf->u.gfx9.stencil_offset = align(surf->surf_size, out.baseAlign);
- surf->surf_size = surf->u.gfx9.stencil_offset + out.surfSize;
- return 0;
- }
-
- surf->u.gfx9.surf.swizzle_mode = in->swizzleMode;
- surf->u.gfx9.surf.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
- out.mipChainPitch - 1;
-
- /* CMASK fast clear uses these even if FMASK isn't allocated.
- * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
- */
- surf->u.gfx9.fmask.swizzle_mode = surf->u.gfx9.surf.swizzle_mode & ~0x3;
- surf->u.gfx9.fmask.epitch = surf->u.gfx9.surf.epitch;
-
- surf->u.gfx9.surf_slice_size = out.sliceSize;
- surf->u.gfx9.surf_pitch = out.pitch;
- surf->u.gfx9.surf_height = out.height;
- surf->surf_size = out.surfSize;
- surf->surf_alignment = out.baseAlign;
-
- if (in->swizzleMode == ADDR_SW_LINEAR) {
- for (unsigned i = 0; i < in->numMipLevels; i++)
- surf->u.gfx9.offset[i] = mip_info[i].offset;
- }
-
- if (in->flags.depth) {
- assert(in->swizzleMode != ADDR_SW_LINEAR);
-
- /* HTILE */
- ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
- ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
-
- hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
- hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
-
- hin.hTileFlags.pipeAligned = 1;
- hin.hTileFlags.rbAligned = 1;
- hin.depthFlags = in->flags;
- hin.swizzleMode = in->swizzleMode;
- hin.unalignedWidth = in->width;
- hin.unalignedHeight = in->height;
- hin.numSlices = in->numSlices;
- hin.numMipLevels = in->numMipLevels;
-
- ret = Addr2ComputeHtileInfo(ws->addrlib, &hin, &hout);
- if (ret != ADDR_OK)
- return ret;
-
- surf->u.gfx9.htile.rb_aligned = hin.hTileFlags.rbAligned;
- surf->u.gfx9.htile.pipe_aligned = hin.hTileFlags.pipeAligned;
- surf->htile_size = hout.htileBytes;
- surf->htile_alignment = hout.baseAlign;
- } else {
- /* DCC */
- if (!(surf->flags & RADEON_SURF_DISABLE_DCC) &&
- !(surf->flags & RADEON_SURF_SCANOUT) &&
- !compressed &&
- in->swizzleMode != ADDR_SW_LINEAR &&
- /* TODO: We could support DCC with MSAA. */
- in->numSamples == 1) {
- ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
- ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
-
- din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
- dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
-
- din.dccKeyFlags.pipeAligned = 1;
- din.dccKeyFlags.rbAligned = 1;
- din.colorFlags = in->flags;
- din.resourceType = in->resourceType;
- din.swizzleMode = in->swizzleMode;
- din.bpp = in->bpp;
- din.unalignedWidth = in->width;
- din.unalignedHeight = in->height;
- din.numSlices = in->numSlices;
- din.numFrags = in->numFrags;
- din.numMipLevels = in->numMipLevels;
- din.dataSurfaceSize = out.surfSize;
-
- ret = Addr2ComputeDccInfo(ws->addrlib, &din, &dout);
- if (ret != ADDR_OK)
- return ret;
-
- surf->u.gfx9.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
- surf->u.gfx9.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
- surf->u.gfx9.dcc_pitch_max = dout.pitch - 1;
- surf->dcc_size = dout.dccRamSize;
- surf->dcc_alignment = dout.dccRamBaseAlign;
- }
-
- /* FMASK */
- if (in->numSamples > 1) {
- ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
- ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
-
- fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
- fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
-
- ret = gfx9_get_preferred_swizzle_mode(ws, in, true, &fin.swizzleMode);
- if (ret != ADDR_OK)
- return ret;
-
- fin.unalignedWidth = in->width;
- fin.unalignedHeight = in->height;
- fin.numSlices = in->numSlices;
- fin.numSamples = in->numSamples;
- fin.numFrags = in->numFrags;
-
- ret = Addr2ComputeFmaskInfo(ws->addrlib, &fin, &fout);
- if (ret != ADDR_OK)
- return ret;
-
- surf->u.gfx9.fmask.swizzle_mode = fin.swizzleMode;
- surf->u.gfx9.fmask.epitch = fout.pitch - 1;
- surf->u.gfx9.fmask_size = fout.fmaskBytes;
- surf->u.gfx9.fmask_alignment = fout.baseAlign;
- }
-
- /* CMASK */
- if (in->swizzleMode != ADDR_SW_LINEAR) {
- ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
- ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
-
- cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
- cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
-
- cin.cMaskFlags.pipeAligned = 1;
- cin.cMaskFlags.rbAligned = 1;
- cin.colorFlags = in->flags;
- cin.resourceType = in->resourceType;
- cin.unalignedWidth = in->width;
- cin.unalignedHeight = in->height;
- cin.numSlices = in->numSlices;
-
- if (in->numSamples > 1)
- cin.swizzleMode = surf->u.gfx9.fmask.swizzle_mode;
- else
- cin.swizzleMode = in->swizzleMode;
-
- ret = Addr2ComputeCmaskInfo(ws->addrlib, &cin, &cout);
- if (ret != ADDR_OK)
- return ret;
-
- surf->u.gfx9.cmask.rb_aligned = cin.cMaskFlags.rbAligned;
- surf->u.gfx9.cmask.pipe_aligned = cin.cMaskFlags.pipeAligned;
- surf->u.gfx9.cmask_size = cout.cmaskBytes;
- surf->u.gfx9.cmask_alignment = cout.baseAlign;
- }
- }
-
return 0;
}
-static int gfx9_surface_init(struct radeon_winsys *rws,
- const struct pipe_resource *tex,
- unsigned flags, unsigned bpe,
- enum radeon_surf_mode mode,
- struct radeon_surf *surf)
+static int amdgpu_surface_best(struct radeon_winsys *rws,
+ struct radeon_surf *surf)
{
- struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
- bool compressed;
- ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
- int r;
-
- assert(!(flags & RADEON_SURF_FMASK));
-
- r = amdgpu_surface_sanity(tex);
- if (r)
- return r;
-
- AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
-
- surf->blk_w = util_format_get_blockwidth(tex->format);
- surf->blk_h = util_format_get_blockheight(tex->format);
- surf->bpe = bpe;
- surf->flags = flags;
-
- compressed = surf->blk_w == 4 && surf->blk_h == 4;
-
- /* The format must be set correctly for the allocation of compressed
- * textures to work. In other cases, setting the bpp is sufficient. */
- if (compressed) {
- switch (bpe) {
- case 8:
- AddrSurfInfoIn.format = ADDR_FMT_BC1;
- break;
- case 16:
- AddrSurfInfoIn.format = ADDR_FMT_BC3;
- break;
- default:
- assert(0);
- }
- } else {
- AddrSurfInfoIn.bpp = bpe * 8;
- }
-
- AddrSurfInfoIn.flags.color = !(flags & RADEON_SURF_Z_OR_SBUFFER);
- AddrSurfInfoIn.flags.depth = (flags & RADEON_SURF_ZBUFFER) != 0;
- AddrSurfInfoIn.flags.display = (flags & RADEON_SURF_SCANOUT) != 0;
- AddrSurfInfoIn.flags.texture = 1;
- AddrSurfInfoIn.flags.opt4space = 1;
-
- AddrSurfInfoIn.numMipLevels = tex->last_level + 1;
- AddrSurfInfoIn.numSamples = tex->nr_samples ? tex->nr_samples : 1;
- AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
-
- switch (tex->target) {
- /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
- * as 2D to avoid having shader variants for 1D vs 2D, so all shaders
- * must sample 1D textures as 2D. */
- case PIPE_TEXTURE_1D:
- case PIPE_TEXTURE_1D_ARRAY:
- case PIPE_TEXTURE_2D:
- case PIPE_TEXTURE_2D_ARRAY:
- case PIPE_TEXTURE_RECT:
- case PIPE_TEXTURE_CUBE:
- case PIPE_TEXTURE_CUBE_ARRAY:
- case PIPE_TEXTURE_3D:
- if (tex->target == PIPE_TEXTURE_3D)
- AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
- else
- AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
-
- AddrSurfInfoIn.width = tex->width0;
- AddrSurfInfoIn.height = tex->height0;
-
- if (tex->target == PIPE_TEXTURE_3D)
- AddrSurfInfoIn.numSlices = tex->depth0;
- else if (tex->target == PIPE_TEXTURE_CUBE)
- AddrSurfInfoIn.numSlices = 6;
- else
- AddrSurfInfoIn.numSlices = tex->array_size;
-
- switch (mode) {
- case RADEON_SURF_MODE_LINEAR_ALIGNED:
- assert(tex->nr_samples <= 1);
- assert(!(flags & RADEON_SURF_Z_OR_SBUFFER));
- AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
- break;
-
- case RADEON_SURF_MODE_1D:
- case RADEON_SURF_MODE_2D:
- r = gfx9_get_preferred_swizzle_mode(ws, &AddrSurfInfoIn, false,
- &AddrSurfInfoIn.swizzleMode);
- if (r)
- return r;
- break;
-
- default:
- assert(0);
- }
- break;
-
- default:
- assert(0);
- }
-
- surf->u.gfx9.resource_type = AddrSurfInfoIn.resourceType;
-
- surf->surf_size = 0;
- surf->dcc_size = 0;
- surf->htile_size = 0;
- surf->u.gfx9.surf_offset = 0;
- surf->u.gfx9.stencil_offset = 0;
- surf->u.gfx9.fmask_size = 0;
- surf->u.gfx9.cmask_size = 0;
-
- /* Calculate texture layout information. */
- r = gfx9_compute_miptree(ws, surf, compressed, &AddrSurfInfoIn);
- if (r)
- return r;
-
- /* Calculate texture layout information for stencil. */
- if (flags & RADEON_SURF_SBUFFER) {
- AddrSurfInfoIn.bpp = 8;
- AddrSurfInfoIn.flags.depth = 0;
- AddrSurfInfoIn.flags.stencil = 1;
-
- r = gfx9_compute_miptree(ws, surf, compressed, &AddrSurfInfoIn);
- if (r)
- return r;
- }
-
- surf->is_linear = surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR;
- surf->num_dcc_levels = surf->dcc_size ? tex->last_level + 1 : 0;
-
- switch (surf->u.gfx9.surf.swizzle_mode) {
- /* S = standard. */
- case ADDR_SW_256B_S:
- case ADDR_SW_4KB_S:
- case ADDR_SW_64KB_S:
- case ADDR_SW_VAR_S:
- case ADDR_SW_64KB_S_T:
- case ADDR_SW_4KB_S_X:
- case ADDR_SW_64KB_S_X:
- case ADDR_SW_VAR_S_X:
- surf->micro_tile_mode = RADEON_MICRO_MODE_THIN;
- break;
-
- /* D = display. */
- case ADDR_SW_LINEAR:
- case ADDR_SW_256B_D:
- case ADDR_SW_4KB_D:
- case ADDR_SW_64KB_D:
- case ADDR_SW_VAR_D:
- case ADDR_SW_64KB_D_T:
- case ADDR_SW_4KB_D_X:
- case ADDR_SW_64KB_D_X:
- case ADDR_SW_VAR_D_X:
- surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
- break;
-
- /* R = rotated. */
- case ADDR_SW_256B_R:
- case ADDR_SW_4KB_R:
- case ADDR_SW_64KB_R:
- case ADDR_SW_VAR_R:
- case ADDR_SW_64KB_R_T:
- case ADDR_SW_4KB_R_X:
- case ADDR_SW_64KB_R_X:
- case ADDR_SW_VAR_R_X:
- surf->micro_tile_mode = RADEON_MICRO_MODE_ROTATED;
- break;
-
- /* Z = depth. */
- case ADDR_SW_4KB_Z:
- case ADDR_SW_64KB_Z:
- case ADDR_SW_VAR_Z:
- case ADDR_SW_64KB_Z_T:
- case ADDR_SW_4KB_Z_X:
- case ADDR_SW_64KB_Z_X:
- case ADDR_SW_VAR_Z_X:
- surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
- break;
-
- default:
- assert(0);
- }
-
return 0;
}
void amdgpu_surface_init_functions(struct amdgpu_winsys *ws)
{
- if (ws->info.chip_class >= GFX9)
- ws->base.surface_init = gfx9_surface_init;
- else
- ws->base.surface_init = gfx6_surface_init;
+ ws->base.surface_init = amdgpu_surface_init;
+ ws->base.surface_best = amdgpu_surface_best;
}
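
For context, callers of these hooks fill in the input half of struct radeon_surf before invoking surface_init and read the computed layout back afterwards. The sketch below is hypothetical: it assumes the RADEON_SURF_SET() counterpart of the RADEON_SURF_GET() macro used above exists for packing TYPE and MODE into surf->flags, and it is not code from the tree.

/* Hypothetical caller -- a sketch, not driver code. */
#include <string.h>
#include "gallium/drivers/radeon/radeon_winsys.h"

static int describe_rgba8_2d(struct radeon_winsys *rws, struct radeon_surf *surf)
{
   memset(surf, 0, sizeof(*surf));
   surf->npix_x = 1024;
   surf->npix_y = 1024;
   surf->npix_z = 1;
   surf->blk_w = surf->blk_h = surf->blk_d = 1;   /* not block-compressed */
   surf->bpe = 4;                                 /* 32 bits per pixel */
   surf->nsamples = 1;
   surf->array_size = 1;
   surf->last_level = 0;
   surf->flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
                 RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

   /* On success, surf->bo_size, surf->bo_alignment and surf->level[] hold
    * the layout computed by addrlib. */
   return rws->surface_init(rws, surf);
}
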
diff --git a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index 70319db80..824f0d380 100644
--- a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -39,9 +39,7 @@
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
-#include "amd/common/amdgpu_id.h"
-#include "amd/common/sid.h"
-#include "amd/common/gfx9d.h"
+#include "amdgpu_id.h"
#define CIK_TILE_MODE_COLOR_2D 14
@@ -62,7 +60,7 @@
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
static struct util_hash_table *dev_tab = NULL;
-static mtx_t dev_tab_mutex = _MTX_INITIALIZER_NP;
+pipe_static_mutex(dev_tab_mutex);
static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
{
@@ -70,6 +68,7 @@ static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
case CIK__PIPE_CONFIG__ADDR_SURF_P2:
+ default:
return 2;
case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
@@ -87,35 +86,31 @@ static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
return 16;
- default:
- fprintf(stderr, "Invalid CIK pipe configuration, assuming P2\n");
- assert(!"this should never occur");
- return 2;
}
}
+/* Convert Sea Islands register values GB_ADDR_CFG and MC_ADDR_CFG
+ * into GB_TILING_CONFIG register which is only present on R600-R700. */
+static unsigned r600_get_gb_tiling_config(struct amdgpu_gpu_info *info)
+{
+ unsigned num_pipes = info->gb_addr_cfg & 0x7;
+ unsigned num_banks = info->mc_arb_ramcfg & 0x3;
+ unsigned pipe_interleave_bytes = (info->gb_addr_cfg >> 4) & 0x7;
+ unsigned row_size = (info->gb_addr_cfg >> 28) & 0x3;
+
+ return num_pipes | (num_banks << 4) |
+ (pipe_interleave_bytes << 8) |
+ (row_size << 12);
+}
+
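
A concrete register pair makes the packing above easier to check; the values below are invented, and only the shifts and masks mirror the helper.

/* Illustrative only: pack made-up GB_ADDR_CFG / MC_ARB_RAMCFG values. */
#include <stdio.h>

int main(void)
{
   unsigned gb_addr_cfg   = 0x12010002;   /* hypothetical register value */
   unsigned mc_arb_ramcfg = 0x00000002;   /* hypothetical register value */

   unsigned num_pipes             = gb_addr_cfg & 0x7;          /* 2 */
   unsigned num_banks             = mc_arb_ramcfg & 0x3;        /* 2 */
   unsigned pipe_interleave_bytes = (gb_addr_cfg >> 4) & 0x7;   /* 0 */
   unsigned row_size              = (gb_addr_cfg >> 28) & 0x3;  /* 1 */

   unsigned tiling_config = num_pipes | (num_banks << 4) |
                            (pipe_interleave_bytes << 8) |
                            (row_size << 12);

   printf("GB_TILING_CONFIG-style word: 0x%x\n", tiling_config);  /* 0x1022 */
   return 0;
}
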
/* Helper function to do the ioctls needed for setup and init. */
-static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
+static boolean do_winsys_init(struct amdgpu_winsys *ws)
{
struct amdgpu_buffer_size_alignments alignment_info = {};
- struct amdgpu_heap_info vram, vram_vis, gtt;
+ struct amdgpu_heap_info vram, gtt;
struct drm_amdgpu_info_hw_ip dma = {}, uvd = {}, vce = {};
- uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
- uint32_t unused_feature;
+ uint32_t vce_version = 0, vce_feature = 0;
int r, i, j;
- drmDevicePtr devinfo;
-
- /* Get PCI info. */
- r = drmGetDevice2(fd, 0, &devinfo);
- if (r) {
- fprintf(stderr, "amdgpu: drmGetDevice2 failed.\n");
- goto fail;
- }
- ws->info.pci_domain = devinfo->businfo.pci->domain;
- ws->info.pci_bus = devinfo->businfo.pci->bus;
- ws->info.pci_dev = devinfo->businfo.pci->dev;
- ws->info.pci_func = devinfo->businfo.pci->func;
- drmFreeDevice(&devinfo);
/* Query hardware and driver information. */
r = amdgpu_query_gpu_info(ws->dev, &ws->amdinfo);
@@ -136,14 +131,6 @@ static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
goto fail;
}
- r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- &vram_vis);
- if (r) {
- fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram_vis) failed.\n");
- goto fail;
- }
-
r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
@@ -162,34 +149,6 @@ static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
goto fail;
}
- r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
- &ws->info.me_fw_version, &unused_feature);
- if (r) {
- fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
- goto fail;
- }
-
- r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0,
- &ws->info.pfp_fw_version, &unused_feature);
- if (r) {
- fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
- goto fail;
- }
-
- r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_CE, 0, 0,
- &ws->info.ce_fw_version, &unused_feature);
- if (r) {
- fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(ce) failed.\n");
- goto fail;
- }
-
- r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_UVD, 0, 0,
- &uvd_version, &uvd_feature);
- if (r) {
- fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
- goto fail;
- }
-
r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_VCE, 0, &vce);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
@@ -217,48 +176,26 @@ static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
goto fail;
}
- if (ws->info.family >= CHIP_VEGA10)
- ws->info.chip_class = GFX9;
- else if (ws->info.family >= CHIP_TONGA)
+ if (ws->info.family >= CHIP_TONGA)
ws->info.chip_class = VI;
else if (ws->info.family >= CHIP_BONAIRE)
ws->info.chip_class = CIK;
- else if (ws->info.family >= CHIP_TAHITI)
- ws->info.chip_class = SI;
else {
fprintf(stderr, "amdgpu: Unknown family.\n");
goto fail;
}
- /* LLVM 5.0 is required for GFX9. */
- if (ws->info.chip_class >= GFX9 && HAVE_LLVM < 0x0500) {
- fprintf(stderr, "amdgpu: LLVM 5.0 is required, got LLVM %i.%i\n",
- HAVE_LLVM >> 8, HAVE_LLVM & 255);
+ /* LLVM 3.6 is required for VI. */
+ if (ws->info.chip_class >= VI &&
+ (HAVE_LLVM < 0x0306 ||
+ (HAVE_LLVM == 0x0306 && MESA_LLVM_VERSION_PATCH < 1))) {
+ fprintf(stderr, "amdgpu: LLVM 3.6.1 is required, got LLVM %i.%i.%i\n",
+ HAVE_LLVM >> 8, HAVE_LLVM & 255, MESA_LLVM_VERSION_PATCH);
goto fail;
}
/* family and rev_id are for addrlib */
switch (ws->info.family) {
- case CHIP_TAHITI:
- ws->family = FAMILY_SI;
- ws->rev_id = SI_TAHITI_P_A0;
- break;
- case CHIP_PITCAIRN:
- ws->family = FAMILY_SI;
- ws->rev_id = SI_PITCAIRN_PM_A0;
- break;
- case CHIP_VERDE:
- ws->family = FAMILY_SI;
- ws->rev_id = SI_CAPEVERDE_M_A0;
- break;
- case CHIP_OLAND:
- ws->family = FAMILY_SI;
- ws->rev_id = SI_OLAND_M_A0;
- break;
- case CHIP_HAINAN:
- ws->family = FAMILY_SI;
- ws->rev_id = SI_HAINAN_V_A0;
- break;
case CHIP_BONAIRE:
ws->family = FAMILY_CI;
ws->rev_id = CI_BONAIRE_M_A0;
@@ -299,26 +236,6 @@ static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
ws->family = FAMILY_VI;
ws->rev_id = VI_FIJI_P_A0;
break;
- case CHIP_POLARIS10:
- ws->family = FAMILY_VI;
- ws->rev_id = VI_POLARIS10_P_A0;
- break;
- case CHIP_POLARIS11:
- ws->family = FAMILY_VI;
- ws->rev_id = VI_POLARIS11_M_A0;
- break;
- case CHIP_POLARIS12:
- ws->family = FAMILY_VI;
- ws->rev_id = VI_POLARIS12_V_A0;
- break;
- case CHIP_VEGA10:
- ws->family = FAMILY_AI;
- ws->rev_id = AI_VEGA10_P_A0;
- break;
- case CHIP_RAVEN:
- ws->family = FAMILY_RV;
- ws->rev_id = RAVEN_A0;
- break;
default:
fprintf(stderr, "amdgpu: Unknown family.\n");
goto fail;
@@ -330,92 +247,69 @@ static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
goto fail;
}
- /* Set which chips have dedicated VRAM. */
- ws->info.has_dedicated_vram =
- !(ws->amdinfo.ids_flags & AMDGPU_IDS_FLAGS_FUSION);
-
/* Set hardware information. */
ws->info.gart_size = gtt.heap_size;
ws->info.vram_size = vram.heap_size;
- ws->info.vram_vis_size = vram_vis.heap_size;
- /* The kernel can split large buffers in VRAM but not in GTT, so large
- * allocations can fail or cause buffer movement failures in the kernel.
- */
- ws->info.max_alloc_size = MIN2(ws->info.vram_size * 0.9, ws->info.gart_size * 0.7);
/* convert the shader clock from KHz to MHz */
- ws->info.max_shader_clock = ws->amdinfo.max_engine_clk / 1000;
+ ws->info.max_sclk = ws->amdinfo.max_engine_clk / 1000;
ws->info.max_se = ws->amdinfo.num_shader_engines;
ws->info.max_sh_per_se = ws->amdinfo.num_shader_arrays_per_engine;
ws->info.has_uvd = uvd.available_rings != 0;
- ws->info.uvd_fw_version =
- uvd.available_rings ? uvd_version : 0;
ws->info.vce_fw_version =
vce.available_rings ? vce_version : 0;
- ws->info.has_userptr = true;
- ws->info.num_render_backends = ws->amdinfo.rb_pipes;
- ws->info.clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
- ws->info.tcc_cache_line_size = 64; /* TC L2 line size on GCN */
- if (ws->info.chip_class == GFX9) {
- ws->info.num_tile_pipes = 1 << G_0098F8_NUM_PIPES(ws->amdinfo.gb_addr_cfg);
- ws->info.pipe_interleave_bytes =
- 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(ws->amdinfo.gb_addr_cfg);
- } else {
- ws->info.num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
- ws->info.pipe_interleave_bytes =
- 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(ws->amdinfo.gb_addr_cfg);
- }
- ws->info.has_virtual_memory = true;
- ws->info.has_sdma = dma.available_rings != 0;
-
- /* Get the number of good compute units. */
- ws->info.num_good_compute_units = 0;
+ ws->info.has_userptr = TRUE;
+ ws->info.r600_num_backends = ws->amdinfo.rb_pipes;
+ ws->info.r600_clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
+ ws->info.r600_tiling_config = r600_get_gb_tiling_config(&ws->amdinfo);
+ ws->info.r600_num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
+ ws->info.r600_max_pipes = ws->amdinfo.max_quad_shader_pipes; /* TODO: is this correct? */
+ ws->info.r600_virtual_address = TRUE;
+ ws->info.r600_has_dma = dma.available_rings != 0;
+
+ /* Guess what the maximum compute unit number is by looking at the mask
+ * of enabled CUs.
+ */
for (i = 0; i < ws->info.max_se; i++)
- for (j = 0; j < ws->info.max_sh_per_se; j++)
- ws->info.num_good_compute_units +=
- util_bitcount(ws->amdinfo.cu_bitmap[i][j]);
+ for (j = 0; j < ws->info.max_sh_per_se; j++) {
+ unsigned max = util_last_bit(ws->amdinfo.cu_bitmap[i][j]);
+
+ if (ws->info.max_compute_units < max)
+ ws->info.max_compute_units = max;
+ }
+ ws->info.max_compute_units *= ws->info.max_se * ws->info.max_sh_per_se;
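
A short numeric example of the CU guess above: the bitmaps are hypothetical, and util_last_bit() is open-coded with a GCC/Clang builtin since Mesa's helper isn't available outside the tree.

/* Illustrative only. last_bit(x) == index of the highest set bit, plus one. */
#include <stdio.h>

static unsigned last_bit(unsigned x)
{
   return x ? 32 - (unsigned)__builtin_clz(x) : 0;
}

int main(void)
{
   /* Hypothetical chip: 2 SEs x 1 SH each, 10 CUs per SH, one CU fused off. */
   unsigned cu_bitmap[2][1] = { { 0x3ff }, { 0x3fe } };
   unsigned max_se = 2, max_sh_per_se = 1, max_compute_units = 0;

   for (unsigned i = 0; i < max_se; i++)
      for (unsigned j = 0; j < max_sh_per_se; j++) {
         unsigned max = last_bit(cu_bitmap[i][j]);   /* 10 for both SEs */
         if (max_compute_units < max)
            max_compute_units = max;
      }
   max_compute_units *= max_se * max_sh_per_se;      /* 10 * 2 * 1 == 20 */

   printf("max_compute_units = %u\n", max_compute_units);
   return 0;
}

As the comment in the patch says, this is a guess: the fused-off CU (bit 0 clear in the second bitmap) is still counted, so the result is an upper bound rather than an exact enabled-CU count.
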
memcpy(ws->info.si_tile_mode_array, ws->amdinfo.gb_tile_mode,
sizeof(ws->amdinfo.gb_tile_mode));
- ws->info.enabled_rb_mask = ws->amdinfo.enabled_rb_pipes_mask;
+ ws->info.si_tile_mode_array_valid = TRUE;
+ ws->info.si_backend_enabled_mask = ws->amdinfo.enabled_rb_pipes_mask;
memcpy(ws->info.cik_macrotile_mode_array, ws->amdinfo.gb_macro_tile_mode,
sizeof(ws->amdinfo.gb_macro_tile_mode));
+ ws->info.cik_macrotile_mode_array_valid = TRUE;
- ws->info.gart_page_size = alignment_info.size_remote;
-
- if (ws->info.chip_class == SI)
- ws->info.gfx_ib_pad_with_type2 = TRUE;
+ ws->gart_page_size = alignment_info.size_remote;
- ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;
-
- return true;
+ return TRUE;
fail:
if (ws->addrlib)
AddrDestroy(ws->addrlib);
amdgpu_device_deinitialize(ws->dev);
ws->dev = NULL;
- return false;
-}
-
-static void do_winsys_deinit(struct amdgpu_winsys *ws)
-{
- AddrDestroy(ws->addrlib);
- amdgpu_device_deinitialize(ws->dev);
+ return FALSE;
}
static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
- if (util_queue_is_initialized(&ws->cs_queue))
- util_queue_destroy(&ws->cs_queue);
+ pipe_mutex_destroy(ws->bo_fence_lock);
+
+ ws->cman->destroy(ws->cman);
+ ws->kman->destroy(ws->kman);
+ AddrDestroy(ws->addrlib);
- mtx_destroy(&ws->bo_fence_lock);
- pb_slabs_deinit(&ws->bo_slabs);
- pb_cache_deinit(&ws->bo_cache);
- mtx_destroy(&ws->global_bo_list_lock);
- do_winsys_deinit(ws);
+ amdgpu_device_deinitialize(ws->dev);
FREE(rws);
}
@@ -425,11 +319,11 @@ static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
*info = ((struct amdgpu_winsys *)rws)->info;
}
-static bool amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
- enum radeon_feature_id fid,
- bool enable)
+static boolean amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
+ enum radeon_feature_id fid,
+ boolean enable)
{
- return false;
+ return FALSE;
}
static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
@@ -444,63 +338,41 @@ static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
return ws->allocated_vram;
case RADEON_REQUESTED_GTT_MEMORY:
return ws->allocated_gtt;
- case RADEON_MAPPED_VRAM:
- return ws->mapped_vram;
- case RADEON_MAPPED_GTT:
- return ws->mapped_gtt;
case RADEON_BUFFER_WAIT_TIME_NS:
return ws->buffer_wait_time;
- case RADEON_NUM_MAPPED_BUFFERS:
- return ws->num_mapped_buffers;
case RADEON_TIMESTAMP:
amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
return retval;
- case RADEON_NUM_GFX_IBS:
- return ws->num_gfx_IBs;
- case RADEON_NUM_SDMA_IBS:
- return ws->num_sdma_IBs;
+ case RADEON_NUM_CS_FLUSHES:
+ return ws->num_cs_flushes;
case RADEON_NUM_BYTES_MOVED:
amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
return retval;
- case RADEON_NUM_EVICTIONS:
- amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
- return retval;
case RADEON_VRAM_USAGE:
amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
return heap.heap_usage;
- case RADEON_VRAM_VIS_USAGE:
- amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
- return heap.heap_usage;
case RADEON_GTT_USAGE:
amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
return heap.heap_usage;
case RADEON_GPU_TEMPERATURE:
- amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
- return retval;
case RADEON_CURRENT_SCLK:
- amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
- return retval;
case RADEON_CURRENT_MCLK:
- amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
- return retval;
+ return 0;
case RADEON_GPU_RESET_COUNTER:
assert(0);
return 0;
- case RADEON_CS_THREAD_TIME:
- return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
}
return 0;
}
-static bool amdgpu_read_registers(struct radeon_winsys *rws,
+static void amdgpu_read_registers(struct radeon_winsys *rws,
unsigned reg_offset,
unsigned num_registers, uint32_t *out)
{
struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
- return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
- 0xffffffff, 0, out) == 0;
+ amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
+ 0xffffffff, 0, out);
}
static unsigned hash_dev(void *key)
@@ -517,9 +389,9 @@ static int compare_dev(void *key1, void *key2)
return key1 != key2;
}
-static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
+static bool amdgpu_winsys_unref(struct radeon_winsys *ws)
{
- struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
+ struct amdgpu_winsys *rws = (struct amdgpu_winsys*)ws;
bool destroy;
/* When the reference counter drops to zero, remove the device pointer
@@ -527,13 +399,13 @@ static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
* This must happen while the mutex is locked, so that
* amdgpu_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
- mtx_lock(&dev_tab_mutex);
+ pipe_mutex_lock(dev_tab_mutex);
- destroy = pipe_reference(&ws->reference, NULL);
+ destroy = pipe_reference(&rws->reference, NULL);
if (destroy && dev_tab)
- util_hash_table_remove(dev_tab, ws->dev);
+ util_hash_table_remove(dev_tab, rws->dev);
- mtx_unlock(&dev_tab_mutex);
+ pipe_mutex_unlock(dev_tab_mutex);
return destroy;
}
@@ -553,7 +425,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
drmFreeVersion(version);
/* Look up the winsys from the dev table. */
- mtx_lock(&dev_tab_mutex);
+ pipe_mutex_lock(dev_tab_mutex);
if (!dev_tab)
dev_tab = util_hash_table_create(hash_dev, compare_dev);
@@ -561,7 +433,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
* for the same fd. */
r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
if (r) {
- mtx_unlock(&dev_tab_mutex);
+ pipe_mutex_unlock(dev_tab_mutex);
fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
return NULL;
}
@@ -570,37 +442,32 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
ws = util_hash_table_get(dev_tab, dev);
if (ws) {
pipe_reference(NULL, &ws->reference);
- mtx_unlock(&dev_tab_mutex);
+ pipe_mutex_unlock(dev_tab_mutex);
return &ws->base;
}
/* Create a new winsys. */
ws = CALLOC_STRUCT(amdgpu_winsys);
- if (!ws)
- goto fail;
+ if (!ws) {
+ pipe_mutex_unlock(dev_tab_mutex);
+ return NULL;
+ }
ws->dev = dev;
ws->info.drm_major = drm_major;
ws->info.drm_minor = drm_minor;
- if (!do_winsys_init(ws, fd))
- goto fail_alloc;
+ if (!do_winsys_init(ws))
+ goto fail;
/* Create managers. */
- pb_cache_init(&ws->bo_cache, 500000, ws->check_vm ? 1.0f : 2.0f, 0,
- (ws->info.vram_size + ws->info.gart_size) / 8,
- amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
-
- if (!pb_slabs_init(&ws->bo_slabs,
- AMDGPU_SLAB_MIN_SIZE_LOG2, AMDGPU_SLAB_MAX_SIZE_LOG2,
- 12, /* number of heaps (domain/flags combinations) */
- ws,
- amdgpu_bo_can_reclaim_slab,
- amdgpu_bo_slab_alloc,
- amdgpu_bo_slab_free))
- goto fail_cache;
-
- ws->info.min_alloc_size = 1 << AMDGPU_SLAB_MIN_SIZE_LOG2;
+ ws->kman = amdgpu_bomgr_create(ws);
+ if (!ws->kman)
+ goto fail;
+ ws->cman = pb_cache_manager_create(ws->kman, 500000, 2.0f, 0,
+ (ws->info.vram_size + ws->info.gart_size) / 8);
+ if (!ws->cman)
+ goto fail;
/* init reference */
pipe_reference_init(&ws->reference, 1);
@@ -613,19 +480,11 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
ws->base.query_value = amdgpu_query_value;
ws->base.read_registers = amdgpu_read_registers;
- amdgpu_bo_init_functions(ws);
+ amdgpu_bomgr_init_functions(ws);
amdgpu_cs_init_functions(ws);
amdgpu_surface_init_functions(ws);
- LIST_INITHEAD(&ws->global_bo_list);
- (void) mtx_init(&ws->global_bo_list_lock, mtx_plain);
- (void) mtx_init(&ws->bo_fence_lock, mtx_plain);
-
- if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
- amdgpu_winsys_destroy(&ws->base);
- mtx_unlock(&dev_tab_mutex);
- return NULL;
- }
+ pipe_mutex_init(ws->bo_fence_lock);
/* Create the screen at the end. The winsys must be initialized
* completely.
@@ -635,7 +494,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
ws->base.screen = screen_create(&ws->base);
if (!ws->base.screen) {
amdgpu_winsys_destroy(&ws->base);
- mtx_unlock(&dev_tab_mutex);
+ pipe_mutex_unlock(dev_tab_mutex);
return NULL;
}
@@ -644,16 +503,16 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
/* We must unlock the mutex once the winsys is fully initialized, so that
* other threads attempting to create the winsys from the same fd will
* get a fully initialized winsys and not just half-way initialized. */
- mtx_unlock(&dev_tab_mutex);
+ pipe_mutex_unlock(dev_tab_mutex);
return &ws->base;
-fail_cache:
- pb_cache_deinit(&ws->bo_cache);
- do_winsys_deinit(ws);
-fail_alloc:
- FREE(ws);
fail:
- mtx_unlock(&dev_tab_mutex);
+ pipe_mutex_unlock(dev_tab_mutex);
+ if (ws->cman)
+ ws->cman->destroy(ws->cman);
+ if (ws->kman)
+ ws->kman->destroy(ws->kman);
+ FREE(ws);
return NULL;
}
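
The fd-to-winsys deduplication in amdgpu_winsys_create follows a familiar pattern: take a global lock, look the amdgpu device handle up in a hash table, bump the reference count on a hit, otherwise build a new winsys and publish it before unlocking. A generic sketch of that pattern, using plain pthreads and a linked list instead of Mesa's wrappers and util_hash_table, looks roughly like this.

/* Generic lookup-or-create sketch -- not the Mesa code. */
#include <pthread.h>
#include <stdlib.h>

struct winsys { int refcount; void *dev; struct winsys *next; };

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct winsys *table;            /* stand-in for the dev hash table */

static struct winsys *lookup_or_create(void *dev)
{
   struct winsys *ws;

   pthread_mutex_lock(&table_mutex);
   for (ws = table; ws; ws = ws->next) {
      if (ws->dev == dev) {             /* already created for this device */
         ws->refcount++;
         pthread_mutex_unlock(&table_mutex);
         return ws;
      }
   }

   ws = calloc(1, sizeof(*ws));         /* create while still holding the  */
   if (ws) {                            /* lock so a racing caller can't   */
      ws->refcount = 1;                 /* insert a duplicate              */
      ws->dev = dev;
      ws->next = table;
      table = ws;
   }
   pthread_mutex_unlock(&table_mutex);
   return ws;
}

Holding the mutex until initialization is complete is what the comment near the end of amdgpu_winsys_create is about: a second thread performing the same lookup must never observe a half-built winsys.
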
diff --git a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
index a5154ffe7..4d07644c9 100644
--- a/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
+++ b/lib/mesa/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
@@ -32,56 +32,38 @@
#ifndef AMDGPU_WINSYS_H
#define AMDGPU_WINSYS_H
-#include "pipebuffer/pb_cache.h"
-#include "pipebuffer/pb_slab.h"
#include "gallium/drivers/radeon/radeon_winsys.h"
#include "addrlib/addrinterface.h"
-#include "util/u_queue.h"
+#include "os/os_thread.h"
#include <amdgpu.h>
struct amdgpu_cs;
-#define AMDGPU_SLAB_MIN_SIZE_LOG2 9
-#define AMDGPU_SLAB_MAX_SIZE_LOG2 14
-
struct amdgpu_winsys {
struct radeon_winsys base;
struct pipe_reference reference;
- struct pb_cache bo_cache;
- struct pb_slabs bo_slabs;
amdgpu_device_handle dev;
- mtx_t bo_fence_lock;
+ pipe_mutex bo_fence_lock;
int num_cs; /* The number of command streams created. */
- unsigned num_total_rejected_cs;
uint32_t next_bo_unique_id;
uint64_t allocated_vram;
uint64_t allocated_gtt;
- uint64_t mapped_vram;
- uint64_t mapped_gtt;
uint64_t buffer_wait_time; /* time spent in buffer_wait in ns */
- uint64_t num_gfx_IBs;
- uint64_t num_sdma_IBs;
- uint64_t num_mapped_buffers;
+ uint64_t num_cs_flushes;
+ unsigned gart_page_size;
struct radeon_info info;
- /* multithreaded IB submission */
- struct util_queue cs_queue;
+ struct pb_manager *kman;
+ struct pb_manager *cman;
struct amdgpu_gpu_info amdinfo;
ADDR_HANDLE addrlib;
uint32_t rev_id;
unsigned family;
-
- bool check_vm;
-
- /* List of all allocated buffers */
- mtx_t global_bo_list_lock;
- struct list_head global_bo_list;
- unsigned num_buffers;
};
static inline struct amdgpu_winsys *