author     Jonathan Gray <jsg@cvs.openbsd.org>    2019-01-29 10:59:58 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>    2019-01-29 10:59:58 +0000
commit     6977d012bd2b03e783c592092d1164c5afa3f68d
tree       8a601d7c22ea6f247a8033142b00de35c0d39ad7
parent     2f2dc1d8cfd26f13fd68a3a92c5b2f71ef661bc1
Import Mesa 18.3.2
Diffstat (limited to 'lib')
-rw-r--r--  lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c    | 173
-rw-r--r--  lib/mesa/src/gallium/drivers/radeonsi/si_test_dma.c  |  15
2 files changed, 46 insertions(+), 142 deletions(-)
diff --git a/lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c b/lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c
index c58b2b103..ffa2f5ae6 100644
--- a/lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c
+++ b/lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c
@@ -27,22 +27,22 @@
static void si_dma_emit_wait_idle(struct si_context *sctx)
{
- struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ struct radeon_cmdbuf *cs = sctx->dma_cs;
/* NOP waits for idle. */
- if (sctx->chip_class >= GFX7)
+ if (sctx->chip_class >= CIK)
radeon_emit(cs, 0x00000000); /* NOP */
else
radeon_emit(cs, 0xf0000000); /* NOP */
}
-void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
+void si_dma_emit_timestamp(struct si_context *sctx, struct r600_resource *dst,
uint64_t offset)
{
- struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ struct radeon_cmdbuf *cs = sctx->dma_cs;
uint64_t va = dst->gpu_address + offset;
- if (sctx->chip_class == GFX6) {
+ if (sctx->chip_class == SI) {
unreachable("SI DMA doesn't support the timestamp packet.");
return;
}
@@ -50,7 +50,7 @@ void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- util_range_add(&dst->b.b, &dst->valid_buffer_range, offset, offset + 8);
+ util_range_add(&dst->valid_buffer_range, offset, offset + 8);
assert(va % 8 == 0);
@@ -67,16 +67,15 @@ void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned clear_value)
{
- struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ struct radeon_cmdbuf *cs = sctx->dma_cs;
unsigned i, ncopy, csize;
- struct si_resource *sdst = si_resource(dst);
+ struct r600_resource *rdst = r600_resource(dst);
assert(offset % 4 == 0);
assert(size);
assert(size % 4 == 0);
- if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
- sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS)) {
+ if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
return;
}
@@ -84,14 +83,14 @@ void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);
+ util_range_add(&rdst->valid_buffer_range, offset, offset + size);
- offset += sdst->gpu_address;
+ offset += rdst->gpu_address;
- if (sctx->chip_class == GFX6) {
+ if (sctx->chip_class == SI) {
/* the same maximum size as for copying */
ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- si_need_dma_space(sctx, ncopy * 4, sdst, NULL);
+ si_need_dma_space(sctx, ncopy * 4, rdst, NULL);
for (i = 0; i < ncopy; i++) {
csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
@@ -106,10 +105,10 @@ void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
return;
}
- /* The following code is for Sea Islands and later. */
+ /* The following code is for CI, VI, Vega/Raven, etc. */
/* the same maximum size as for copying */
ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
- si_need_dma_space(sctx, ncopy * 5, sdst, NULL);
+ si_need_dma_space(sctx, ncopy * 5, rdst, NULL);
for (i = 0; i < ncopy; i++) {
csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
@@ -118,108 +117,17 @@ void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
radeon_emit(cs, offset);
radeon_emit(cs, offset >> 32);
radeon_emit(cs, clear_value);
- /* dw count */
- radeon_emit(cs, (sctx->chip_class >= GFX9 ? csize - 1 : csize) & 0xfffffffc);
- offset += csize;
- size -= csize;
- }
-}
-
-void si_sdma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
- struct pipe_resource *src, uint64_t dst_offset,
- uint64_t src_offset, uint64_t size)
-{
- struct radeon_cmdbuf *cs = sctx->sdma_cs;
- unsigned i, ncopy, csize;
- struct si_resource *sdst = si_resource(dst);
- struct si_resource *ssrc = si_resource(src);
-
- if (!cs ||
- dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
- src->flags & PIPE_RESOURCE_FLAG_SPARSE) {
- si_copy_buffer(sctx, dst, src, dst_offset, src_offset, size);
- return;
- }
-
- /* Mark the buffer range of destination as valid (initialized),
- * so that transfer_map knows it should wait for the GPU when mapping
- * that range. */
- util_range_add(dst, &sdst->valid_buffer_range, dst_offset,
- dst_offset + size);
-
- dst_offset += sdst->gpu_address;
- src_offset += ssrc->gpu_address;
-
- if (sctx->chip_class == GFX6) {
- unsigned max_size, sub_cmd, shift;
-
- /* see whether we should use the dword-aligned or byte-aligned copy */
- if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
- sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
- shift = 2;
- max_size = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE;
- } else {
- sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
- shift = 0;
- max_size = SI_DMA_COPY_MAX_BYTE_ALIGNED_SIZE;
- }
-
- ncopy = DIV_ROUND_UP(size, max_size);
- si_need_dma_space(sctx, ncopy * 5, sdst, ssrc);
-
- for (i = 0; i < ncopy; i++) {
- csize = MIN2(size, max_size);
- radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd,
- csize >> shift));
- radeon_emit(cs, dst_offset);
- radeon_emit(cs, src_offset);
- radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
- radeon_emit(cs, (src_offset >> 32UL) & 0xff);
- dst_offset += csize;
- src_offset += csize;
- size -= csize;
- }
- return;
- }
-
- /* The following code is for CI and later. */
- unsigned align = ~0u;
- ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
-
- /* Align copy size to dw if src/dst address are dw aligned */
- if ((src_offset & 0x3) == 0 &&
- (dst_offset & 0x3) == 0 &&
- size > 4 &&
- (size & 3) != 0) {
- align = ~0x3u;
- ncopy++;
- }
-
- si_need_dma_space(sctx, ncopy * 7, sdst, ssrc);
-
- for (i = 0; i < ncopy; i++) {
- csize = size >= 4 ? MIN2(size & align, CIK_SDMA_COPY_MAX_SIZE) : size;
- radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
- CIK_SDMA_COPY_SUB_OPCODE_LINEAR,
- 0));
radeon_emit(cs, sctx->chip_class >= GFX9 ? csize - 1 : csize);
- radeon_emit(cs, 0); /* src/dst endian swap */
- radeon_emit(cs, src_offset);
- radeon_emit(cs, src_offset >> 32);
- radeon_emit(cs, dst_offset);
- radeon_emit(cs, dst_offset >> 32);
- dst_offset += csize;
- src_offset += csize;
+ offset += csize;
size -= csize;
}
}
void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
- struct si_resource *dst, struct si_resource *src)
+ struct r600_resource *dst, struct r600_resource *src)
{
- struct radeon_winsys *ws = ctx->ws;
- uint64_t vram = ctx->sdma_cs->used_vram;
- uint64_t gtt = ctx->sdma_cs->used_gart;
+ uint64_t vram = ctx->dma_cs->used_vram;
+ uint64_t gtt = ctx->dma_cs->used_gart;
if (dst) {
vram += dst->vram_usage;
@@ -231,14 +139,13 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
}
/* Flush the GFX IB if DMA depends on it. */
- if (!ctx->sdma_uploads_in_progress &&
- radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+ if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
((dst &&
- ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
(src &&
- ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
- RADEON_USAGE_WRITE))))
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
+ RADEON_USAGE_WRITE))))
si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
/* Flush if there's not enough space, or if the memory usage per IB
@@ -254,33 +161,31 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
* engine busy while uploads are being submitted.
*/
num_dw++; /* for emit_wait_idle below */
- if (!ctx->sdma_uploads_in_progress &&
- (!ws->cs_check_space(ctx->sdma_cs, num_dw, false) ||
- ctx->sdma_cs->used_vram + ctx->sdma_cs->used_gart > 64 * 1024 * 1024 ||
- !radeon_cs_memory_below_limit(ctx->screen, ctx->sdma_cs, vram, gtt))) {
+ if (!ctx->ws->cs_check_space(ctx->dma_cs, num_dw) ||
+ ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
+ !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->sdma_cs->current.cdw) <= ctx->sdma_cs->current.max_dw);
+ assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
}
/* Wait for idle if either buffer has been used in the IB before to
* prevent read-after-write hazards.
*/
if ((dst &&
- ws->cs_is_buffer_referenced(ctx->sdma_cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
(src &&
- ws->cs_is_buffer_referenced(ctx->sdma_cs, src->buf,
- RADEON_USAGE_WRITE)))
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
+ RADEON_USAGE_WRITE)))
si_dma_emit_wait_idle(ctx);
- unsigned sync = ctx->sdma_uploads_in_progress ? 0 : RADEON_USAGE_SYNCHRONIZED;
if (dst) {
- ws->cs_add_buffer(ctx->sdma_cs, dst->buf, RADEON_USAGE_WRITE | sync,
- dst->domains, 0);
+ radeon_add_to_buffer_list(ctx, ctx->dma_cs, dst,
+ RADEON_USAGE_WRITE, 0);
}
if (src) {
- ws->cs_add_buffer(ctx->sdma_cs, src->buf, RADEON_USAGE_READ | sync,
- src->domains, 0);
+ radeon_add_to_buffer_list(ctx, ctx->dma_cs, src,
+ RADEON_USAGE_READ, 0);
}
/* this function is called before all DMA calls, so increment this. */
@@ -290,7 +195,7 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
struct pipe_fence_handle **fence)
{
- struct radeon_cmdbuf *cs = ctx->sdma_cs;
+ struct radeon_cmdbuf *cs = ctx->dma_cs;
struct radeon_saved_cs saved;
bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
@@ -323,8 +228,8 @@ void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst
{
struct si_context *ctx = (struct si_context*)sscreen->aux_context;
- simple_mtx_lock(&sscreen->aux_context_lock);
+ mtx_lock(&sscreen->aux_context_lock);
si_sdma_clear_buffer(ctx, dst, offset, size, value);
sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
- simple_mtx_unlock(&sscreen->aux_context_lock);
+ mtx_unlock(&sscreen->aux_context_lock);
}
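Editor's note: the chunking pattern that survives in si_sdma_clear_buffer above recurs throughout this file, so a minimal standalone sketch of it may help. A clear larger than the per-packet hardware limit is split into DIV_ROUND_UP(size, max) packets, each covering at most that limit. MAX_PACKET_SIZE, emit_clear_packet and the main driver below are illustrative stand-ins, not Mesa code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for CIK_SDMA_COPY_MAX_SIZE; the real value is hardware-defined. */
#define MAX_PACKET_SIZE (1u << 21)

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MIN2(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical replacement for the radeon_emit() packet sequence. */
static void emit_clear_packet(uint64_t offset, uint32_t size, uint32_t value)
{
	printf("clear 0x%llx..0x%llx with 0x%08x\n",
	       (unsigned long long)offset,
	       (unsigned long long)(offset + size), value);
}

static void clear_chunked(uint64_t offset, uint64_t size, uint32_t value)
{
	unsigned ncopy = DIV_ROUND_UP(size, MAX_PACKET_SIZE);

	for (unsigned i = 0; i < ncopy; i++) {
		uint32_t csize = MIN2(size, MAX_PACKET_SIZE);

		emit_clear_packet(offset, csize, value);
		offset += csize;
		size -= csize;
	}
}

int main(void)
{
	/* A 5 MiB clear becomes three packets: 2 MiB + 2 MiB + 1 MiB. */
	clear_chunked(0x100000, 5u << 20, 0xdeadbeef);
	return 0;
}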
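The ordering rules in si_need_dma_space are the subtle part of this file: flush the GFX IB before the DMA engine reads its output, flush the DMA IB before it overflows, and serialize buffer reuse within one IB. A condensed sketch of those three checks follows, assuming a made-up winsys-like interface; struct ib, references, flush and emit_wait_idle are all hypothetical stand-ins for the real winsys callbacks used in the diff.

#include <stdbool.h>

struct buf;

struct ib {
	/* Has this IB already referenced the buffer? */
	bool (*references)(struct ib *ib, struct buf *b);
	void (*flush)(struct ib *ib);
	void (*emit_wait_idle)(struct ib *ib);
	unsigned used_dw, max_dw;
};

void need_dma_space(struct ib *gfx, struct ib *dma, unsigned num_dw,
		    struct buf *dst, struct buf *src)
{
	/* 1. If the GFX IB touched dst or src, submit it first so the
	 *    DMA engine observes its results. */
	if ((dst && gfx->references(gfx, dst)) ||
	    (src && gfx->references(gfx, src)))
		gfx->flush(gfx);

	/* 2. Reserve one extra dword for a possible wait-idle NOP and
	 *    flush the DMA IB when it would overflow. */
	num_dw++;
	if (dma->used_dw + num_dw > dma->max_dw)
		dma->flush(dma);

	/* 3. Reusing a buffer within the same DMA IB risks a
	 *    read-after-write hazard; serialize with a wait-idle. */
	if ((dst && dma->references(dma, dst)) ||
	    (src && dma->references(dma, src)))
		dma->emit_wait_idle(dma);
}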
diff --git a/lib/mesa/src/gallium/drivers/radeonsi/si_test_dma.c b/lib/mesa/src/gallium/drivers/radeonsi/si_test_dma.c
index b3ab2750f..90a2032cd 100644
--- a/lib/mesa/src/gallium/drivers/radeonsi/si_test_dma.c
+++ b/lib/mesa/src/gallium/drivers/radeonsi/si_test_dma.c
@@ -192,10 +192,11 @@ void si_test_dma(struct si_screen *sscreen)
struct pipe_context *ctx = screen->context_create(screen, NULL, 0);
struct si_context *sctx = (struct si_context*)ctx;
uint64_t max_alloc_size;
- unsigned i, iterations, num_partial_copies, max_tex_side;
+ unsigned i, iterations, num_partial_copies, max_levels, max_tex_side;
unsigned num_pass = 0, num_fail = 0;
- max_tex_side = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_2D_SIZE);
+ max_levels = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
+ max_tex_side = 1 << (max_levels - 1);
/* Max 128 MB allowed for both textures. */
max_alloc_size = 128 * 1024 * 1024;
@@ -223,7 +224,7 @@ void si_test_dma(struct si_screen *sscreen)
struct si_texture *ssrc;
struct cpu_texture src_cpu, dst_cpu;
unsigned bpp, max_width, max_height, max_depth, j, num;
- unsigned gfx_blits = 0, dma_blits = 0, cs_blits = 0, max_tex_side_gen;
+ unsigned gfx_blits = 0, dma_blits = 0, max_tex_side_gen;
unsigned max_tex_layers;
bool pass;
bool do_partial_copies = rand() & 1;
@@ -308,7 +309,7 @@ void si_test_dma(struct si_screen *sscreen)
/* clear dst pixels */
uint32_t zero = 0;
si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4,
- SI_COHERENCY_SHADER, false);
+ SI_COHERENCY_SHADER);
memset(dst_cpu.ptr, 0, dst_cpu.layer_stride * tdst.array_size);
/* preparation */
@@ -323,7 +324,6 @@ void si_test_dma(struct si_screen *sscreen)
struct pipe_box box;
unsigned old_num_draw_calls = sctx->num_draw_calls;
unsigned old_num_dma_calls = sctx->num_dma_calls;
- unsigned old_num_cs_calls = sctx->num_compute_calls;
if (!do_partial_copies) {
/* copy whole src to dst */
@@ -383,7 +383,6 @@ void si_test_dma(struct si_screen *sscreen)
/* See which engine was used. */
gfx_blits += sctx->num_draw_calls > old_num_draw_calls;
dma_blits += sctx->num_dma_calls > old_num_dma_calls;
- cs_blits += sctx->num_compute_calls > old_num_cs_calls;
/* CPU copy */
util_copy_box(dst_cpu.ptr, tdst.format, dst_cpu.stride,
@@ -400,8 +399,8 @@ void si_test_dma(struct si_screen *sscreen)
else
num_fail++;
- printf("BLITs: GFX = %2u, DMA = %2u, CS = %2u, %s [%u/%u]\n",
- gfx_blits, dma_blits, cs_blits, pass ? "pass" : "fail",
+ printf("BLITs: GFX = %2u, DMA = %2u, %s [%u/%u]\n",
+ gfx_blits, dma_blits, pass ? "pass" : "fail",
num_pass, num_pass+num_fail);
/* cleanup */
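The only functional change in si_test_dma besides dropping the compute-blit counters is deriving the maximum texture side from PIPE_CAP_MAX_TEXTURE_2D_LEVELS rather than querying PIPE_CAP_MAX_TEXTURE_2D_SIZE: a full mip chain of N levels bottoms out at 1x1, so the largest side is 1 << (N - 1). A toy check of that arithmetic; the level count of 15 is an assumed example value, not queried from a real screen.

#include <stdio.h>

int main(void)
{
	unsigned max_levels = 15;                   /* e.g. PIPE_CAP_MAX_TEXTURE_2D_LEVELS */
	unsigned max_tex_side = 1u << (max_levels - 1);

	printf("%u levels -> max side %u\n", max_levels, max_tex_side); /* prints 16384 */
	return 0;
}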