author    Jonathan Gray <jsg@cvs.openbsd.org>    2020-09-22 01:35:23 +0000
committer Jonathan Gray <jsg@cvs.openbsd.org>    2020-09-22 01:35:23 +0000
commit    27e7bb02bd0f89f96d9e3b402b46c2c97ee4defe (patch)
tree      274247cd9317896d1f4f7b12d469b85dbbae9205 /lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c
parent    294c26e88dc461cbb485be73fd6624ec386b4f0b (diff)
Import Mesa 20.0.8
Diffstat (limited to 'lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c')
-rw-r--r--  lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c  |  523
1 file changed, 274 insertions, 249 deletions
diff --git a/lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c b/lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c
index 673c3310a..c58b2b103 100644
--- a/lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c
+++ b/lib/mesa/src/gallium/drivers/radeonsi/si_dma_cs.c
@@ -27,279 +27,304 @@
static void si_dma_emit_wait_idle(struct si_context *sctx)
{
- struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ struct radeon_cmdbuf *cs = sctx->sdma_cs;
- /* NOP waits for idle. */
- if (sctx->chip_class >= GFX7)
- radeon_emit(cs, 0x00000000); /* NOP */
- else
- radeon_emit(cs, 0xf0000000); /* NOP */
+ /* NOP waits for idle. */
+ if (sctx->chip_class >= GFX7)
+ radeon_emit(cs, 0x00000000); /* NOP */
+ else
+ radeon_emit(cs, 0xf0000000); /* NOP */
}
-void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst, uint64_t offset)
+void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
+ uint64_t offset)
{
- struct radeon_cmdbuf *cs = sctx->sdma_cs;
- uint64_t va = dst->gpu_address + offset;
+ struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ uint64_t va = dst->gpu_address + offset;
- if (sctx->chip_class == GFX6) {
- unreachable("SI DMA doesn't support the timestamp packet.");
- return;
- }
+ if (sctx->chip_class == GFX6) {
+ unreachable("SI DMA doesn't support the timestamp packet.");
+ return;
+ }
- /* Mark the buffer range of destination as valid (initialized),
- * so that transfer_map knows it should wait for the GPU when mapping
- * that range. */
- util_range_add(&dst->b.b, &dst->valid_buffer_range, offset, offset + 8);
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&dst->b.b, &dst->valid_buffer_range, offset, offset + 8);
- assert(va % 8 == 0);
+ assert(va % 8 == 0);
- si_need_dma_space(sctx, 4, dst, NULL);
- si_dma_emit_wait_idle(sctx);
+ si_need_dma_space(sctx, 4, dst, NULL);
+ si_dma_emit_wait_idle(sctx);
- radeon_emit(
- cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_TIMESTAMP, SDMA_TS_SUB_OPCODE_GET_GLOBAL_TIMESTAMP, 0));
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
+ radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_TIMESTAMP,
+ SDMA_TS_SUB_OPCODE_GET_GLOBAL_TIMESTAMP,
+ 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
}
-void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst, uint64_t offset,
- uint64_t size, unsigned clear_value)
+void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
+ uint64_t offset, uint64_t size, unsigned clear_value)
{
- struct radeon_cmdbuf *cs = sctx->sdma_cs;
- unsigned i, ncopy, csize;
- struct si_resource *sdst = si_resource(dst);
-
- assert(offset % 4 == 0);
- assert(size);
- assert(size % 4 == 0);
-
- if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
- sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS)) {
- sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
- return;
- }
-
- /* Mark the buffer range of destination as valid (initialized),
- * so that transfer_map knows it should wait for the GPU when mapping
- * that range. */
- util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);
-
- offset += sdst->gpu_address;
-
- if (sctx->chip_class == GFX6) {
- /* the same maximum size as for copying */
- ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- si_need_dma_space(sctx, ncopy * 4, sdst, NULL);
-
- for (i = 0; i < ncopy; i++) {
- csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_CONSTANT_FILL, 0, csize / 4));
- radeon_emit(cs, offset);
- radeon_emit(cs, clear_value);
- radeon_emit(cs, (offset >> 32) << 16);
- offset += csize;
- size -= csize;
- }
- return;
- }
-
- /* The following code is for Sea Islands and later. */
- /* the same maximum size as for copying */
- ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
- si_need_dma_space(sctx, ncopy * 5, sdst, NULL);
-
- for (i = 0; i < ncopy; i++) {
- csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
- radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_PACKET_CONSTANT_FILL, 0, 0x8000 /* dword copy */));
- radeon_emit(cs, offset);
- radeon_emit(cs, offset >> 32);
- radeon_emit(cs, clear_value);
- /* dw count */
- radeon_emit(cs, (sctx->chip_class >= GFX9 ? csize - 1 : csize) & 0xfffffffc);
- offset += csize;
- size -= csize;
- }
+ struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ unsigned i, ncopy, csize;
+ struct si_resource *sdst = si_resource(dst);
+
+ assert(offset % 4 == 0);
+ assert(size);
+ assert(size % 4 == 0);
+
+ if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
+ sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS)) {
+ sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
+ return;
+ }
+
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);
+
+ offset += sdst->gpu_address;
+
+ if (sctx->chip_class == GFX6) {
+ /* the same maximum size as for copying */
+ ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
+ si_need_dma_space(sctx, ncopy * 4, sdst, NULL);
+
+ for (i = 0; i < ncopy; i++) {
+ csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
+ radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_CONSTANT_FILL, 0,
+ csize / 4));
+ radeon_emit(cs, offset);
+ radeon_emit(cs, clear_value);
+ radeon_emit(cs, (offset >> 32) << 16);
+ offset += csize;
+ size -= csize;
+ }
+ return;
+ }
+
+ /* The following code is for Sea Islands and later. */
+ /* the same maximum size as for copying */
+ ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
+ si_need_dma_space(sctx, ncopy * 5, sdst, NULL);
+
+ for (i = 0; i < ncopy; i++) {
+ csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
+ radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_PACKET_CONSTANT_FILL, 0,
+ 0x8000 /* dword copy */));
+ radeon_emit(cs, offset);
+ radeon_emit(cs, offset >> 32);
+ radeon_emit(cs, clear_value);
+ /* dw count */
+ radeon_emit(cs, (sctx->chip_class >= GFX9 ? csize - 1 : csize) & 0xfffffffc);
+ offset += csize;
+ size -= csize;
+ }
}
void si_sdma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
- struct pipe_resource *src, uint64_t dst_offset, uint64_t src_offset,
- uint64_t size)
+ struct pipe_resource *src, uint64_t dst_offset,
+ uint64_t src_offset, uint64_t size)
{
- struct radeon_cmdbuf *cs = sctx->sdma_cs;
- unsigned i, ncopy, csize;
- struct si_resource *sdst = si_resource(dst);
- struct si_resource *ssrc = si_resource(src);
-
- if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE || src->flags & PIPE_RESOURCE_FLAG_SPARSE) {
- si_copy_buffer(sctx, dst, src, dst_offset, src_offset, size);
- return;
- }
-
- /* Mark the buffer range of destination as valid (initialized),
- * so that transfer_map knows it should wait for the GPU when mapping
- * that range. */
- util_range_add(dst, &sdst->valid_buffer_range, dst_offset, dst_offset + size);
-
- dst_offset += sdst->gpu_address;
- src_offset += ssrc->gpu_address;
-
- if (sctx->chip_class == GFX6) {
- unsigned max_size, sub_cmd, shift;
-
- /* see whether we should use the dword-aligned or byte-aligned copy */
- if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
- sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
- shift = 2;
- max_size = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE;
- } else {
- sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
- shift = 0;
- max_size = SI_DMA_COPY_MAX_BYTE_ALIGNED_SIZE;
- }
-
- ncopy = DIV_ROUND_UP(size, max_size);
- si_need_dma_space(sctx, ncopy * 5, sdst, ssrc);
-
- for (i = 0; i < ncopy; i++) {
- csize = MIN2(size, max_size);
- radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, csize >> shift));
- radeon_emit(cs, dst_offset);
- radeon_emit(cs, src_offset);
- radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
- radeon_emit(cs, (src_offset >> 32UL) & 0xff);
- dst_offset += csize;
- src_offset += csize;
- size -= csize;
- }
- return;
- }
-
- /* The following code is for CI and later. */
- unsigned align = ~0u;
- ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
-
- /* Align copy size to dw if src/dst address are dw aligned */
- if ((src_offset & 0x3) == 0 && (dst_offset & 0x3) == 0 && size > 4 && (size & 3) != 0) {
- align = ~0x3u;
- ncopy++;
- }
-
- si_need_dma_space(sctx, ncopy * 7, sdst, ssrc);
-
- for (i = 0; i < ncopy; i++) {
- csize = size >= 4 ? MIN2(size & align, CIK_SDMA_COPY_MAX_SIZE) : size;
- radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY, CIK_SDMA_COPY_SUB_OPCODE_LINEAR, 0));
- radeon_emit(cs, sctx->chip_class >= GFX9 ? csize - 1 : csize);
- radeon_emit(cs, 0); /* src/dst endian swap */
- radeon_emit(cs, src_offset);
- radeon_emit(cs, src_offset >> 32);
- radeon_emit(cs, dst_offset);
- radeon_emit(cs, dst_offset >> 32);
- dst_offset += csize;
- src_offset += csize;
- size -= csize;
- }
+ struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ unsigned i, ncopy, csize;
+ struct si_resource *sdst = si_resource(dst);
+ struct si_resource *ssrc = si_resource(src);
+
+ if (!cs ||
+ dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
+ src->flags & PIPE_RESOURCE_FLAG_SPARSE) {
+ si_copy_buffer(sctx, dst, src, dst_offset, src_offset, size);
+ return;
+ }
+
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(dst, &sdst->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
+ dst_offset += sdst->gpu_address;
+ src_offset += ssrc->gpu_address;
+
+ if (sctx->chip_class == GFX6) {
+ unsigned max_size, sub_cmd, shift;
+
+ /* see whether we should use the dword-aligned or byte-aligned copy */
+ if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
+ sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
+ shift = 2;
+ max_size = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE;
+ } else {
+ sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
+ shift = 0;
+ max_size = SI_DMA_COPY_MAX_BYTE_ALIGNED_SIZE;
+ }
+
+ ncopy = DIV_ROUND_UP(size, max_size);
+ si_need_dma_space(sctx, ncopy * 5, sdst, ssrc);
+
+ for (i = 0; i < ncopy; i++) {
+ csize = MIN2(size, max_size);
+ radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd,
+ csize >> shift));
+ radeon_emit(cs, dst_offset);
+ radeon_emit(cs, src_offset);
+ radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
+ radeon_emit(cs, (src_offset >> 32UL) & 0xff);
+ dst_offset += csize;
+ src_offset += csize;
+ size -= csize;
+ }
+ return;
+ }
+
+ /* The following code is for CI and later. */
+ unsigned align = ~0u;
+ ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
+
+ /* Align copy size to dw if src/dst address are dw aligned */
+ if ((src_offset & 0x3) == 0 &&
+ (dst_offset & 0x3) == 0 &&
+ size > 4 &&
+ (size & 3) != 0) {
+ align = ~0x3u;
+ ncopy++;
+ }
+
+ si_need_dma_space(sctx, ncopy * 7, sdst, ssrc);
+
+ for (i = 0; i < ncopy; i++) {
+ csize = size >= 4 ? MIN2(size & align, CIK_SDMA_COPY_MAX_SIZE) : size;
+ radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
+ CIK_SDMA_COPY_SUB_OPCODE_LINEAR,
+ 0));
+ radeon_emit(cs, sctx->chip_class >= GFX9 ? csize - 1 : csize);
+ radeon_emit(cs, 0); /* src/dst endian swap */
+ radeon_emit(cs, src_offset);
+ radeon_emit(cs, src_offset >> 32);
+ radeon_emit(cs, dst_offset);
+ radeon_emit(cs, dst_offset >> 32);
+ dst_offset += csize;
+ src_offset += csize;
+ size -= csize;
+ }
}
-void si_need_dma_space(struct si_context *ctx, unsigned num_dw, struct si_resource *dst,
- struct si_resource *src)
+void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
+ struct si_resource *dst, struct si_resource *src)
{
- struct radeon_winsys *ws = ctx->ws;
- uint64_t vram = ctx->sdma_cs->used_vram;
- uint64_t gtt = ctx->sdma_cs->used_gart;
-
- if (dst) {
- vram += dst->vram_usage;
- gtt += dst->gart_usage;
- }
- if (src) {
- vram += src->vram_usage;
- gtt += src->gart_usage;
- }
-
- /* Flush the GFX IB if DMA depends on it. */
- if (!ctx->sdma_uploads_in_progress && radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
- ((dst && ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf, RADEON_USAGE_READWRITE)) ||
- (src && ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf, RADEON_USAGE_WRITE))))
- si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
-
- /* Flush if there's not enough space, or if the memory usage per IB
- * is too large.
- *
- * IBs using too little memory are limited by the IB submission overhead.
- * IBs using too much memory are limited by the kernel/TTM overhead.
- * Too long IBs create CPU-GPU pipeline bubbles and add latency.
- *
- * This heuristic makes sure that DMA requests are executed
- * very soon after the call is made and lowers memory usage.
- * It improves texture upload performance by keeping the DMA
- * engine busy while uploads are being submitted.
- */
- num_dw++; /* for emit_wait_idle below */
- if (!ctx->sdma_uploads_in_progress &&
- (!ws->cs_check_space(ctx->sdma_cs, num_dw, false) ||
- ctx->sdma_cs->used_vram + ctx->sdma_cs->used_gart > 64 * 1024 * 1024 ||
- !radeon_cs_memory_below_limit(ctx->screen, ctx->sdma_cs, vram, gtt))) {
- si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->sdma_cs->current.cdw) <= ctx->sdma_cs->current.max_dw);
- }
-
- /* Wait for idle if either buffer has been used in the IB before to
- * prevent read-after-write hazards.
- */
- if ((dst && ws->cs_is_buffer_referenced(ctx->sdma_cs, dst->buf, RADEON_USAGE_READWRITE)) ||
- (src && ws->cs_is_buffer_referenced(ctx->sdma_cs, src->buf, RADEON_USAGE_WRITE)))
- si_dma_emit_wait_idle(ctx);
-
- unsigned sync = ctx->sdma_uploads_in_progress ? 0 : RADEON_USAGE_SYNCHRONIZED;
- if (dst) {
- ws->cs_add_buffer(ctx->sdma_cs, dst->buf, RADEON_USAGE_WRITE | sync, dst->domains, 0);
- }
- if (src) {
- ws->cs_add_buffer(ctx->sdma_cs, src->buf, RADEON_USAGE_READ | sync, src->domains, 0);
- }
-
- /* this function is called before all DMA calls, so increment this. */
- ctx->num_dma_calls++;
+ struct radeon_winsys *ws = ctx->ws;
+ uint64_t vram = ctx->sdma_cs->used_vram;
+ uint64_t gtt = ctx->sdma_cs->used_gart;
+
+ if (dst) {
+ vram += dst->vram_usage;
+ gtt += dst->gart_usage;
+ }
+ if (src) {
+ vram += src->vram_usage;
+ gtt += src->gart_usage;
+ }
+
+ /* Flush the GFX IB if DMA depends on it. */
+ if (!ctx->sdma_uploads_in_progress &&
+ radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+ ((dst &&
+ ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
+ (src &&
+ ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
+ RADEON_USAGE_WRITE))))
+ si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
+
+ /* Flush if there's not enough space, or if the memory usage per IB
+ * is too large.
+ *
+ * IBs using too little memory are limited by the IB submission overhead.
+ * IBs using too much memory are limited by the kernel/TTM overhead.
+ * Too long IBs create CPU-GPU pipeline bubbles and add latency.
+ *
+ * This heuristic makes sure that DMA requests are executed
+ * very soon after the call is made and lowers memory usage.
+ * It improves texture upload performance by keeping the DMA
+ * engine busy while uploads are being submitted.
+ */
+ num_dw++; /* for emit_wait_idle below */
+ if (!ctx->sdma_uploads_in_progress &&
+ (!ws->cs_check_space(ctx->sdma_cs, num_dw, false) ||
+ ctx->sdma_cs->used_vram + ctx->sdma_cs->used_gart > 64 * 1024 * 1024 ||
+ !radeon_cs_memory_below_limit(ctx->screen, ctx->sdma_cs, vram, gtt))) {
+ si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ assert((num_dw + ctx->sdma_cs->current.cdw) <= ctx->sdma_cs->current.max_dw);
+ }
+
+ /* Wait for idle if either buffer has been used in the IB before to
+ * prevent read-after-write hazards.
+ */
+ if ((dst &&
+ ws->cs_is_buffer_referenced(ctx->sdma_cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
+ (src &&
+ ws->cs_is_buffer_referenced(ctx->sdma_cs, src->buf,
+ RADEON_USAGE_WRITE)))
+ si_dma_emit_wait_idle(ctx);
+
+ unsigned sync = ctx->sdma_uploads_in_progress ? 0 : RADEON_USAGE_SYNCHRONIZED;
+ if (dst) {
+ ws->cs_add_buffer(ctx->sdma_cs, dst->buf, RADEON_USAGE_WRITE | sync,
+ dst->domains, 0);
+ }
+ if (src) {
+ ws->cs_add_buffer(ctx->sdma_cs, src->buf, RADEON_USAGE_READ | sync,
+ src->domains, 0);
+ }
+
+ /* this function is called before all DMA calls, so increment this. */
+ ctx->num_dma_calls++;
}
-void si_flush_dma_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_handle **fence)
+void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
+ struct pipe_fence_handle **fence)
{
- struct radeon_cmdbuf *cs = ctx->sdma_cs;
- struct radeon_saved_cs saved;
- bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
-
- if (!radeon_emitted(cs, 0)) {
- if (fence)
- ctx->ws->fence_reference(fence, ctx->last_sdma_fence);
- return;
- }
-
- if (check_vm)
- si_save_cs(ctx->ws, cs, &saved, true);
-
- ctx->ws->cs_flush(cs, flags, &ctx->last_sdma_fence);
- if (fence)
- ctx->ws->fence_reference(fence, ctx->last_sdma_fence);
-
- if (check_vm) {
- /* Use conservative timeout 800ms, after which we won't wait any
- * longer and assume the GPU is hung.
- */
- ctx->ws->fence_wait(ctx->ws, ctx->last_sdma_fence, 800 * 1000 * 1000);
-
- si_check_vm_faults(ctx, &saved, RING_DMA);
- si_clear_saved_cs(&saved);
- }
+ struct radeon_cmdbuf *cs = ctx->sdma_cs;
+ struct radeon_saved_cs saved;
+ bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
+
+ if (!radeon_emitted(cs, 0)) {
+ if (fence)
+ ctx->ws->fence_reference(fence, ctx->last_sdma_fence);
+ return;
+ }
+
+ if (check_vm)
+ si_save_cs(ctx->ws, cs, &saved, true);
+
+ ctx->ws->cs_flush(cs, flags, &ctx->last_sdma_fence);
+ if (fence)
+ ctx->ws->fence_reference(fence, ctx->last_sdma_fence);
+
+ if (check_vm) {
+ /* Use conservative timeout 800ms, after which we won't wait any
+ * longer and assume the GPU is hung.
+ */
+ ctx->ws->fence_wait(ctx->ws, ctx->last_sdma_fence, 800*1000*1000);
+
+ si_check_vm_faults(ctx, &saved, RING_DMA);
+ si_clear_saved_cs(&saved);
+ }
}
-void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst, uint64_t offset,
- uint64_t size, unsigned value)
+void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
+ uint64_t offset, uint64_t size, unsigned value)
{
- struct si_context *ctx = (struct si_context *)sscreen->aux_context;
+ struct si_context *ctx = (struct si_context*)sscreen->aux_context;
- simple_mtx_lock(&sscreen->aux_context_lock);
- si_sdma_clear_buffer(ctx, dst, offset, size, value);
- sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
- simple_mtx_unlock(&sscreen->aux_context_lock);
+ simple_mtx_lock(&sscreen->aux_context_lock);
+ si_sdma_clear_buffer(ctx, dst, offset, size, value);
+ sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
+ simple_mtx_unlock(&sscreen->aux_context_lock);
}