author | Chris Wilson <chris@chris-wilson.co.uk> | 2012-01-29 19:36:55 +0000
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2012-01-29 20:12:39 +0000
commit | ca252e5b51d7b2f5a7b2c2e0d8fdb024b08096db (patch)
tree | 030711de6bfab1fc28462e320f99801b3ea3dd94 /src/sna
parent | 3aee521bf236994628c4d103a2b8f391a4be2aa7 (diff)
sna: Detect batch overflow and fallback rather than risk an ENOSPC
Having noticed that eog was failing to perform an 8k x 8k copy with
compiz running on a 965gm, it was time to implement the checks for
batch overflow.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
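The change applies the same pattern to every gen2–gen7 render path: before emitting 3D state, check that the batch and its aperture budget can still accommodate the buffer objects involved; if not, submit the current batch and check again; and if even a fresh batch cannot fit the operation, fall back to the BLT/software path (or unwind and fail) instead of overflowing and hitting ENOSPC at execbuffer time. The sketch below is a minimal model of that control flow only; `batch_check_fit`, `batch_submit` and `blt_copy_fallback` are simplified stand-ins, not the driver's kgem API.

```c
/*
 * Minimal sketch of the check -> submit -> re-check -> fallback pattern
 * introduced by this commit.  All types and helpers are simplified
 * stand-ins, not the real kgem/sna interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

struct batch {
	unsigned used;	/* dwords already emitted */
	unsigned size;	/* total dwords available */
};

/* Would an operation needing 'need' dwords still fit in this batch? */
static bool batch_check_fit(const struct batch *b, unsigned need)
{
	return b->used + need <= b->size;
}

/* Flush the accumulated commands and start with an empty batch. */
static void batch_submit(struct batch *b)
{
	printf("submitting %u dwords\n", b->used);
	b->used = 0;
}

/* Cheaper path used when even an empty batch cannot fit the operation. */
static bool blt_copy_fallback(void)
{
	printf("falling back to BLT copy\n");
	return true;
}

static bool render_copy(struct batch *b, unsigned need)
{
	if (!batch_check_fit(b, need)) {
		batch_submit(b);
		if (!batch_check_fit(b, need))
			return blt_copy_fallback();	/* rather than risk ENOSPC */
	}

	b->used += need;	/* emit the 3D state and vertices */
	printf("emitted render copy (%u dwords)\n", need);
	return true;
}

int main(void)
{
	struct batch b = { .used = 0, .size = 1024 };

	render_copy(&b, 900);	/* fits in the fresh batch */
	render_copy(&b, 900);	/* forces a submit, then fits */
	render_copy(&b, 4096);	/* can never fit -> BLT fallback */
	return 0;
}
```

Paths with no cheaper fallback (the fill, clear and video setup functions) instead assert after the submit that a fresh batch can hold the operation, since failure there would indicate a programming error rather than an oversized request.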
Diffstat (limited to 'src/sna')
-rw-r--r-- | src/sna/gen2_render.c | 40
-rw-r--r-- | src/sna/gen3_render.c | 40
-rw-r--r-- | src/sna/gen4_render.c | 48
-rw-r--r-- | src/sna/gen5_render.c | 47
-rw-r--r-- | src/sna/gen6_render.c | 35
-rw-r--r-- | src/sna/gen7_render.c | 35
6 files changed, 207 insertions, 38 deletions
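In the composite setup paths below, the re-check failure path also gains a `cleanup_mask` label so that the temporary mask, source and destination references acquired earlier in setup are released in reverse order before reporting failure. A simplified sketch of that goto-based unwind, with hypothetical `bo_get`/`bo_put`/`batch_fits` helpers standing in for the kgem calls, is:

```c
/*
 * Sketch of the goto-based unwind added to the composite setup paths.
 * bo_get()/bo_put()/batch_fits() are hypothetical stand-ins; the real
 * code destroys tmp->mask.bo, tmp->src.bo and the redirect bo in the
 * same reverse order.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct bo { int refcnt; };

static struct bo *bo_get(void)
{
	struct bo *b = calloc(1, sizeof(*b));
	if (b)
		b->refcnt = 1;
	return b;
}

static void bo_put(struct bo *b)
{
	if (b && --b->refcnt == 0)
		free(b);
}

static bool batch_fits(size_t need)
{
	return need < 4096;	/* placeholder capacity check */
}

static bool composite_setup(size_t need)
{
	struct bo *dst, *src, *mask;

	dst = bo_get();
	if (!dst)
		return false;
	src = bo_get();
	if (!src)
		goto cleanup_dst;
	mask = bo_get();
	if (!mask)
		goto cleanup_src;

	/* New in this commit: if even a fresh batch cannot fit the
	 * operation, unwind everything acquired so far and fail, so
	 * the caller can choose a fallback instead of overflowing. */
	if (!batch_fits(need))
		goto cleanup_mask;

	/* In the driver the bos live on in the composite op until its
	 * done() hook; for this self-contained sketch, release them. */
	bo_put(mask);
	bo_put(src);
	bo_put(dst);
	return true;

cleanup_mask:
	bo_put(mask);
cleanup_src:
	bo_put(src);
cleanup_dst:
	bo_put(dst);
	return false;
}

int main(void)
{
	composite_setup(16);		/* fits: setup succeeds */
	composite_setup(1 << 20);	/* too large: unwinds and fails */
	return 0;
}
```

Falling through the labels releases only what was actually acquired, which is why the real code additionally guards the optional mask bo with a NULL check before destroying it.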
diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c index 398988a3..7250d665 100644 --- a/src/sna/gen2_render.c +++ b/src/sna/gen2_render.c @@ -1824,12 +1824,20 @@ gen2_render_composite(struct sna *sna, if (!kgem_check_bo(&sna->kgem, tmp->dst.bo, tmp->src.bo, tmp->mask.bo, - NULL)) + NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->dst.bo, tmp->src.bo, tmp->mask.bo, + NULL)) + goto cleanup_mask; + } gen2_emit_composite_state(sna, tmp); return TRUE; +cleanup_mask: + if (tmp->mask.bo) + kgem_bo_destroy(&sna->kgem, tmp->mask.bo); cleanup_src: if (tmp->src.bo) kgem_bo_destroy(&sna->kgem, tmp->src.bo); @@ -2235,12 +2243,20 @@ gen2_render_composite_spans(struct sna *sna, if (!kgem_check_bo(&sna->kgem, tmp->base.dst.bo, tmp->base.src.bo, - NULL)) + NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->base.dst.bo, tmp->base.src.bo, + NULL)) + goto cleanup_src; + } gen2_emit_composite_spans_state(sna, tmp); return TRUE; +cleanup_src: + if (tmp->base.src.bo) + kgem_bo_destroy(&sna->kgem, tmp->base.src.bo); cleanup_dst: if (tmp->base.redirect.real_bo) kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo); @@ -2435,8 +2451,10 @@ gen2_render_fill_boxes(struct sna *sna, tmp.floats_per_vertex = 2; tmp.floats_per_rect = 6; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen2_emit_fill_composite_state(sna, &tmp, pixel); @@ -2675,6 +2693,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo, if (gen2_render_fill_one_try_blt(sna, dst, bo, color, x1, y1, x2, y2, alu)) return TRUE; + assert(kgem_check_bo(&sna->kgem, bo, NULL)); } tmp.op = alu; @@ -2835,14 +2854,19 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu, too_large(src->drawable.width, src->drawable.height) || src_bo->pitch > MAX_3D_PITCH || too_large(dst->drawable.width, dst->drawable.height) || - dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) + dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) { +fallback: return sna_blt_copy_boxes_fallback(sna, alu, src, src_bo, src_dx, src_dy, dst, dst_bo, dst_dx, dst_dy, box, n); + } - if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; + } memset(&tmp, 0, sizeof(tmp)); tmp.op = alu; @@ -2960,6 +2984,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu, too_large(dst->drawable.width, dst->drawable.height) || src_bo->pitch > MAX_3D_PITCH || dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) { +fallback: if (!sna_blt_compare_depth(&src->drawable, &dst->drawable)) return FALSE; @@ -2982,8 +3007,11 @@ gen2_render_copy(struct sna *sna, uint8_t alu, tmp->base.floats_per_vertex = 4; tmp->base.floats_per_rect = 12; - if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; + } tmp->blt = gen2_render_copy_blt; tmp->done = gen2_render_copy_done; diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c index da90d823..784d3998 100644 --- a/src/sna/gen3_render.c +++ b/src/sna/gen3_render.c @@ -2836,8 +2836,13 @@ gen3_render_composite(struct sna *sna, if (!kgem_check_bo(&sna->kgem, tmp->dst.bo, tmp->src.bo, tmp->mask.bo, - NULL)) + NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->dst.bo, 
tmp->src.bo, tmp->mask.bo, + NULL)) + goto cleanup_mask; + } gen3_emit_composite_state(sna, tmp); gen3_align_vertex(sna, tmp); @@ -3267,13 +3272,21 @@ gen3_render_composite_spans(struct sna *sna, if (!kgem_check_bo(&sna->kgem, tmp->base.dst.bo, tmp->base.src.bo, - NULL)) + NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->base.dst.bo, tmp->base.src.bo, + NULL)) + goto cleanup_src; + } gen3_emit_composite_state(sna, &tmp->base); gen3_align_vertex(sna, &tmp->base); return TRUE; +cleanup_src: + if (tmp->base.src.bo) + kgem_bo_destroy(&sna->kgem, tmp->base.src.bo); cleanup_dst: if (tmp->base.redirect.real_bo) kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo); @@ -3830,14 +3843,19 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu, src_bo->pitch > MAX_3D_PITCH || too_large(src->drawable.width, src->drawable.height) || dst_bo->pitch > MAX_3D_PITCH || - too_large(dst->drawable.width, dst->drawable.height)) + too_large(dst->drawable.width, dst->drawable.height)) { +fallback: return sna_blt_copy_boxes_fallback(sna, alu, src, src_bo, src_dx, src_dy, dst, dst_bo, dst_dx, dst_dy, box, n); + } - if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; + } memset(&tmp, 0, sizeof(tmp)); tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear; @@ -3961,6 +3979,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu, too_large(src->drawable.width, src->drawable.height) || too_large(dst->drawable.width, dst->drawable.height) || src_bo->pitch > MAX_3D_PITCH || dst_bo->pitch > MAX_3D_PITCH) { +fallback: if (!sna_blt_compare_depth(&src->drawable, &dst->drawable)) return FALSE; @@ -3984,8 +4003,11 @@ gen3_render_copy(struct sna *sna, uint8_t alu, tmp->base.mask.bo = NULL; tmp->base.mask.u.gen3.type = SHADER_NONE; - if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; + } tmp->blt = gen3_render_copy_blt; tmp->done = gen3_render_copy_done; @@ -4139,8 +4161,10 @@ gen3_render_fill_boxes(struct sna *sna, tmp.mask.u.gen3.type = SHADER_NONE; tmp.u.gen3.num_constants = 0; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen3_emit_composite_state(sna, &tmp); gen3_align_vertex(sna, &tmp); @@ -4293,8 +4317,10 @@ gen3_render_fill(struct sna *sna, uint8_t alu, tmp->base.mask.u.gen3.type = SHADER_NONE; tmp->base.u.gen3.num_constants = 0; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } tmp->blt = gen3_render_fill_op_blt; tmp->box = gen3_render_fill_op_box; diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c index d9542eae..ffdcbb71 100644 --- a/src/sna/gen4_render.c +++ b/src/sna/gen4_render.c @@ -556,7 +556,7 @@ static Bool gen4_check_dst_format(PictFormat format) case PICT_x4r4g4b4: return TRUE; default: - DBG(("%s: unhandled format: %x\n", __FUNCTION__, format)); + DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format)); return FALSE; } } @@ -1726,8 +1726,10 @@ gen4_render_video(struct sna *sna, tmp.floats_per_vertex = 3; tmp.u.gen4.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) + if (!kgem_check_bo(&sna->kgem, 
tmp.dst.bo, frame->bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)); + } gen4_video_bind_surfaces(sna, &tmp, frame); gen4_align_vertex(sna, &tmp); @@ -2319,13 +2321,21 @@ gen4_render_composite(struct sna *sna, if (!kgem_check_bo(&sna->kgem, tmp->dst.bo, tmp->src.bo, tmp->mask.bo, - NULL)) + NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->dst.bo, tmp->src.bo, tmp->mask.bo, + NULL)) + goto cleanup_mask; + } gen4_bind_surfaces(sna, tmp); gen4_align_vertex(sna, tmp); return TRUE; +cleanup_mask: + if (tmp->mask.bo) + kgem_bo_destroy(&sna->kgem, tmp->mask.bo); cleanup_src: if (tmp->src.bo) kgem_bo_destroy(&sna->kgem, tmp->src.bo); @@ -2400,6 +2410,8 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu, { struct sna_composite_op tmp; + DBG(("%s x %d\n", __FUNCTION__, n)); + #if NO_COPY_BOXES if (!sna_blt_compare_depth(&src->drawable, &dst->drawable)) return FALSE; @@ -2472,8 +2484,11 @@ fallback: tmp.u.gen4.wm_kernel = WM_KERNEL; tmp.u.gen4.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; + } gen4_copy_bind_surfaces(sna, &tmp); gen4_align_vertex(sna, &tmp); @@ -2512,6 +2527,12 @@ gen4_render_copy(struct sna *sna, uint8_t alu, PixmapPtr dst, struct kgem_bo *dst_bo, struct sna_copy_op *op) { + DBG(("%s: src=%ld, dst=%ld, alu=%d\n", + __FUNCTION__, + src->drawable.serialNumber, + dst->drawable.serialNumber, + alu)); + #if NO_COPY if (!sna_blt_compare_depth(&src->drawable, &dst->drawable)) return FALSE; @@ -2575,8 +2596,11 @@ fallback: op->base.u.gen4.wm_kernel = WM_KERNEL; op->base.u.gen4.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; + } gen4_copy_bind_surfaces(sna, &op->base); gen4_align_vertex(sna, &op->base); @@ -2731,8 +2755,10 @@ gen4_render_fill_boxes(struct sna *sna, tmp.u.gen4.wm_kernel = WM_KERNEL; tmp.u.gen4.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen4_fill_bind_surfaces(sna, &tmp); gen4_align_vertex(sna, &tmp); @@ -2844,8 +2870,10 @@ gen4_render_fill(struct sna *sna, uint8_t alu, op->base.u.gen4.wm_kernel = WM_KERNEL; op->base.u.gen4.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen4_fill_bind_surfaces(sna, &op->base); gen4_align_vertex(sna, &op->base); @@ -2884,6 +2912,8 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo, { struct sna_composite_op tmp; + DBG(("%s: color=%08x\n", __FUNCTION__, color)); + #if NO_FILL_ONE return gen4_render_fill_one_try_blt(sna, dst, bo, color, x1, y1, x2, y2, alu); @@ -2929,8 +2959,10 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo, tmp.u.gen4.wm_kernel = WM_KERNEL; tmp.u.gen4.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, bo, NULL)) + if (!kgem_check_bo(&sna->kgem, bo, NULL)) { _kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, bo, NULL)); + } gen4_fill_bind_surfaces(sna, &tmp); gen4_align_vertex(sna, &tmp); diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c index 
34651210..dc1e720a 100644 --- a/src/sna/gen5_render.c +++ b/src/sna/gen5_render.c @@ -557,7 +557,7 @@ static Bool gen5_check_dst_format(PictFormat format) case PICT_x4r4g4b4: return TRUE; default: - DBG(("%s: unhandled format: %x\n", __FUNCTION__, format)); + DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format)); return FALSE; } } @@ -1759,8 +1759,10 @@ gen5_render_video(struct sna *sna, tmp.floats_per_vertex = 3; tmp.floats_per_rect = 9; - if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) + if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)); + } gen5_video_bind_surfaces(sna, &tmp, frame); gen5_align_vertex(sna, &tmp); @@ -2352,8 +2354,12 @@ gen5_render_composite(struct sna *sna, tmp->done = gen5_render_composite_done; if (!kgem_check_bo(&sna->kgem, - tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) + tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) + goto cleanup_mask; + } if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo)) { if (mask == NULL && @@ -2372,6 +2378,9 @@ gen5_render_composite(struct sna *sna, gen5_align_vertex(sna, tmp); return TRUE; +cleanup_mask: + if (tmp->mask.bo) + kgem_bo_destroy(&sna->kgem, tmp->mask.bo); cleanup_src: if (tmp->src.bo) kgem_bo_destroy(&sna->kgem, tmp->src.bo); @@ -2671,13 +2680,21 @@ gen5_render_composite_spans(struct sna *sna, if (!kgem_check_bo(&sna->kgem, tmp->base.dst.bo, tmp->base.src.bo, - NULL)) + NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->base.dst.bo, tmp->base.src.bo, + NULL)) + goto cleanup_src; + } gen5_bind_surfaces(sna, &tmp->base); gen5_align_vertex(sna, &tmp->base); return TRUE; +cleanup_src: + if (tmp->base.src.bo) + kgem_bo_destroy(&sna->kgem, tmp->base.src.bo); cleanup_dst: if (tmp->base.redirect.real_bo) kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo); @@ -2796,8 +2813,11 @@ fallback: tmp.u.gen5.wm_kernel = WM_KERNEL; tmp.u.gen5.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; + } if (kgem_bo_is_dirty(src_bo)) { if (sna_blt_compare_depth(&src->drawable, &dst->drawable) && @@ -2946,8 +2966,11 @@ fallback: op->base.u.gen5.wm_kernel = WM_KERNEL; op->base.u.gen5.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; + } if (kgem_bo_is_dirty(src_bo)) { if (sna_blt_compare_depth(&src->drawable, &dst->drawable) && @@ -3093,8 +3116,10 @@ gen5_render_fill_boxes(struct sna *sna, tmp.u.gen5.wm_kernel = WM_KERNEL; tmp.u.gen5.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen5_fill_bind_surfaces(sna, &tmp); gen5_align_vertex(sna, &tmp); @@ -3280,8 +3305,10 @@ gen5_render_fill(struct sna *sna, uint8_t alu, op->base.u.gen5.wm_kernel = WM_KERNEL; op->base.u.gen5.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen5_fill_bind_surfaces(sna, &op->base); 
gen5_align_vertex(sna, &op->base); @@ -3369,8 +3396,10 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo, tmp.u.gen5.wm_kernel = WM_KERNEL; tmp.u.gen5.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, bo, NULL)) + if (!kgem_check_bo(&sna->kgem, bo, NULL)) { _kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, bo, NULL)); + } gen5_fill_bind_surfaces(sna, &tmp); gen5_align_vertex(sna, &tmp); diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c index 0d244f18..93410b6a 100644 --- a/src/sna/gen6_render.c +++ b/src/sna/gen6_render.c @@ -1971,6 +1971,7 @@ gen6_render_video(struct sna *sna, kgem_set_mode(&sna->kgem, KGEM_RENDER); if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)); _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -2596,6 +2597,10 @@ gen6_render_composite(struct sna *sna, tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->dst.bo, tmp->src.bo, tmp->mask.bo, + NULL)) + goto cleanup_mask; _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -2603,6 +2608,9 @@ gen6_render_composite(struct sna *sna, gen6_align_vertex(sna, tmp); return TRUE; +cleanup_mask: + if (tmp->mask.bo) + kgem_bo_destroy(&sna->kgem, tmp->mask.bo); cleanup_src: if (tmp->src.bo) kgem_bo_destroy(&sna->kgem, tmp->src.bo); @@ -3000,6 +3008,10 @@ gen6_render_composite_spans(struct sna *sna, tmp->base.dst.bo, tmp->base.src.bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->base.dst.bo, tmp->base.src.bo, + NULL)) + goto cleanup_src; _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -3007,6 +3019,9 @@ gen6_render_composite_spans(struct sna *sna, gen6_align_vertex(sna, &tmp->base); return TRUE; +cleanup_src: + if (tmp->base.src.bo) + kgem_bo_destroy(&sna->kgem, tmp->base.src.bo); cleanup_dst: if (tmp->base.redirect.real_bo) kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo); @@ -3198,6 +3213,8 @@ fallback: kgem_set_mode(&sna->kgem, KGEM_RENDER); if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -3358,6 +3375,8 @@ fallback: kgem_set_mode(&sna->kgem, KGEM_RENDER); if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -3508,8 +3527,10 @@ gen6_render_fill_boxes(struct sna *sna, tmp.u.gen6.nr_inputs = 1; tmp.u.gen6.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen6_emit_fill_state(sna, &tmp); gen6_align_vertex(sna, &tmp); @@ -3705,8 +3726,10 @@ gen6_render_fill(struct sna *sna, uint8_t alu, op->base.u.gen6.nr_inputs = 1; op->base.u.gen6.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen6_emit_fill_state(sna, &op->base); gen6_align_vertex(sna, &op->base); @@ -3796,8 +3819,10 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo, tmp.u.gen6.nr_inputs = 1; tmp.u.gen6.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, bo, NULL)) + if (!kgem_check_bo(&sna->kgem, bo, NULL)) { _kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, bo, NULL)); + 
} gen6_emit_fill_state(sna, &tmp); gen6_align_vertex(sna, &tmp); @@ -3893,8 +3918,10 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo) tmp.u.gen6.nr_inputs = 1; tmp.u.gen6.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, bo, NULL)) + if (!kgem_check_bo(&sna->kgem, bo, NULL)) { _kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, bo, NULL)); + } gen6_emit_fill_state(sna, &tmp); gen6_align_vertex(sna, &tmp); diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c index ff046315..e2486c65 100644 --- a/src/sna/gen7_render.c +++ b/src/sna/gen7_render.c @@ -2036,6 +2036,7 @@ gen7_render_video(struct sna *sna, kgem_set_mode(&sna->kgem, KGEM_RENDER); if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)); _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -2662,6 +2663,10 @@ gen7_render_composite(struct sna *sna, tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->dst.bo, tmp->src.bo, tmp->mask.bo, + NULL)) + goto cleanup_mask; _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -2669,6 +2674,9 @@ gen7_render_composite(struct sna *sna, gen7_align_vertex(sna, tmp); return TRUE; +cleanup_mask: + if (tmp->mask.bo) + kgem_bo_destroy(&sna->kgem, tmp->mask.bo); cleanup_src: if (tmp->src.bo) kgem_bo_destroy(&sna->kgem, tmp->src.bo); @@ -3065,6 +3073,10 @@ gen7_render_composite_spans(struct sna *sna, tmp->base.dst.bo, tmp->base.src.bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, + tmp->base.dst.bo, tmp->base.src.bo, + NULL)) + goto cleanup_src; _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -3072,6 +3084,9 @@ gen7_render_composite_spans(struct sna *sna, gen7_align_vertex(sna, &tmp->base); return TRUE; +cleanup_src: + if (tmp->base.src.bo) + kgem_bo_destroy(&sna->kgem, tmp->base.src.bo); cleanup_dst: if (tmp->base.redirect.real_bo) kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo); @@ -3252,6 +3267,8 @@ fallback: kgem_set_mode(&sna->kgem, KGEM_RENDER); if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -3412,6 +3429,8 @@ fallback: kgem_set_mode(&sna->kgem, KGEM_RENDER); if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { kgem_submit(&sna->kgem); + if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) + goto fallback; _kgem_set_mode(&sna->kgem, KGEM_RENDER); } @@ -3564,8 +3583,10 @@ gen7_render_fill_boxes(struct sna *sna, tmp.u.gen7.nr_inputs = 1; tmp.u.gen7.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen7_emit_fill_state(sna, &tmp); gen7_align_vertex(sna, &tmp); @@ -3761,8 +3782,10 @@ gen7_render_fill(struct sna *sna, uint8_t alu, op->base.u.gen7.nr_inputs = 1; op->base.u.gen7.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) + if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) { kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, dst_bo, NULL)); + } gen7_emit_fill_state(sna, &op->base); gen7_align_vertex(sna, &op->base); @@ -3852,8 +3875,10 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo, tmp.u.gen7.nr_inputs = 1; tmp.u.gen7.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, bo, NULL)) + if (!kgem_check_bo(&sna->kgem, bo, NULL)) { _kgem_submit(&sna->kgem); + 
assert(kgem_check_bo(&sna->kgem, bo, NULL)); + } gen7_emit_fill_state(sna, &tmp); gen7_align_vertex(sna, &tmp); @@ -3949,8 +3974,10 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo) tmp.u.gen7.nr_inputs = 1; tmp.u.gen7.ve_id = 1; - if (!kgem_check_bo(&sna->kgem, bo, NULL)) + if (!kgem_check_bo(&sna->kgem, bo, NULL)) { _kgem_submit(&sna->kgem); + assert(kgem_check_bo(&sna->kgem, bo, NULL)); + } gen7_emit_fill_state(sna, &tmp); gen7_align_vertex(sna, &tmp); |