| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2010-03-01 22:57:40 +0100 |
|---|---|---|
| committer | Owain G. Ainsworth <oga@openbsd.org> | 2010-05-16 19:59:11 +0100 |
| commit | 8d4fc8dabb5394636e4fea3ee1b18ffb02bde9dd (patch) | |
| tree | b0275dbc363248d01a6b8b5e53ba13234a7e6a78 | |
| parent | 261aa1ccd93242816527d49166e8d288cf12efc8 (diff) | |
libIntelXvMC: kill ums leftovers
On i965-class hardware, kernel_exec_fencing was always 1 anyway. And on
i945, this patch also fixes a memory leak (it is unclear how, but it does).
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
(cherry picked from commit 5018fd3097d77a5f31af4cb27e39daa37557b64e)
Signed-off-by: Owain G. Ainsworth <oga@openbsd.org>
(oga: note that while we still use UMS, we can trust kernel_exec_fencing
to always be 1 because we have had modern GEM right from the start.)
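
Every hunk in the diff below performs the same mechanical substitution. As a condensed sketch (with `bo` standing in for whichever buffer object each call site touches), the UMS-era branch collapses into the unconditional GTT call:

```c
/* Before: every map site branched on the UMS-era flag. */
if (xvmc_driver->kernel_exec_fencing)
	drm_intel_gem_bo_map_gtt(bo);	/* GEM path: map through the GTT */
else
	drm_intel_bo_map(bo, 1);	/* UMS fallback: CPU map, writable */

/* After: GEM is guaranteed, so the GTT mapping is used unconditionally. */
drm_intel_gem_bo_map_gtt(bo);
```

The unmap sites collapse the same way (drm_intel_gem_bo_unmap_gtt() versus drm_intel_bo_unmap()), and the now-unused kernel_exec_fencing bitfield is dropped from struct _intel_xvmc_driver.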
| -rw-r--r-- | src/xvmc/i965_xvmc.c | 12 |
| -rw-r--r-- | src/xvmc/intel_batchbuffer.c | 20 |
| -rw-r--r-- | src/xvmc/intel_xvmc.c | 2 |
| -rw-r--r-- | src/xvmc/intel_xvmc.h | 1 |
| -rw-r--r-- | src/xvmc/xvmc_vld.c | 20 |
5 files changed, 11 insertions, 44 deletions
diff --git a/src/xvmc/i965_xvmc.c b/src/xvmc/i965_xvmc.c
index ff67995f..3de60546 100644
--- a/src/xvmc/i965_xvmc.c
+++ b/src/xvmc/i965_xvmc.c
@@ -731,11 +731,8 @@ static Status render_surface(Display * display,
 	}
 
 	if (media_state.indirect_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.
-						   indirect_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.indirect_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.
+					   indirect_data.bo);
 
 		drm_intel_bo_unreference(media_state.indirect_data.bo);
 	}
@@ -755,10 +752,7 @@ static Status render_surface(Display * display,
 	interface_descriptor(&media_state);
 	vfe_state(&media_state);
 
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.indirect_data.bo);
-	else
-		drm_intel_bo_map(media_state.indirect_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.indirect_data.bo);
 
 	block_ptr = media_state.indirect_data.bo->virtual;
 	for (i = first_macroblock;
 	     i < num_macroblocks + first_macroblock; i++) {
diff --git a/src/xvmc/intel_batchbuffer.c b/src/xvmc/intel_batchbuffer.c
index ebaac7a4..5272172f 100644
--- a/src/xvmc/intel_batchbuffer.c
+++ b/src/xvmc/intel_batchbuffer.c
@@ -108,10 +108,7 @@ Bool intelInitBatchBuffer(void)
 		return False;
 	}
 
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_map(xvmc_driver->batch.buf, 1);
+	drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
 
 	xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
 	xvmc_driver->batch.size = BATCH_SIZE;
@@ -122,10 +119,7 @@ Bool intelInitBatchBuffer(void)
 
 void intelFiniBatchBuffer(void)
 {
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_unmap(xvmc_driver->batch.buf);
+	drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
 
 	drm_intel_bo_unreference(xvmc_driver->batch.buf);
 }
@@ -134,10 +128,7 @@ void intelFlushBatch(Bool refill)
 {
 	i965_end_batch();
 
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_unmap(xvmc_driver->batch.buf);
+	drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
 
 	drm_intel_bo_exec(xvmc_driver->batch.buf,
 			  xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr,
@@ -153,10 +144,7 @@ void intelFlushBatch(Bool refill)
 		fprintf(stderr, "unable to alloc batch buffer\n");
 	}
 
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_map(xvmc_driver->batch.buf, 1);
+	drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
 
 	xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
 	xvmc_driver->batch.size = BATCH_SIZE;
diff --git a/src/xvmc/intel_xvmc.c b/src/xvmc/intel_xvmc.c
index c94ae4a1..328d3c1e 100644
--- a/src/xvmc/intel_xvmc.c
+++ b/src/xvmc/intel_xvmc.c
@@ -421,8 +421,6 @@ _X_EXPORT Status XvMCCreateContext(Display * display, XvPortID port,
 	XVMC_INFO("decoder type is %s",
 		  intel_xvmc_decoder_string(comm->type));
 
-	xvmc_driver->kernel_exec_fencing = comm->kernel_exec_fencing;
-
 	/* assign local ctx info */
 	intel_ctx = intel_xvmc_new_context(display);
 	if (!intel_ctx) {
diff --git a/src/xvmc/intel_xvmc.h b/src/xvmc/intel_xvmc.h
index 67c7b380..263fc33d 100644
--- a/src/xvmc/intel_xvmc.h
+++ b/src/xvmc/intel_xvmc.h
@@ -132,7 +132,6 @@ typedef struct _intel_xvmc_driver {
 	int fd;			/* drm file handler */
 
 	dri_bufmgr *bufmgr;
-	unsigned int kernel_exec_fencing:1;
 
 	struct {
 		unsigned int init_offset;
diff --git a/src/xvmc/xvmc_vld.c b/src/xvmc/xvmc_vld.c
index dca05737..bea1ec76 100644
--- a/src/xvmc/xvmc_vld.c
+++ b/src/xvmc/xvmc_vld.c
@@ -1010,10 +1010,7 @@ static Status put_slice2(Display * display, XvMCContext * context,
 	q_scale_code = bit_buf >> 27;
 
 	if (media_state.slice_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.slice_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.slice_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.slice_data.bo);
 
 		drm_intel_bo_unreference(media_state.slice_data.bo);
 	}
@@ -1022,10 +1019,7 @@ static Status put_slice2(Display * display, XvMCContext * context,
 					       VLD_MAX_SLICE_SIZE, 64);
 	if (!media_state.slice_data.bo)
 		return BadAlloc;
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.slice_data.bo);
-	else
-		drm_intel_bo_map(media_state.slice_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.slice_data.bo);
 
 	memcpy(media_state.slice_data.bo->virtual, slice, nbytes);
@@ -1110,10 +1104,7 @@ static Status render_surface(Display * display,
 		return ret;
 
 	if (media_state.mb_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.mb_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.mb_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.mb_data.bo);
 
 		drm_intel_bo_unreference(media_state.mb_data.bo);
 	}
@@ -1125,10 +1116,7 @@ static Status render_surface(Display * display,
 					    surface_size, 64);
 	if (!media_state.mb_data.bo)
 		return BadAlloc;
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.mb_data.bo);
-	else
-		drm_intel_bo_map(media_state.mb_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.mb_data.bo);
 
 	block_ptr = media_state.mb_data.bo->virtual;
 
 	unsigned short *mb_block_ptr;
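
What remains after the patch is the plain GEM buffer-object lifecycle: allocate, map through the GTT, fill, unmap, execute, unreference. Below is a minimal, self-contained sketch of that flow against the libdrm_intel API, assuming a GEM-enabled kernel; submit_batch, its fd parameter, and the BATCH_SIZE value are illustrative stand-ins, not the driver's exact code:

```c
#include <string.h>
#include <intel_bufmgr.h>	/* libdrm_intel buffer-object API */

#define BATCH_SIZE (8 * 1024)	/* illustrative size, not the driver's */

/* Hypothetical condensed flow mirroring intelInitBatchBuffer() and
 * intelFlushBatch() after this patch; fd is an open DRM file descriptor
 * and error handling is omitted for brevity. */
static void submit_batch(int fd, const void *cmds, int nbytes)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, BATCH_SIZE);
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "batch", BATCH_SIZE, 4096);

	drm_intel_gem_bo_map_gtt(bo);		/* GEM-only GTT mapping */
	memcpy(bo->virtual, cmds, nbytes);	/* write the batch commands */
	drm_intel_gem_bo_unmap_gtt(bo);

	drm_intel_bo_exec(bo, nbytes, NULL, 0, 0);	/* hand off to the kernel */
	drm_intel_bo_unreference(bo);
	drm_intel_bufmgr_destroy(bufmgr);
}
```

With GEM, the kernel tracks buffer domains and fencing on its own, which is why a userspace kernel_exec_fencing toggle no longer has any job to do.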