author    Li Shao Hua <shaohua.li@intel.com>    2009-05-19 16:27:32 +0800
committer Zou Nan hai <nanhai.zou@intel.com>    2009-05-19 16:27:32 +0800
commit    52054b6a4c1ca5117c9750361f71aedd91220c39 (patch)
tree      14371ab2a95d6d62527b4be6502a8759efaf1674 /src
parent    b622860429e00d6ab4407980232659c283a8fe8d (diff)
switch XvMC to gem
Diffstat (limited to 'src')
-rw-r--r--  src/i830_hwmc.h              |   3
-rw-r--r--  src/i965_hwmc.c              |  90
-rw-r--r--  src/i965_hwmc.h              |   3
-rw-r--r--  src/xvmc/Makefile.am         |   2
-rw-r--r--  src/xvmc/i965_xvmc.c         | 634
-rw-r--r--  src/xvmc/intel_batchbuffer.c | 234
-rw-r--r--  src/xvmc/intel_batchbuffer.h |  10
-rw-r--r--  src/xvmc/intel_xvmc.c        |  16
-rw-r--r--  src/xvmc/intel_xvmc.h        |  10
-rw-r--r--  src/xvmc/xvmc_vld.c          | 457

10 files changed, 815 insertions, 644 deletions
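
The commit drops the old statically allocated DRM/AGP maps and moves every piece of XvMC state — batch buffers, media kernels, surface states, binding tables, surfaces — into GEM buffer objects driven through libdrm's drm_intel_* bufmgr. The recurring lifecycle is: allocate a bo, upload into it with drm_intel_bo_subdata(), record a relocation with drm_intel_bo_emit_reloc() wherever one bo stores another's GPU address, and submit with drm_intel_bo_exec(). A minimal sketch of that lifecycle, not taken from the patch (error handling trimmed; the MI_BATCH_BUFFER_END value matches intel_batchbuffer.c below):

#include <stdint.h>
#include <string.h>
#include "i915_drm.h"       /* I915_GEM_DOMAIN_* */
#include "intel_bufmgr.h"   /* drm_intel_* API from libdrm_intel */

#define MI_BATCH_BUFFER_END (0xA << 23)

static void gem_lifecycle(drm_intel_bufmgr *bufmgr)
{
    /* 1. A data bo, filled without mapping it ourselves. */
    uint32_t payload[16] = { 0 };
    drm_intel_bo *data = drm_intel_bo_alloc(bufmgr, "data", sizeof(payload), 4096);
    drm_intel_bo_subdata(data, 0, sizeof(payload), payload);

    /* 2. A state bo whose first dword holds data's GPU address: write the
     * presumed offset, then record a relocation so the kernel patches in
     * the real address at execbuffer time if the bo has moved. */
    drm_intel_bo *state = drm_intel_bo_alloc(bufmgr, "state", 4096, 4096);
    uint32_t presumed = (uint32_t)data->offset;
    drm_intel_bo_subdata(state, 0, sizeof(presumed), &presumed);
    drm_intel_bo_emit_reloc(state, 0, data, 0, I915_GEM_DOMAIN_INSTRUCTION, 0);

    /* 3. Submit a no-op batch; in real use the batch would reference
     * 'state' via a relocation, pulling all three bos into the exec list. */
    uint32_t cmds[2] = { 0 /* MI_NOOP */, MI_BATCH_BUFFER_END };
    drm_intel_bo *batch = drm_intel_bo_alloc(bufmgr, "batch", 4096, 4096);
    drm_intel_bo_subdata(batch, 0, sizeof(cmds), cmds);
    drm_intel_bo_exec(batch, sizeof(cmds), NULL, 0, 0);

    drm_intel_bo_unreference(batch);
    drm_intel_bo_unreference(state);
    drm_intel_bo_unreference(data);
}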
diff --git a/src/i830_hwmc.h b/src/i830_hwmc.h
index ad95e1e5..7db6a86a 100644
--- a/src/i830_hwmc.h
+++ b/src/i830_hwmc.h
@@ -66,6 +66,7 @@ struct hwmc_buffer
struct _intel_xvmc_common {
unsigned int type;
struct hwmc_buffer batchbuffer;
+ unsigned int kernel_exec_fencing:1;
};
/* Intel private XvMC command to DDX driver */
@@ -76,7 +77,7 @@ struct intel_xvmc_command {
unsigned int subPicNo;
unsigned int flags;
unsigned int real_id;
- unsigned int surf_offset;
+ uint32_t handle;
unsigned int pad[5];
};
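
The i830_hwmc.h hunk above is the wire-protocol half of the switch: the XvMC client used to pass a surface's fixed aperture offset (surf_offset) to the DDX, and now passes a GEM flink handle instead, with each side resolving its own bo from that global name. A sketch of the two halves of the handshake — a condensed view of put_surface() and put_image() further down, with the helper names being illustrative only:

#include <stdint.h>
#include "i830_hwmc.h"      /* struct intel_xvmc_command */
#include "intel_bufmgr.h"

/* Client side: publish a surface bo under a global (flink) name and
 * place the name in the command block sent to the DDX. */
static void publish_surface(drm_intel_bo *surface_bo,
                            struct intel_xvmc_command *cmd)
{
    uint32_t name = 0;
    drm_intel_bo_flink(surface_bo, &name);
    cmd->handle = name;
}

/* Server side: reopen the same underlying object from the flink name. */
static drm_intel_bo *open_surface(drm_intel_bufmgr *bufmgr, uint32_t handle)
{
    return intel_bo_gem_create_from_name(bufmgr, "surface", handle);
}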
diff --git a/src/i965_hwmc.c b/src/i965_hwmc.c
index efcdf127..4dc2e3ac 100644
--- a/src/i965_hwmc.c
+++ b/src/i965_hwmc.c
@@ -38,6 +38,7 @@
#define _INTEL_XVMC_SERVER_
#include "i830_hwmc.h"
#include "i965_hwmc.h"
+#include "intel_bufmgr.h"
#define STRIDE(w) (w)
#define SIZE_YUV420(w, h) (h * (STRIDE(w) + STRIDE(w >> 1)))
@@ -50,40 +51,6 @@
static PutImageFuncPtr XvPutImage;
-static int alloc_drm_memory(ScrnInfoPtr pScrn,
- struct drm_memory_block *mem,
- char *name, size_t size)
-{
- I830Ptr pI830 = I830PTR(pScrn);
- if ((mem->buffer = i830_allocate_memory(pScrn,
- name, size, PITCH_NONE, GTT_PAGE_SIZE,
- ALIGN_BOTH_ENDS, TILE_NONE)) == NULL) {
- ErrorF("Fail to alloc \n");
- return BadAlloc;
- }
-
- if (drmAddMap(pI830->drmSubFD,
- (drm_handle_t)(mem->buffer->offset + pI830->LinearAddr),
- size, DRM_AGP, 0,
- (drmAddress)&mem->handle) < 0) {
- ErrorF("Fail to map %d \n", errno);
- i830_free_memory(pScrn, mem->buffer);
- return BadAlloc;
- }
-
- mem->size = size;
- mem->offset = mem->buffer->offset;
- return Success;
-}
-
-static void free_drm_memory(ScrnInfoPtr pScrn,
- struct drm_memory_block *mem)
-{
- I830Ptr pI830 = I830PTR(pScrn);
- drmRmMap(pI830->drmSubFD, mem->handle);
- i830_free_memory(pScrn, mem->buffer);
-}
-
static int create_context(ScrnInfoPtr pScrn,
XvMCContextPtr context, int *num_privates, CARD32 **private)
{
@@ -106,31 +73,8 @@ static int create_context(ScrnInfoPtr pScrn,
private_context->is_g4x = IS_G4X(I830);
private_context->is_965_q = IS_965_Q(I830);
+ private_context->comm.kernel_exec_fencing = I830->kernel_exec_fencing;
private_context->comm.type = xvmc_driver->flag;
- private_context->comm.batchbuffer.offset = xvmc_driver->batch->offset;
- private_context->comm.batchbuffer.size = xvmc_driver->batch->size;
- private_context->comm.batchbuffer.handle = xvmc_driver->batch_handle;
-
- if (alloc_drm_memory(pScrn, &private_context->static_buffer,
- "XVMC static buffers",
- I965_MC_STATIC_BUFFER_SIZE)) {
- ErrorF("Unable to allocate and map static buffer for XVMC\n");
- return BadAlloc;
- }
-
- if (alloc_drm_memory(pScrn, &private_context->blocks,
- "XVMC blocks", blocksize)) {
- ErrorF("Unable to allocate and map block buffer for XVMC\n");
- return BadAlloc;
- }
-
- if (IS_G4X(I830)) {
- if (alloc_drm_memory(pScrn, &private_context->slice,
- "XVMC vld slice", VLD_MAX_SLICE_LEN)) {
- ErrorF("Unable to allocate and vld slice buffer for XVMC\n");
- return BadAlloc;
- }
- }
*num_privates = sizeof(*private_context)/sizeof(CARD32);
*private = (CARD32 *)private_context;
@@ -143,12 +87,7 @@ static int create_context(ScrnInfoPtr pScrn,
static void destroy_context(ScrnInfoPtr pScrn, XvMCContextPtr context)
{
struct i965_xvmc_context *private_context;
- I830Ptr pI830 = I830PTR(pScrn);
private_context = context->driver_priv;
- free_drm_memory(pScrn, &private_context->static_buffer);
- free_drm_memory(pScrn, &private_context->blocks);
- if (IS_G4X(pI830))
- free_drm_memory(pScrn, &private_context->slice);
Xfree(private_context);
}
@@ -159,7 +98,6 @@ static int create_surface(ScrnInfoPtr pScrn, XvMCSurfacePtr surface,
struct i965_xvmc_surface *priv_surface, *surface_dup;
struct i965_xvmc_context *priv_ctx = ctx->driver_priv;
- size_t bufsize = SIZE_YUV420(ctx->width, ctx->height);
int i;
for (i = 0 ; i < I965_MAX_SURFACES; i++) {
if (priv_ctx->surfaces[i] == NULL) {
@@ -172,13 +110,10 @@ static int create_surface(ScrnInfoPtr pScrn, XvMCSurfacePtr surface,
priv_surface->no = i;
priv_surface->handle = priv_surface;
+ priv_surface->w = ctx->width;
+ priv_surface->h = ctx->height;
priv_ctx->surfaces[i] = surface->driver_priv
= priv_surface;
- if (alloc_drm_memory(pScrn, &priv_surface->buffer,
- "surface buffer\n", (bufsize+0xFFF)&~(0xFFF))) {
- ErrorF("Unable to allocate surface buffer\n");
- return BadAlloc;
- }
memcpy(surface_dup, priv_surface, sizeof(*priv_surface));
*num_priv = sizeof(*priv_surface)/sizeof(CARD32);
*priv = (CARD32 *)surface_dup;
@@ -200,7 +135,6 @@ static void destory_surface(ScrnInfoPtr pScrn, XvMCSurfacePtr surface)
struct i965_xvmc_surface *priv_surface = surface->driver_priv;
struct i965_xvmc_context *priv_ctx = ctx->driver_priv;
priv_ctx->surfaces[priv_surface->no] = NULL;
- free_drm_memory(pScrn, &priv_surface->buffer);
Xfree(priv_surface);
}
@@ -224,21 +158,27 @@ static int put_image(ScrnInfoPtr pScrn,
{
I830Ptr pI830 = I830PTR(pScrn);
struct intel_xvmc_command *cmd = (struct intel_xvmc_command *)buf;
+ dri_bo *bo;
+
if (id == FOURCC_XVMC) {
- buf = pI830->FbBase + cmd->surf_offset;
+ bo = intel_bo_gem_create_from_name(pI830->bufmgr, "surface", cmd->handle);
+ dri_bo_pin(bo, 0x1000);
+ buf = pI830->FbBase + bo->offset;
}
XvPutImage(pScrn, src_x, src_y, drw_x, drw_y, src_w, src_h,
drw_w, drw_h, id, buf, width, height, sync, clipBoxes,
data, pDraw);
+
+ if (id == FOURCC_XVMC) {
+ dri_bo_unpin(bo);
+ dri_bo_unreference(bo);
+ }
+
return Success;
}
static Bool init(ScrnInfoPtr screen_info, XF86VideoAdaptorPtr adaptor)
{
- if (!intel_xvmc_init_batch(screen_info)) {
- ErrorF("[XvMC] fail to init batch buffer\n");
- return FALSE;
- }
XvPutImage = adaptor->PutImage;
adaptor->PutImage = put_image;
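
Note that put_image() still needs a flat CPU pointer for the legacy XvPutImage path, so the reopened bo is pinned for the duration of the copy to keep bo->offset valid. A hypothetical helper (not in the patch) showing the same pin/compute-pointer pattern with the error checks the hunk omits:

/* Resolve a flink handle to a CPU pointer into the aperture; the caller
 * must dri_bo_unpin() and dri_bo_unreference() *out when done. */
static void *map_shared_surface(I830Ptr pI830, uint32_t handle, dri_bo **out)
{
    dri_bo *bo = intel_bo_gem_create_from_name(pI830->bufmgr, "surface", handle);
    if (!bo)
        return NULL;
    if (dri_bo_pin(bo, 0x1000) != 0) {   /* fix the GTT offset */
        dri_bo_unreference(bo);
        return NULL;
    }
    *out = bo;
    return pI830->FbBase + bo->offset;   /* stable while pinned */
}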
diff --git a/src/i965_hwmc.h b/src/i965_hwmc.h
index 9db0f359..fb61516c 100644
--- a/src/i965_hwmc.h
+++ b/src/i965_hwmc.h
@@ -10,9 +10,10 @@ struct drm_memory_block {
};
struct i965_xvmc_surface {
- struct drm_memory_block buffer;
+ int w, h;
unsigned int no;
void *handle;
+ dri_bo *bo;
};
struct i965_xvmc_context {
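
The surface private now records width and height so the client can size and allocate the bo itself; the drm_memory_block goes away. The size math used by both create_surface() implementations below is plain 4:2:0 accounting — a full-resolution Y plane plus two quarter-resolution chroma planes (bufmgr assumed in scope):

/* From i965_xvmc.c / xvmc_vld.c below: h * (w + w/2) bytes total,
 * i.e. w*h for Y plus 2 * (w/2 * h/2) for U and V. */
#define STRIDE(w)         (w)
#define SIZE_YUV420(w, h) (h * (STRIDE(w) + STRIDE(w >> 1)))

/* Example: a 720x480 surface needs 480 * (720 + 360) = 518400 bytes. */
dri_bo *bo = drm_intel_bo_alloc(bufmgr, "surface",
                                SIZE_YUV420(720, 480), 0x1000);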
diff --git a/src/xvmc/Makefile.am b/src/xvmc/Makefile.am
index 45670605..fc402bb4 100644
--- a/src/xvmc/Makefile.am
+++ b/src/xvmc/Makefile.am
@@ -31,4 +31,4 @@ libIntelXvMC_la_SOURCES = intel_xvmc.c \
libIntelXvMC_la_CFLAGS = @XORG_CFLAGS@ @DRM_CFLAGS@ @DRI_CFLAGS@ \
@XVMCLIB_CFLAGS@ -I$(top_srcdir)/src -DTRUE=1 -DFALSE=0
libIntelXvMC_la_LDFLAGS = -version-number 1:0:0
-libIntelXvMC_la_LIBADD = @DRI_LIBS@ @DRM_LIBS@ @XVMCLIB_LIBS@ @XEXT_LIBS@ -lpthread
+libIntelXvMC_la_LIBADD = @DRI_LIBS@ @DRM_LIBS@ @XVMCLIB_LIBS@ @XEXT_LIBS@ -lpthread -ldrm_intel
diff --git a/src/xvmc/i965_xvmc.c b/src/xvmc/i965_xvmc.c
index 4b1c4812..51a7ae63 100644
--- a/src/xvmc/i965_xvmc.c
+++ b/src/xvmc/i965_xvmc.c
@@ -32,13 +32,15 @@
#include "i965_hwmc.h"
#define BATCH_STRUCT(x) intelBatchbufferData(&x, sizeof(x), 0)
#define URB_SIZE 256 /* XXX */
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+
enum interface {
- INTRA_INTERFACE, /* non field intra */
+ INTRA_INTERFACE = 0, /* non field intra */
NULL_INTERFACE, /* fill with white, do nothing, for debug */
FORWARD_INTERFACE, /* non field forward predict */
BACKWARD_INTERFACE, /* non field backward predict */
F_B_INTERFACE, /* non field forward and backward predict */
- FIELD_INTRA_INTERFACE, /* field intra */
FIELD_FORWARD_INTERFACE, /* field forward predict */
FIELD_BACKWARD_INTERFACE, /* field backward predict */
FIELD_F_B_INTERFACE, /* field forward and backward predict */
@@ -94,65 +96,137 @@ static const uint32_t dual_prime_igd_kernel_static[][4]= {
#include "shader/mc/dual_prime_igd.g4b"
};
+struct kernel_struct{
+ const uint32_t (*bin)[4];
+ uint32_t size;
+};
+
+struct kernel_struct kernels_igd[] = {
+ {ipicture_kernel_static, sizeof(ipicture_kernel_static)},
+ {null_kernel_static, sizeof(null_kernel_static)},
+ {frame_forward_igd_kernel_static, sizeof(frame_forward_igd_kernel_static)},
+ {frame_backward_igd_kernel_static, sizeof(frame_backward_igd_kernel_static)},
+ {frame_f_b_igd_kernel_static, sizeof(frame_f_b_igd_kernel_static)},
+ {field_forward_igd_kernel_static, sizeof(field_forward_igd_kernel_static)},
+ {field_backward_igd_kernel_static, sizeof(field_backward_igd_kernel_static)},
+ {field_f_b_igd_kernel_static, sizeof(field_f_b_igd_kernel_static)},
+ {dual_prime_igd_kernel_static, sizeof(dual_prime_igd_kernel_static)}
+};
+
+struct kernel_struct kernels_965[] = {
+ {ipicture_kernel_static, sizeof(ipicture_kernel_static)},
+ {null_kernel_static, sizeof(null_kernel_static)},
+ {frame_forward_kernel_static, sizeof(frame_forward_kernel_static)},
+ {frame_backward_kernel_static, sizeof(frame_backward_kernel_static)},
+ {frame_f_b_kernel_static, sizeof(frame_f_b_kernel_static)},
+ {field_forward_kernel_static, sizeof(field_forward_kernel_static)},
+ {field_backward_kernel_static, sizeof(field_backward_kernel_static)},
+ {field_f_b_kernel_static, sizeof(field_f_b_kernel_static)},
+ {dual_prime_kernel_static, sizeof(dual_prime_kernel_static)}
+};
+
#define ALIGN(i,m) (((i) + (m) - 1) & ~((m) - 1))
#define MAX_SURFACE_NUM 10
#define DESCRIPTOR_NUM 12
+struct media_kernel_obj {
+ dri_bo *bo;
+};
+
+struct interface_descriptor_obj {
+ dri_bo *bo;
+ struct media_kernel_obj kernels[DESCRIPTOR_NUM];
+};
+
+struct vfe_state_obj {
+ dri_bo *bo;
+ struct interface_descriptor_obj interface;
+};
+
+struct surface_obj {
+ dri_bo *bo;
+};
+
+struct surface_state_obj {
+ struct surface_obj surface;
+ dri_bo *bo;
+};
+
+struct binding_table_obj {
+ dri_bo *bo;
+ struct surface_state_obj surface_states[MAX_SURFACE_NUM];
+};
+
+struct indirect_data_obj {
+ dri_bo *bo;
+};
+
struct media_state {
- unsigned long state_base;
- void *state_ptr;
- unsigned int binding_table_entry_count;
- unsigned long vfe_state_offset;
- unsigned long interface_descriptor_offset[DESCRIPTOR_NUM];
- unsigned long ipicture_kernel_offset;
- unsigned long frame_forward_kernel_offset;
- unsigned long frame_backward_kernel_offset;
- unsigned long frame_f_b_kernel_offset;
- unsigned long ipicture_field_kernel_offset;
- unsigned long field_forward_kernel_offset;
- unsigned long field_backward_kernel_offset;
- unsigned long field_f_b_kernel_offset;
- unsigned long dual_prime_kernel_offset;
- unsigned long null_kernel_offset;
- unsigned long surface_offsets[MAX_SURFACE_NUM];
- unsigned long binding_table_offset;
unsigned int is_g4x:1;
unsigned int is_965_q:1;
+
+ struct vfe_state_obj vfe_state;
+ struct binding_table_obj binding_table;
+ struct indirect_data_obj indirect_data;
};
struct media_state media_state;
-static int map_buffer(struct drm_memory_block *mem)
+static int free_object(struct media_state *s)
{
- return (drmMap(xvmc_driver->fd,
- mem->handle, mem->size, &mem->ptr));
+ int i;
+#define FREE_ONE_BO(bo) drm_intel_bo_unreference(bo)
+ FREE_ONE_BO(s->vfe_state.bo);
+ FREE_ONE_BO(s->vfe_state.interface.bo);
+ for (i = 0; i < DESCRIPTOR_NUM; i++)
+ FREE_ONE_BO(s->vfe_state.interface.kernels[i].bo);
+ FREE_ONE_BO(s->binding_table.bo);
+ for (i = 0; i < MAX_SURFACE_NUM; i++)
+ FREE_ONE_BO(s->binding_table.surface_states[i].bo);
+ FREE_ONE_BO(s->indirect_data.bo);
}
-static void unmap_buffer(struct drm_memory_block *mem)
+static int alloc_object(struct media_state *s)
{
- drmUnmap(mem->ptr, mem->size);
+ int i;
+
+ for (i = 0; i < MAX_SURFACE_NUM; i++) {
+ s->binding_table.surface_states[i].bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface_state",
+ sizeof(struct brw_surface_state), 0x1000);
+ if (!s->binding_table.surface_states[i].bo)
+ goto out;
+ }
+ return 0;
+out:
+ free_object(s);
+ return BadAlloc;
}
+
static Status destroy_context(Display *display, XvMCContext *context)
{
struct i965_xvmc_context *private_context;
private_context = context->privData;
- unmap_buffer(&private_context->static_buffer);
- unmap_buffer(&private_context->blocks);
+ free_object(&media_state);
Xfree(private_context);
return Success;
}
+#define STRIDE(w) (w)
+#define SIZE_YUV420(w, h) (h * (STRIDE(w) + STRIDE(w >> 1)))
+
static Status create_surface(Display *display,
XvMCContext *context, XvMCSurface *surface, int priv_count,
CARD32 *priv_data)
{
struct i965_xvmc_surface *priv_surface =
(struct i965_xvmc_surface *)priv_data;
- if (map_buffer(&priv_surface->buffer))
- return BadAlloc;
+ size_t size = SIZE_YUV420(priv_surface->w, priv_surface->h);
surface->privData = priv_data;
+ priv_surface->bo = drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface",
+ size, 0x1000);
return Success;
}
@@ -160,7 +234,9 @@ static Status destroy_surface(Display *display, XvMCSurface *surface)
{
struct i965_xvmc_surface *priv_surface =
surface->privData;
- unmap_buffer(&priv_surface->buffer);
+ XSync(display, False);
+
+ drm_intel_bo_unreference(priv_surface->bo);
return Success;
}
@@ -206,21 +282,6 @@ static void urb_layout()
OUT_BATCH(BRW_URB_FENCE |
UF0_VFE_REALLOC |
UF0_CS_REALLOC |
- 1);
- OUT_BATCH(0);
- OUT_BATCH(((URB_SIZE)<< UF2_VFE_FENCE_SHIFT) | /* VFE_SIZE */
- ((URB_SIZE)<< UF2_CS_FENCE_SHIFT)); /* CS_SIZE is 0 */
- ADVANCE_BATCH();
-}
-
-/* clear previous urb layout */
-static void clear_urb_state()
-{
- BATCH_LOCALS;
- align_urb_fence();
- BEGIN_BATCH(3);
- OUT_BATCH(BRW_URB_FENCE |
- UF0_CS_REALLOC |
UF0_SF_REALLOC |
UF0_CLIP_REALLOC |
UF0_GS_REALLOC |
@@ -229,8 +290,9 @@ static void clear_urb_state()
OUT_BATCH((0 << UF1_CLIP_FENCE_SHIFT) |
(0 << UF1_GS_FENCE_SHIFT) |
(0 << UF1_VS_FENCE_SHIFT));
- OUT_BATCH((0 << UF2_CS_FENCE_SHIFT) |
- (0 << UF2_SF_FENCE_SHIFT));
+
+ OUT_BATCH(((URB_SIZE)<< UF2_VFE_FENCE_SHIFT) | /* VFE_SIZE */
+ ((URB_SIZE)<< UF2_CS_FENCE_SHIFT)); /* CS_SIZE is 0 */
ADVANCE_BATCH();
}
@@ -240,62 +302,89 @@ static void media_state_pointers(struct media_state *media_state)
BEGIN_BATCH(3);
OUT_BATCH(BRW_MEDIA_STATE_POINTERS|1);
OUT_BATCH(0);
- OUT_BATCH(media_state->vfe_state_offset);
- ADVANCE_BATCH();
-}
-
-static void cs_urb_layout()
-{
- BATCH_LOCALS;
- BEGIN_BATCH(2);
- OUT_BATCH(BRW_CS_URB_STATE | 0);
- OUT_BATCH((0 << 4) | /* URB Entry Allocation Size */
- (0 << 0)); /* Number of URB Entries */
+ OUT_RELOC(media_state->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
ADVANCE_BATCH();
}
/* setup 2D surface for media_read or media_write
*/
-static void setup_media_surface(struct media_state *media_state,
- int surface_num, unsigned long offset, int w, int h)
+static Status setup_media_surface(struct media_state *media_state,
+ int surface_num, dri_bo *bo, unsigned long offset, int w, int h, Bool write)
{
- struct brw_surface_state *ss;
- ss = media_state->state_ptr +
- (media_state->surface_offsets[surface_num] - media_state->state_base);
+ struct brw_surface_state s, *ss = &s;
+
memset(ss, 0, sizeof(struct brw_surface_state));
ss->ss0.surface_type = BRW_SURFACE_2D;
ss->ss0.surface_format = BRW_SURFACEFORMAT_R8_SINT;
- ss->ss1.base_addr = offset;
+ ss->ss1.base_addr = offset + bo->offset;
ss->ss2.width = w - 1;
ss->ss2.height = h - 1;
ss->ss3.pitch = w - 1;
+
+ if (media_state->binding_table.surface_states[surface_num].bo)
+ drm_intel_bo_unreference(media_state->binding_table.surface_states[surface_num].bo);
+ media_state->binding_table.surface_states[surface_num].bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface_state",
+ sizeof(struct brw_surface_state), 0x1000);
+ if (!media_state->binding_table.surface_states[surface_num].bo)
+ return BadAlloc;
+
+ drm_intel_bo_subdata(
+ media_state->binding_table.surface_states[surface_num].bo,
+ 0, sizeof(*ss), ss);
+
+ drm_intel_bo_emit_reloc(media_state->binding_table.surface_states[surface_num].bo,
+ offsetof(struct brw_surface_state, ss1),
+ bo,
+ offset,
+ I915_GEM_DOMAIN_RENDER, write?I915_GEM_DOMAIN_RENDER:0);
+
+ return Success;
}
-static void setup_surfaces(struct media_state *media_state,
- unsigned long dst_offset, unsigned long past_offset,
- unsigned long future_offset,
+static Status setup_surfaces(struct media_state *media_state,
+ dri_bo *dst_bo, dri_bo *past_bo, dri_bo *future_bo,
int w, int h)
{
- setup_media_surface(media_state, 0, dst_offset, w, h);
- setup_media_surface(media_state, 1, dst_offset+w*h, w/2, h/2);
- setup_media_surface(media_state, 2, dst_offset+w*h + w*h/4, w/2, h/2);
- if (past_offset) {
- setup_media_surface(media_state, 4, past_offset, w, h);
- setup_media_surface(media_state, 5, past_offset+w*h, w/2, h/2);
- setup_media_surface(media_state, 6, past_offset+w*h + w*h/4, w/2, h/2);
+ Status ret;
+ ret = setup_media_surface(media_state, 0, dst_bo, 0, w, h, TRUE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(media_state, 1, dst_bo, w*h, w/2, h/2, TRUE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(media_state, 2, dst_bo, w*h + w*h/4, w/2, h/2, TRUE);
+ if (ret != Success)
+ return ret;
+ if (past_bo) {
+ ret = setup_media_surface(media_state, 4, past_bo, 0, w, h, FALSE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(media_state, 5, past_bo, w*h, w/2, h/2, FALSE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(media_state, 6, past_bo, w*h + w*h/4, w/2, h/2, FALSE);
+ if (ret != Success)
+ return ret;
}
- if (future_offset) {
- setup_media_surface(media_state, 7, future_offset, w, h);
- setup_media_surface(media_state, 8, future_offset+w*h, w/2, h/2);
- setup_media_surface(media_state, 9, future_offset+w*h + w*h/4, w/2, h/2);
+ if (future_bo) {
+ ret = setup_media_surface(media_state, 7, future_bo, 0, w, h, FALSE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(media_state, 8, future_bo, w*h, w/2, h/2, FALSE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(media_state, 9, future_bo, w*h + w*h/4, w/2, h/2, FALSE);
+ if (ret != Success)
+ return ret;
}
+ return Success;
}
/* BUFFER SURFACE has a strange format
* the size of the surface is in part of w h and d component
*/
-static void setup_blocks(struct media_state *media_state,
- unsigned long offset, unsigned int block_size)
+static Status setup_blocks(struct media_state *media_state, unsigned int block_size)
{
union element{
struct {
@@ -306,22 +395,39 @@ static void setup_blocks(struct media_state *media_state,
}whd;
unsigned int size;
}e;
- struct brw_surface_state *ss;
- ss = media_state->state_ptr +
- (media_state->surface_offsets[3] - media_state->state_base);
- memset(ss, 0, sizeof(struct brw_surface_state));
- ss->ss0.surface_type = BRW_SURFACE_BUFFER;
- ss->ss0.surface_format = BRW_SURFACEFORMAT_R8_UINT;
- ss->ss1.base_addr = offset;
+ struct brw_surface_state ss;
+ memset(&ss, 0, sizeof(struct brw_surface_state));
+ ss.ss0.surface_type = BRW_SURFACE_BUFFER;
+ ss.ss0.surface_format = BRW_SURFACEFORMAT_R8_UINT;
+ ss.ss1.base_addr = media_state->indirect_data.bo->offset;
+
e.size = block_size - 1;
- ss->ss2.width = e.whd.w;
- ss->ss2.height = e.whd.h;
- ss->ss3.depth = e.whd.d;
- ss->ss3.pitch = block_size - 1;
+ ss.ss2.width = e.whd.w;
+ ss.ss2.height = e.whd.h;
+ ss.ss3.depth = e.whd.d;
+ ss.ss3.pitch = block_size - 1;
+
+ if (media_state->binding_table.surface_states[3].bo)
+ drm_intel_bo_unreference(media_state->binding_table.surface_states[3].bo);
+
+ media_state->binding_table.surface_states[3].bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface_state",
+ sizeof(struct brw_surface_state), 0x1000);
+ if (!media_state->binding_table.surface_states[3].bo)
+ return BadAlloc;
+
+ drm_intel_bo_subdata(media_state->binding_table.surface_states[3].bo, 0,
+ sizeof(ss), &ss);
+
+ drm_intel_bo_emit_reloc(media_state->binding_table.surface_states[3].bo,
+ offsetof(struct brw_surface_state, ss1),
+ media_state->indirect_data.bo, 0,
+ I915_GEM_DOMAIN_SAMPLER, 0);
+ return Success;
}
/* setup state base address */
-static void state_base_address(int offset)
+static void state_base_address()
{
BATCH_LOCALS;
BEGIN_BATCH(6);
@@ -330,7 +436,7 @@ static void state_base_address(int offset)
OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
- OUT_BATCH((0xFFFFF<<12) | BASE_ADDRESS_MODIFY);
+ OUT_BATCH(BASE_ADDRESS_MODIFY);
ADVANCE_BATCH();
}
@@ -358,12 +464,13 @@ static void send_media_object(XvMCMacroBlock *mb, int offset, enum interface int
OUT_BATCH(0);
}else {
OUT_BATCH(6*128);
- OUT_BATCH(offset);
+ OUT_RELOC(media_state.indirect_data.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, offset);
}
OUT_BATCH(mb->x<<4); //g1.0
OUT_BATCH(mb->y<<4);
- OUT_BATCH(offset); //g1.8
+ OUT_RELOC(media_state.indirect_data.bo, //g1.8
+ I915_GEM_DOMAIN_INSTRUCTION, 0, offset);
OUT_BATCH_SHORT(mb->coded_block_pattern); //g1.12
OUT_BATCH_SHORT(mb->PMV[0][0][0]); //g1.14
OUT_BATCH_SHORT(mb->PMV[0][0][1]); //g1.16
@@ -384,178 +491,152 @@ static void send_media_object(XvMCMacroBlock *mb, int offset, enum interface int
ADVANCE_BATCH();
}
-static void binding_tables(struct media_state *media_state)
+static Status binding_tables(struct media_state *media_state)
{
- unsigned int *binding_table;
+ unsigned int binding_table[MAX_SURFACE_NUM];
int i;
- binding_table = media_state->state_ptr +
- (media_state->binding_table_offset - media_state->state_base);
+
+ if (media_state->binding_table.bo)
+ drm_intel_bo_unreference(media_state->binding_table.bo);
+ media_state->binding_table.bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "binding_table",
+ MAX_SURFACE_NUM*4, 0x1000);
+ if (!media_state->binding_table.bo)
+ return BadAlloc;
+
+ for (i = 0; i < MAX_SURFACE_NUM; i++)
+ binding_table[i] = media_state->binding_table.surface_states[i].bo->offset;
+ drm_intel_bo_subdata(media_state->binding_table.bo, 0, sizeof(binding_table),
+ binding_table);
+
for (i = 0; i < MAX_SURFACE_NUM; i++)
- binding_table[i] = media_state->surface_offsets[i];
+ drm_intel_bo_emit_reloc(media_state->binding_table.bo,
+ i * sizeof(unsigned int),
+ media_state->binding_table.surface_states[i].bo, 0,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
+ return Success;
}
-static void media_kernels(struct media_state *media_state)
+static int media_kernels(struct media_state *media_state)
{
- void *kernel;
-#define LOAD_KERNEL(name) kernel = media_state->state_ptr +\
- (media_state->name##_kernel_offset - media_state->state_base);\
- memcpy(kernel, name##_kernel_static, sizeof(name##_kernel_static));
-#define LOAD_KERNEL_IGD(name) kernel = media_state->state_ptr +\
- (media_state->name##_kernel_offset - media_state->state_base);\
- memcpy(kernel, name##_igd_kernel_static, sizeof(name##_igd_kernel_static));
-
- LOAD_KERNEL(ipicture);
- LOAD_KERNEL(null);
+ struct kernel_struct *kernels;
+ int kernel_array_size, i;
+
if (media_state->is_g4x) {
- LOAD_KERNEL_IGD(frame_forward);
- LOAD_KERNEL_IGD(field_forward);
- LOAD_KERNEL_IGD(frame_backward);
- LOAD_KERNEL_IGD(field_backward);
- LOAD_KERNEL_IGD(frame_f_b);
- LOAD_KERNEL_IGD(field_f_b);
- LOAD_KERNEL_IGD(dual_prime);
-
- }else {
- LOAD_KERNEL(frame_forward);
- LOAD_KERNEL(field_forward);
- LOAD_KERNEL(frame_backward);
- LOAD_KERNEL(field_backward);
- LOAD_KERNEL(frame_f_b);
- LOAD_KERNEL(field_f_b);
- LOAD_KERNEL(dual_prime);
+ kernels = kernels_igd;
+ kernel_array_size = ARRAY_SIZE(kernels_igd);
+ } else {
+ kernels = kernels_965;
+ kernel_array_size = ARRAY_SIZE(kernels_965);
+ }
+
+ for (i = 0; i < kernel_array_size; i++) {
+ media_state->vfe_state.interface.kernels[i].bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "kernel",
+ kernels[i].size, 0x1000);
+ if (!media_state->vfe_state.interface.kernels[i].bo)
+ goto out;
}
+
+ for (i = 0; i < kernel_array_size; i++) {
+ dri_bo *bo = media_state->vfe_state.interface.kernels[i].bo;
+ drm_intel_bo_subdata(bo, 0, kernels[i].size, kernels[i].bin);
+ }
+ return 0;
+out:
+ free_object(media_state);
+ return BadAlloc;
}
static void setup_interface(struct media_state *media_state,
- enum interface interface, unsigned int kernel_offset)
+ enum interface i)
{
- struct brw_interface_descriptor *desc;
- desc = media_state->state_ptr +
- (media_state->interface_descriptor_offset[interface]
- - media_state->state_base);
- memset(desc, 0, sizeof(*desc));
- desc->desc0.grf_reg_blocks = 15;
- desc->desc0.kernel_start_pointer = kernel_offset >> 6;
- desc->desc1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
+ struct brw_interface_descriptor desc;
+ memset(&desc, 0, sizeof(desc));
+
+ desc.desc0.grf_reg_blocks = 15;
+ desc.desc0.kernel_start_pointer =
+ media_state->vfe_state.interface.kernels[i].bo->offset >> 6;
+
+ desc.desc1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
/* use same binding table for all interface
* may change this if it affect performance
*/
- desc->desc3.binding_table_entry_count = MAX_SURFACE_NUM;
- desc->desc3.binding_table_pointer = media_state->binding_table_offset >> 5;
+ desc.desc3.binding_table_entry_count = MAX_SURFACE_NUM;
+ desc.desc3.binding_table_pointer = media_state->binding_table.bo->offset >> 5;
+
+ drm_intel_bo_subdata(media_state->vfe_state.interface.bo, i*sizeof(desc),
+ sizeof(desc), &desc);
+
+ drm_intel_bo_emit_reloc(
+ media_state->vfe_state.interface.bo,
+ i * sizeof(desc) +
+ offsetof(struct brw_interface_descriptor, desc0),
+ media_state->vfe_state.interface.kernels[i].bo,
+ desc.desc0.grf_reg_blocks,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
+
+ drm_intel_bo_emit_reloc(
+ media_state->vfe_state.interface.bo,
+ i * sizeof(desc) +
+ offsetof(struct brw_interface_descriptor, desc3),
+ media_state->binding_table.bo,
+ desc.desc3.binding_table_entry_count,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
}
-static void interface_descriptor(struct media_state *media_state)
+static Status interface_descriptor(struct media_state *media_state)
{
- setup_interface(media_state, INTRA_INTERFACE,
- media_state->ipicture_kernel_offset);
- setup_interface(media_state, NULL_INTERFACE,
- media_state->null_kernel_offset);
- setup_interface(media_state, FORWARD_INTERFACE,
- media_state->frame_forward_kernel_offset);
- setup_interface(media_state, FIELD_FORWARD_INTERFACE,
- media_state->field_forward_kernel_offset);
- setup_interface(media_state, BACKWARD_INTERFACE,
- media_state->frame_backward_kernel_offset);
- setup_interface(media_state, FIELD_BACKWARD_INTERFACE,
- media_state->field_backward_kernel_offset);
- setup_interface(media_state, F_B_INTERFACE,
- media_state->frame_f_b_kernel_offset);
- setup_interface(media_state, FIELD_F_B_INTERFACE,
- media_state->field_f_b_kernel_offset);
- setup_interface(media_state, DUAL_PRIME_INTERFACE,
- media_state->dual_prime_kernel_offset);
+ if (media_state->vfe_state.interface.bo)
+ drm_intel_bo_unreference(media_state->vfe_state.interface.bo);
+ media_state->vfe_state.interface.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "interfaces", DESCRIPTOR_NUM*sizeof(struct brw_interface_descriptor),
+ 0x1000);
+ if (!media_state->vfe_state.interface.bo)
+ return BadAlloc;
+
+ setup_interface(media_state, INTRA_INTERFACE);
+ setup_interface(media_state, NULL_INTERFACE);
+ setup_interface(media_state, FORWARD_INTERFACE);
+ setup_interface(media_state, FIELD_FORWARD_INTERFACE);
+ setup_interface(media_state, BACKWARD_INTERFACE);
+ setup_interface(media_state, FIELD_BACKWARD_INTERFACE);
+ setup_interface(media_state, F_B_INTERFACE);
+ setup_interface(media_state, FIELD_F_B_INTERFACE);
+ setup_interface(media_state, DUAL_PRIME_INTERFACE);
+ return Success;
}
-static void vfe_state(struct media_state *media_state)
+static Status vfe_state(struct media_state *media_state)
{
- struct brw_vfe_state *state;
- state = media_state->state_ptr +
- (media_state->vfe_state_offset - media_state->state_base);
- memset(state, 0, sizeof(*state));
+ struct brw_vfe_state state;
+ memset(&state, 0, sizeof(state));
+
/* no scratch space */
- state->vfe1.vfe_mode = VFE_GENERIC_MODE;
- state->vfe1.num_urb_entries = 1;
+ state.vfe1.vfe_mode = VFE_GENERIC_MODE;
+ state.vfe1.num_urb_entries = 1;
/* XXX TODO */
/* should carefully caculate those values for performance */
- state->vfe1.urb_entry_alloc_size = 2;
- state->vfe1.max_threads = 31;
- state->vfe2.interface_descriptor_base =
- media_state->interface_descriptor_offset[0] >> 4;
-}
-
-static void calc_state_layouts(struct media_state *media_state)
-{
- int i;
- media_state->vfe_state_offset = ALIGN(media_state->state_base, 64);
- media_state->interface_descriptor_offset[0] =
- ALIGN(media_state->vfe_state_offset + sizeof(struct brw_vfe_state), 64);
- for (i = 1; i < DESCRIPTOR_NUM; i++)
- media_state->interface_descriptor_offset[i] =
- media_state->interface_descriptor_offset[i-1]
- + sizeof(struct brw_interface_descriptor);
- media_state->binding_table_offset =
- ALIGN(media_state->interface_descriptor_offset[DESCRIPTOR_NUM - 1]
- + sizeof(struct brw_interface_descriptor), 64);
- media_state->surface_offsets[0] =
- ALIGN(media_state->binding_table_offset
- + 4*media_state->binding_table_entry_count , 32);
- for (i = 1; i < MAX_SURFACE_NUM; i++)
- media_state->surface_offsets[i] =
- ALIGN(media_state->surface_offsets[i - 1]
- + sizeof(struct brw_surface_state) , 32);
- media_state->ipicture_kernel_offset =
- ALIGN(media_state->surface_offsets[MAX_SURFACE_NUM - 1]
- + sizeof(struct brw_surface_state) , 64);
-
- media_state->frame_forward_kernel_offset =
- ALIGN(media_state->ipicture_kernel_offset +
- sizeof(ipicture_kernel_static), 64);
- if(!media_state->is_g4x) {
- media_state->field_forward_kernel_offset =
- ALIGN(media_state->frame_forward_kernel_offset +
- sizeof(frame_forward_kernel_static), 64);
- media_state->frame_backward_kernel_offset =
- ALIGN(media_state->field_forward_kernel_offset +
- sizeof(field_forward_kernel_static), 64);
- media_state->field_backward_kernel_offset =
- ALIGN(media_state->frame_backward_kernel_offset +
- sizeof(frame_backward_kernel_static), 64);
- media_state->frame_f_b_kernel_offset =
- ALIGN(media_state->field_backward_kernel_offset +
- sizeof(field_backward_kernel_static), 64);
- media_state->field_f_b_kernel_offset =
- ALIGN(media_state->frame_f_b_kernel_offset +
- sizeof(frame_f_b_kernel_static), 64);
- media_state->null_kernel_offset =
- ALIGN(media_state->field_f_b_kernel_offset +
- sizeof(field_f_b_kernel_static), 64);
- media_state->dual_prime_kernel_offset =
- ALIGN(media_state->null_kernel_offset +
- sizeof(null_kernel_static), 64);
- } else {
- media_state->field_forward_kernel_offset =
- ALIGN(media_state->frame_forward_kernel_offset +
- sizeof(frame_forward_igd_kernel_static), 64);
- media_state->frame_backward_kernel_offset =
- ALIGN(media_state->field_forward_kernel_offset +
- sizeof(field_forward_igd_kernel_static), 64);
- media_state->field_backward_kernel_offset =
- ALIGN(media_state->frame_backward_kernel_offset +
- sizeof(frame_backward_igd_kernel_static), 64);
- media_state->frame_f_b_kernel_offset =
- ALIGN(media_state->field_backward_kernel_offset +
- sizeof(field_backward_igd_kernel_static), 64);
- media_state->field_f_b_kernel_offset =
- ALIGN(media_state->frame_f_b_kernel_offset +
- sizeof(frame_f_b_igd_kernel_static), 64);
- media_state->null_kernel_offset =
- ALIGN(media_state->field_f_b_kernel_offset +
- sizeof(field_f_b_igd_kernel_static), 64);
- media_state->dual_prime_kernel_offset =
- ALIGN(media_state->null_kernel_offset +
- sizeof(null_kernel_static), 64);
- }
+ state.vfe1.urb_entry_alloc_size = 2;
+ state.vfe1.max_threads = 31;
+ state.vfe2.interface_descriptor_base =
+ media_state->vfe_state.interface.bo->offset >> 4;
+
+ if (media_state->vfe_state.bo)
+ drm_intel_bo_unreference(media_state->vfe_state.bo);
+ media_state->vfe_state.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "vfe state", sizeof(struct brw_vfe_state), 0x1000);
+ if (!media_state->vfe_state.bo)
+ return BadAlloc;
+
+ drm_intel_bo_subdata(media_state->vfe_state.bo, 0, sizeof(state), &state);
+
+ drm_intel_bo_emit_reloc(media_state->vfe_state.bo,
+ offsetof(struct brw_vfe_state, vfe2),
+ media_state->vfe_state.interface.bo, 0,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
+ return Success;
}
static Status render_surface(Display *display,
@@ -588,13 +669,35 @@ static Status render_surface(Display *display,
XVMC_ERR("Can't find intel xvmc context\n");
return BadValue;
}
+
+ if (media_state.indirect_data.bo) {
+ if (xvmc_driver->kernel_exec_fencing)
+ drm_intel_gem_bo_unmap_gtt(media_state.indirect_data.bo);
+ else
+ drm_intel_bo_unmap(media_state.indirect_data.bo);
+
+ drm_intel_bo_unreference(media_state.indirect_data.bo);
+ }
+ media_state.indirect_data.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "indirect data", 128*6*num_macroblocks, 64);
+ if (!media_state.indirect_data.bo)
+ return BadAlloc;
setup_surfaces(&media_state,
- priv_target_surface->buffer.offset,
- past_surface? priv_past_surface->buffer.offset:0,
- future_surface?priv_future_surface->buffer.offset:0,
+ priv_target_surface->bo,
+ past_surface? priv_past_surface->bo:NULL,
+ future_surface?priv_future_surface->bo:NULL,
context->width, context->height);
+ setup_blocks(&media_state, 128*6*num_macroblocks);
+ binding_tables(&media_state);
+ interface_descriptor(&media_state);
+ vfe_state(&media_state);
+
+ if (xvmc_driver->kernel_exec_fencing)
+ drm_intel_gem_bo_map_gtt(media_state.indirect_data.bo);
+ else
+ drm_intel_bo_map(media_state.indirect_data.bo, 1);
- block_ptr = i965_ctx->blocks.ptr;
+ block_ptr = media_state.indirect_data.bo->virtual;
for (i = first_macroblock;
i < num_macroblocks + first_macroblock; i++) {
unsigned short *mb_block_ptr;
@@ -635,20 +738,15 @@ static Status render_surface(Display *display,
memcpy(block_ptr, mb_block_ptr, 128);
block_ptr += 64;
}
-
{
- int block_offset;
- block_offset = media_state.is_965_q?0:i965_ctx->blocks.offset;
+ int block_offset = 0;
LOCK_HARDWARE(intel_ctx->hw_context);
- state_base_address(block_offset);
+ state_base_address();
flush();
clear_sf_state();
- clear_urb_state();
pipeline_select(&media_state);
urb_layout();
media_state_pointers(&media_state);
- cs_urb_layout();
-
for (i = first_macroblock;
i < num_macroblocks + first_macroblock;
i++, block_offset += 128*6) {
@@ -700,8 +798,11 @@ static Status put_surface(Display *display,XvMCSurface *surface,
{
struct i965_xvmc_surface *private_surface =
surface->privData;
+ uint32_t handle = 0;
+
+ drm_intel_bo_flink(private_surface->bo, &handle);
+ data->handle = handle;
- data->surf_offset = private_surface->buffer.offset;
return Success;
}
@@ -718,25 +819,14 @@ static Status create_context(Display *display, XvMCContext *context,
struct i965_xvmc_context *i965_ctx;
i965_ctx = (struct i965_xvmc_context *)priv_data;
context->privData = i965_ctx;
- if (map_buffer(&i965_ctx->static_buffer))
- return BadAlloc;
- if(map_buffer(&i965_ctx->blocks))
- return BadAlloc;
- {
- media_state.state_base = i965_ctx->static_buffer.offset;
- media_state.state_ptr = i965_ctx->static_buffer.ptr;
- media_state.is_g4x = i965_ctx->is_g4x;
- media_state.is_965_q = i965_ctx->is_965_q;
- media_state.binding_table_entry_count = MAX_SURFACE_NUM;
- calc_state_layouts(&media_state);
- vfe_state(&media_state);
- interface_descriptor(&media_state);
- media_kernels(&media_state);
- setup_blocks(&media_state,
- i965_ctx->blocks.offset,
- 6*context->width*context->height*sizeof(short));
- binding_tables(&media_state);
- }
+
+ media_state.is_g4x = i965_ctx->is_g4x;
+ media_state.is_965_q = i965_ctx->is_965_q;
+
+ if (alloc_object(&media_state))
+ return BadAlloc;
+ if (media_kernels(&media_state))
+ return BadAlloc;
return Success;
}
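
The pattern repeated throughout the i965_xvmc.c rewrite above is: build a piece of i965 state on the stack, upload it with drm_intel_bo_subdata(), and attach a relocation for every field that carries a GPU address, writing the bo's current offset as a presumed value. Condensed into one hypothetical helper modeled on setup_media_surface() (brw_surface_state comes from the driver's brw_structs.h):

#include <stddef.h>   /* offsetof */

static Status emit_2d_surface(drm_intel_bufmgr *bufmgr, drm_intel_bo **ss_bo,
                              drm_intel_bo *target, uint32_t delta,
                              int w, int h, Bool write)
{
    struct brw_surface_state ss;

    memset(&ss, 0, sizeof(ss));
    ss.ss0.surface_type = BRW_SURFACE_2D;
    ss.ss0.surface_format = BRW_SURFACEFORMAT_R8_SINT;
    ss.ss1.base_addr = target->offset + delta;   /* presumed address only */
    ss.ss2.width  = w - 1;
    ss.ss2.height = h - 1;
    ss.ss3.pitch  = w - 1;

    if (*ss_bo)
        drm_intel_bo_unreference(*ss_bo);
    *ss_bo = drm_intel_bo_alloc(bufmgr, "surface_state", sizeof(ss), 0x1000);
    if (!*ss_bo)
        return BadAlloc;

    drm_intel_bo_subdata(*ss_bo, 0, sizeof(ss), &ss);

    /* The kernel rewrites ss1.base_addr at exec time if 'target' moved. */
    drm_intel_bo_emit_reloc(*ss_bo, offsetof(struct brw_surface_state, ss1),
                            target, delta, I915_GEM_DOMAIN_RENDER,
                            write ? I915_GEM_DOMAIN_RENDER : 0);
    return Success;
}

This is also why calc_state_layouts() disappears entirely: hand-computed offsets inside one big static buffer are replaced by per-object bos tied together with relocations.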
diff --git a/src/xvmc/intel_batchbuffer.c b/src/xvmc/intel_batchbuffer.c
index 6d4b496e..02fbd5ae 100644
--- a/src/xvmc/intel_batchbuffer.c
+++ b/src/xvmc/intel_batchbuffer.c
@@ -45,9 +45,10 @@
#include "intel_xvmc.h"
#include "intel_batchbuffer.h"
-
+#include "brw_defines.h"
+#include "brw_structs.h"
#define MI_BATCH_BUFFER_END (0xA << 23)
-
+#define BATCH_SIZE 8*1024 /* one bo is allocated each time, so the size can be small */
static int intelEmitIrqLocked(void)
{
drmI830IrqEmit ie;
@@ -82,192 +83,105 @@ static void intelWaitIrq(int seq)
}
}
-static void intelDestroyBatchBuffer(void)
+static void i965_end_batch(void)
{
- if (xvmc_driver->alloc.offset) {
- xvmc_driver->alloc.ptr = NULL;
- xvmc_driver->alloc.offset = 0;
- } else if (xvmc_driver->alloc.ptr) {
- free(xvmc_driver->alloc.ptr);
- xvmc_driver->alloc.ptr = NULL;
- }
-
- memset(&xvmc_driver->batch, 0, sizeof(xvmc_driver->batch));
+ unsigned int size = xvmc_driver->batch.ptr -
+ xvmc_driver->batch.init_ptr;
+ if ((size & 4) == 0) {
+ *(unsigned int*)xvmc_driver->batch.ptr = 0;
+ xvmc_driver->batch.ptr += 4;
+ }
+ *(unsigned int*)xvmc_driver->batch.ptr = MI_BATCH_BUFFER_END;
+ xvmc_driver->batch.ptr += 4;
}
-
Bool intelInitBatchBuffer(void)
{
- if (drmMap(xvmc_driver->fd,
- xvmc_driver->batchbuffer.handle,
- xvmc_driver->batchbuffer.size,
- (drmAddress *)&xvmc_driver->batchbuffer.map) != 0) {
- XVMC_ERR("fail to map batch buffer\n");
+ int i;
+
+ if((xvmc_driver->batch.buf =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "batch buffer", BATCH_SIZE, 0x1000)) == NULL) {
+ fprintf(stderr, "unable to alloc batch buffer\n");
return False;
}
- if (xvmc_driver->batchbuffer.map) {
- xvmc_driver->alloc.size = xvmc_driver->batchbuffer.size;
- xvmc_driver->alloc.offset = xvmc_driver->batchbuffer.offset;
- xvmc_driver->alloc.ptr = xvmc_driver->batchbuffer.map;
- } else {
- xvmc_driver->alloc.size = 8 * 1024;
- xvmc_driver->alloc.offset = 0;
- xvmc_driver->alloc.ptr = malloc(xvmc_driver->alloc.size);
- }
+ if (xvmc_driver->kernel_exec_fencing)
+ drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
+ else
+ drm_intel_bo_map(xvmc_driver->batch.buf, 1);
- xvmc_driver->alloc.active_buf = 0;
- assert(xvmc_driver->alloc.ptr);
+ xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
+ xvmc_driver->batch.size = BATCH_SIZE;
+ xvmc_driver->batch.space = BATCH_SIZE;
+ xvmc_driver->batch.ptr = xvmc_driver->batch.init_ptr;
return True;
}
void intelFiniBatchBuffer(void)
{
- if (xvmc_driver->batchbuffer.map) {
- drmUnmap(xvmc_driver->batchbuffer.map, xvmc_driver->batchbuffer.size);
- xvmc_driver->batchbuffer.map = NULL;
- }
- intelDestroyBatchBuffer();
-}
-
-static void intelBatchbufferRequireSpace(unsigned int sz)
-{
- if (xvmc_driver->batch.space < sz)
- intelFlushBatch(TRUE);
-}
-
-void intelBatchbufferData(const void *data, unsigned bytes, unsigned flags)
-{
- assert((bytes & 0x3) == 0);
-
- intelBatchbufferRequireSpace(bytes);
- memcpy(xvmc_driver->batch.ptr, data, bytes);
- xvmc_driver->batch.ptr += bytes;
- xvmc_driver->batch.space -= bytes;
+ if (xvmc_driver->kernel_exec_fencing)
+ drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
+ else
+ drm_intel_bo_unmap(xvmc_driver->batch.buf);
- assert(xvmc_driver->batch.space >= 0);
+ drm_intel_bo_unreference(xvmc_driver->batch.buf);
}
-#define MI_FLUSH ((0 << 29) | (4 << 23))
-#define FLUSH_MAP_CACHE (1 << 0)
-#define FLUSH_RENDER_CACHE (0 << 2)
-#define FLUSH_WRITE_DIRTY_STATE (1 << 4)
-static void intelRefillBatchLocked(Bool allow_unlock)
+void intelFlushBatch(Bool refill )
{
- unsigned half = xvmc_driver->alloc.size >> 1;
- unsigned buf = (xvmc_driver->alloc.active_buf ^= 1);
- unsigned dword[2];
-
- dword[0] = MI_FLUSH | FLUSH_WRITE_DIRTY_STATE | FLUSH_RENDER_CACHE | FLUSH_MAP_CACHE;
- dword[1] = 0;
- intelCmdIoctl((char *)&dword[0], sizeof(dword));
-
- xvmc_driver->alloc.irq_emitted = intelEmitIrqLocked();
+ i965_end_batch();
+
+ if (xvmc_driver->kernel_exec_fencing)
+ drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
+ else
+ drm_intel_bo_unmap(xvmc_driver->batch.buf);
+
+ drm_intel_bo_exec(xvmc_driver->batch.buf,
+ xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr,
+ 0, 0, 0);
+ //dri_bo_wait_rendering(xvmc_driver->batch.buf);
+
+ drm_intel_bo_unreference(xvmc_driver->batch.buf);
+ if((xvmc_driver->batch.buf =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "batch buffer", BATCH_SIZE, 0x1000)) == NULL) {
+ fprintf(stderr, "unable to alloc batch buffer\n");
+ }
- if (xvmc_driver->alloc.irq_emitted) {
- intelWaitIrq(xvmc_driver->alloc.irq_emitted);
- }
+ if (xvmc_driver->kernel_exec_fencing)
+ drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
+ else
+ drm_intel_bo_map(xvmc_driver->batch.buf, 1);
- xvmc_driver->batch.start_offset = xvmc_driver->alloc.offset + buf * half;
- xvmc_driver->batch.ptr = (unsigned char *)xvmc_driver->alloc.ptr + buf * half;
- xvmc_driver->batch.size = half - 8;
- xvmc_driver->batch.space = half - 8;
- assert(xvmc_driver->batch.space >= 0);
+ xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
+ xvmc_driver->batch.size = BATCH_SIZE;
+ xvmc_driver->batch.space = BATCH_SIZE;
+ xvmc_driver->batch.ptr = xvmc_driver->batch.init_ptr;
}
-
-static void intelFlushBatchLocked(Bool ignore_cliprects,
- Bool refill,
- Bool allow_unlock)
+void intelBatchbufferRequireSpace(int size)
{
- drmI830BatchBuffer batch;
-
- if (xvmc_driver->batch.space != xvmc_driver->batch.size) {
-
- batch.start = xvmc_driver->batch.start_offset;
- batch.used = xvmc_driver->batch.size - xvmc_driver->batch.space;
- batch.cliprects = 0;
- batch.num_cliprects = 0;
- batch.DR1 = 0;
- batch.DR4 = 0;
-
- if (xvmc_driver->alloc.offset) {
- if ((batch.used & 0x4) == 0) {
- ((int *)xvmc_driver->batch.ptr)[0] = 0;
- ((int *)xvmc_driver->batch.ptr)[1] = MI_BATCH_BUFFER_END;
- batch.used += 0x8;
- xvmc_driver->batch.ptr += 0x8;
- } else {
- ((int *)xvmc_driver->batch.ptr)[0] = MI_BATCH_BUFFER_END;
- batch.used += 0x4;
- xvmc_driver->batch.ptr += 0x4;
- }
- }
-
- xvmc_driver->batch.start_offset += batch.used;
- xvmc_driver->batch.size -= batch.used;
-
- if (xvmc_driver->batch.size < 8) {
- refill = TRUE;
- xvmc_driver->batch.space = xvmc_driver->batch.size = 0;
- }
- else {
- xvmc_driver->batch.size -= 8;
- xvmc_driver->batch.space = xvmc_driver->batch.size;
- }
-
- assert(xvmc_driver->batch.space >= 0);
- assert(batch.start >= xvmc_driver->alloc.offset);
- assert(batch.start < xvmc_driver->alloc.offset + xvmc_driver->alloc.size);
- assert(batch.start + batch.used > xvmc_driver->alloc.offset);
- assert(batch.start + batch.used <= xvmc_driver->alloc.offset + xvmc_driver->alloc.size);
-
- if (xvmc_driver->alloc.offset) {
- if (drmCommandWrite(xvmc_driver->fd, DRM_I830_BATCHBUFFER, &batch, sizeof(batch))) {
- fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
- exit(1);
- }
- } else {
- drmI830CmdBuffer cmd;
- cmd.buf = (char *)xvmc_driver->alloc.ptr + batch.start;
- cmd.sz = batch.used;
- cmd.DR1 = batch.DR1;
- cmd.DR4 = batch.DR4;
- cmd.num_cliprects = batch.num_cliprects;
- cmd.cliprects = batch.cliprects;
-
- if (drmCommandWrite(xvmc_driver->fd, DRM_I830_CMDBUFFER,
- &cmd, sizeof(cmd))) {
- fprintf(stderr, "DRM_I915_CMDBUFFER: %d\n", -errno);
- exit(1);
- }
- }
- }
-
- if (refill)
- intelRefillBatchLocked(allow_unlock);
+ assert(xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr + size <
+ xvmc_driver->batch.size - 8);
+ if (xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr + size
+ >= xvmc_driver->batch.size - 8)
+ intelFlushBatch(1);
}
-void intelFlushBatch(Bool refill )
+void intelBatchbufferData(const void *data, unsigned bytes, unsigned flags)
{
- intelFlushBatchLocked(FALSE, refill, TRUE);
+ intelBatchbufferRequireSpace(bytes);
+ memcpy(xvmc_driver->batch.ptr, data, bytes);
+ xvmc_driver->batch.ptr += bytes;
+ xvmc_driver->batch.space -= bytes;
}
-void intelCmdIoctl(char *buf, unsigned used)
+void intel_batch_emit_reloc(dri_bo *bo, uint32_t read_domain,
+ uint32_t write_domain, uint32_t delta, unsigned char *ptr)
{
- drmI830CmdBuffer cmd;
-
- cmd.buf = buf;
- cmd.sz = used;
- cmd.cliprects = 0;
- cmd.num_cliprects = 0;
- cmd.DR1 = 0;
- cmd.DR4 = 0;
-
- if (drmCommandWrite(xvmc_driver->fd, DRM_I830_CMDBUFFER,
- &cmd, sizeof(cmd))) {
- fprintf(stderr, "DRM_I830_CMDBUFFER: %d\n", -errno);
- exit(1);
- }
+ drm_intel_bo_emit_reloc(xvmc_driver->batch.buf,
+ ptr - xvmc_driver->batch.init_ptr, bo, delta,
+ read_domain, write_domain);
}
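
The rewritten intel_batchbuffer.c no longer double-buffers halves of a shared map; each flush submits the current bo and allocates a fresh one. What intelFlushBatch() above does, condensed to the GTT-mapping path as a sketch (the kernel_exec_fencing branch chooses drm_intel_bo_map()/unmap() instead):

static void flush_and_renew(void)
{
    unsigned int used = xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr;

    /* The batch must end on a qword boundary: pad with MI_NOOP when the
     * used size is already a multiple of 8, then append the END dword. */
    if ((used & 4) == 0) {
        *(unsigned int *)xvmc_driver->batch.ptr = 0;   /* MI_NOOP */
        xvmc_driver->batch.ptr += 4;
    }
    *(unsigned int *)xvmc_driver->batch.ptr = MI_BATCH_BUFFER_END;
    xvmc_driver->batch.ptr += 4;

    drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
    drm_intel_bo_exec(xvmc_driver->batch.buf,
                      xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr,
                      NULL, 0, 0);
    drm_intel_bo_unreference(xvmc_driver->batch.buf);

    /* Start a fresh bo; the old one is freed once the GPU is done. */
    xvmc_driver->batch.buf = drm_intel_bo_alloc(xvmc_driver->bufmgr,
                                                "batch buffer", BATCH_SIZE, 0x1000);
    drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
    xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
    xvmc_driver->batch.ptr = xvmc_driver->batch.init_ptr;
    xvmc_driver->batch.space = BATCH_SIZE;
}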
diff --git a/src/xvmc/intel_batchbuffer.h b/src/xvmc/intel_batchbuffer.h
index d4d8037a..ea9058ce 100644
--- a/src/xvmc/intel_batchbuffer.h
+++ b/src/xvmc/intel_batchbuffer.h
@@ -10,6 +10,7 @@ extern int VERBOSE;
#define BEGIN_BATCH(n) \
do { \
+ assert(xvmc_driver->batch.space >= (n) *4); \
if (xvmc_driver->batch.space < (n)*4) \
intelFlushBatch(TRUE); \
batch_ptr = xvmc_driver->batch.ptr; \
@@ -21,6 +22,13 @@ extern int VERBOSE;
batch_ptr += 4; \
} while (0)
+#define OUT_RELOC(bo,read_domains,write_domains,delta) \
+ do { \
+ *(unsigned int *)batch_ptr = delta + bo->offset; \
+ intel_batch_emit_reloc(bo, read_domains, write_domains, delta, batch_ptr); \
+ batch_ptr += 4; \
+ } while (0)
+
#define OUT_BATCH_SHORT(n) \
do { \
*(short *)batch_ptr = (n); \
@@ -44,4 +52,6 @@ extern void intelBatchbufferData(const void *, unsigned, unsigned);
extern Bool intelInitBatchBuffer(void);
extern void intelFiniBatchBuffer(void);
extern void intelCmdIoctl(char *, unsigned);
+extern void intel_batch_emit_reloc(dri_bo *bo, uint32_t read_domain,
+ uint32_t write_domain, uint32_t delta, unsigned char *);
#endif /* _INTEL_BATCHBUFFER_H */
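
OUT_RELOC pairs the two steps every buffer reference needs: it writes the bo's presumed address (bo->offset + delta) into the batch, then records a relocation at that spot via intel_batch_emit_reloc() so the kernel can patch the dword if the bo moves. Typical usage, mirroring media_state_pointers() in i965_xvmc.c above:

BATCH_LOCALS;

BEGIN_BATCH(3);
OUT_BATCH(BRW_MEDIA_STATE_POINTERS | 1);
OUT_BATCH(0);
/* Dword 2 carries the VFE state bo's address; the reloc keeps it honest. */
OUT_RELOC(media_state->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
ADVANCE_BATCH();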
diff --git a/src/xvmc/intel_xvmc.c b/src/xvmc/intel_xvmc.c
index 71ae77b4..6f559028 100644
--- a/src/xvmc/intel_xvmc.c
+++ b/src/xvmc/intel_xvmc.c
@@ -342,9 +342,7 @@ _X_EXPORT Status XvMCCreateContext(Display *display, XvPortID port,
XVMC_INFO("decoder type is %s", intel_xvmc_decoder_string(comm->type));
- xvmc_driver->batchbuffer.handle = comm->batchbuffer.handle;
- xvmc_driver->batchbuffer.offset = comm->batchbuffer.offset;
- xvmc_driver->batchbuffer.size = comm->batchbuffer.size;
+ xvmc_driver->kernel_exec_fencing = comm->kernel_exec_fencing;
/* assign local ctx info */
intel_ctx = intel_xvmc_new_context(display);
@@ -410,6 +408,13 @@ _X_EXPORT Status XvMCCreateContext(Display *display, XvPortID port,
return ret;
}
+ if ((xvmc_driver->bufmgr =
+ intel_bufmgr_gem_init(xvmc_driver->fd, 1024*64)) == NULL) {
+ XVMC_ERR("Can't init bufmgr\n");
+ return BadAlloc;
+ }
+ drm_intel_bufmgr_gem_enable_reuse(xvmc_driver->bufmgr);
+
/* call driver hook.
* driver hook should free priv_data after return if success.*/
ret = (xvmc_driver->create_context)(display, context, priv_count, priv_data);
@@ -451,6 +456,10 @@ _X_EXPORT Status XvMCDestroyContext(Display *display, XvMCContext *context)
return ret;
}
+ intelFiniBatchBuffer();
+
+ dri_bufmgr_destroy(xvmc_driver->bufmgr);
+
intel_xvmc_free_context(context->context_id);
ret = _xvmc_destroy_context(display, context);
@@ -466,7 +475,6 @@ _X_EXPORT Status XvMCDestroyContext(Display *display, XvMCContext *context)
close(xvmc_driver->fd);
xvmc_driver->fd = -1;
- intelFiniBatchBuffer();
intel_xvmc_dump_close();
}
return Success;
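
Context setup and teardown now bracket the bufmgr's lifetime: XvMCCreateContext initializes a GEM bufmgr (with reuse enabled, so freed bos return to a cache) before the driver hook runs, and XvMCDestroyContext must finalize the batch buffer before destroying the bufmgr, since every bo has to be released while its bufmgr is alive — hence intelFiniBatchBuffer() moving earlier in the hunk above. In outline (a sketch of the ordering, not the literal call sites):

static Status bufmgr_lifecycle(void)
{
    xvmc_driver->bufmgr = intel_bufmgr_gem_init(xvmc_driver->fd, 1024 * 64);
    if (xvmc_driver->bufmgr == NULL)
        return BadAlloc;
    drm_intel_bufmgr_gem_enable_reuse(xvmc_driver->bufmgr);

    if (!intelInitBatchBuffer())        /* allocates batch.buf from bufmgr */
        return BadAlloc;

    /* ... create/render/destroy surfaces ... */

    intelFiniBatchBuffer();             /* unmap + unreference batch.buf */
    dri_bufmgr_destroy(xvmc_driver->bufmgr);
    return Success;
}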
diff --git a/src/xvmc/intel_xvmc.h b/src/xvmc/intel_xvmc.h
index f9c517af..862b30e3 100644
--- a/src/xvmc/intel_xvmc.h
+++ b/src/xvmc/intel_xvmc.h
@@ -53,8 +53,9 @@
#include <X11/extensions/XvMClib.h>
#include <X11/extensions/vldXvMC.h>
#include <drm_sarea.h>
-
+#include "i915_drm.h"
#include "xf86dri.h"
+#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
@@ -131,11 +132,16 @@ typedef struct _intel_xvmc_driver {
int fd; /* drm file handler */
+ dri_bufmgr *bufmgr;
+ unsigned int kernel_exec_fencing:1;
+
struct {
- unsigned int start_offset;
+ unsigned int init_offset;
unsigned int size;
unsigned int space;
unsigned char *ptr;
+ unsigned char *init_ptr;
+ dri_bo *buf;
} batch;
struct
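
With the bo-backed batch, init_ptr marks the start of the mapping, ptr is the write cursor, and space shadows how much room remains; init_offset replaces start_offset, but the GPU address now comes from the bo itself rather than a fixed aperture location. The bookkeeping invariant the emit paths appear to maintain — illustrative only, inferred from intelInitBatchBuffer() and intelBatchbufferData() above:

#include <assert.h>

static void check_batch_invariant(void)
{
    unsigned int used = xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr;

    assert(xvmc_driver->batch.space == xvmc_driver->batch.size - used);
    assert(used + 8 <= xvmc_driver->batch.size);  /* room for NOOP + END */
}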
diff --git a/src/xvmc/xvmc_vld.c b/src/xvmc/xvmc_vld.c
index 204cfb7c..9bb98942 100644
--- a/src/xvmc/xvmc_vld.c
+++ b/src/xvmc/xvmc_vld.c
@@ -35,6 +35,8 @@
#define BATCH_STRUCT(x) intelBatchbufferData(&x, sizeof(x), 0)
+#define VLD_MAX_SLICE_SIZE (32 * 1024)
+
#define CS_SIZE 30
#define URB_SIZE 384
/* idct table */
@@ -125,25 +127,89 @@ struct media_kernel {
#define MEDIA_KERNEL_NUM (sizeof(media_kernels)/sizeof(media_kernels[0]))
+struct media_kernel_obj {
+ dri_bo *bo;
+};
+
+struct interface_descriptor_obj {
+ dri_bo *bo;
+ struct media_kernel_obj kernels[MEDIA_KERNEL_NUM];
+};
+
+struct vfe_state_obj {
+ dri_bo *bo;
+ struct interface_descriptor_obj interface;
+};
+
+struct vld_state_obj {
+ dri_bo *bo;
+};
+
+struct surface_obj {
+ dri_bo *bo;
+};
+
+struct surface_state_obj {
+ struct surface_obj surface;
+ dri_bo *bo;
+};
+
+struct binding_table_obj {
+ dri_bo *bo;
+ struct surface_state_obj surface_states[I965_MAX_SURFACES];
+};
+
+struct slice_data_obj {
+ dri_bo *bo;
+};
+
+struct cs_state_obj {
+ dri_bo *bo;
+};
+
static struct media_state {
- unsigned long state_base;
- void *state_ptr;
- unsigned long vld_state_offset;
- unsigned long vfe_state_offset;
- unsigned long interface_descriptor_offsets[16];
- unsigned long kernel_offsets[MEDIA_KERNEL_NUM];
- unsigned long cs_offset;
- unsigned long surface_state_offsets[I965_MAX_SURFACES];
- unsigned long binding_table_offset;
+ struct vfe_state_obj vfe_state;
+ struct vld_state_obj vld_state;
+ struct binding_table_obj binding_table;
+ struct cs_state_obj cs_object;
+ struct slice_data_obj slice_data;
} media_state;
-static int map_buffer(struct drm_memory_block *mem)
+/* XvMCQMatrix * 2 + idct_table + 8 * kernel offset pointer */
+#define CS_OBJECT_SIZE (32*20 + sizeof(unsigned int) * 8)
+static int free_object(struct media_state *s)
{
- return drmMap(xvmc_driver->fd, mem->handle, mem->size, &mem->ptr);
+ int i;
+#define FREE_ONE_BO(bo) \
+ if (bo) \
+ drm_intel_bo_unreference(bo)
+ FREE_ONE_BO(s->vfe_state.bo);
+ FREE_ONE_BO(s->vfe_state.interface.bo);
+ for (i = 0; i < MEDIA_KERNEL_NUM; i++)
+ FREE_ONE_BO(s->vfe_state.interface.kernels[i].bo);
+ FREE_ONE_BO(s->binding_table.bo);
+ for (i = 0; i < I965_MAX_SURFACES; i++)
+ FREE_ONE_BO(s->binding_table.surface_states[i].bo);
+ FREE_ONE_BO(s->slice_data.bo);
+ FREE_ONE_BO(s->cs_object.bo);
+ FREE_ONE_BO(s->vld_state.bo);
}
-static void unmap_buffer(struct drm_memory_block *mem)
+
+static int alloc_object(struct media_state *s)
{
- drmUnmap(mem->ptr, mem->size);
+ int i;
+
+ for (i = 0; i < I965_MAX_SURFACES; i++) {
+ s->binding_table.surface_states[i].bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface_state",
+ sizeof(struct brw_surface_state), 0x1000);
+ if (!s->binding_table.surface_states[i].bo)
+ goto out;
+ }
+ return 0;
+out:
+ free_object(s);
+ return BadAlloc;
}
static void flush()
@@ -156,47 +222,9 @@ static void flush()
BATCH_STRUCT(f);
}
-static void calc_state_layout()
-{
- int i;
- media_state.vld_state_offset = media_state.state_base;
- media_state.vfe_state_offset =
- ALIGN(media_state.vld_state_offset + sizeof(struct brw_vld_state), 64);
- media_state.interface_descriptor_offsets[0] =
- ALIGN(media_state.vfe_state_offset + sizeof(struct brw_vfe_state), 64);
- for (i = 1; i < 16; i++)
- media_state.interface_descriptor_offsets[i] =
- media_state.interface_descriptor_offsets[i - 1]
- + sizeof(struct brw_interface_descriptor);
- media_state.binding_table_offset =
- ALIGN(media_state.interface_descriptor_offsets[15] +
- + sizeof(struct brw_interface_descriptor), 64);
- media_state.surface_state_offsets[0] = ALIGN(media_state.binding_table_offset
- + 4*I965_MAX_SURFACES, 32);
- for (i = 1; i < I965_MAX_SURFACES; i++)
- media_state.surface_state_offsets[i] =
- ALIGN(media_state.surface_state_offsets[i-1]
- + sizeof(struct brw_surface_state), 32);
-
- media_state.kernel_offsets[0] =
- ALIGN(media_state.surface_state_offsets[I965_MAX_SURFACES - 1]
- + sizeof(struct brw_surface_state), 64);
- for (i = 1; i < MEDIA_KERNEL_NUM; i++)
- media_state.kernel_offsets[i] =
- ALIGN(media_state.kernel_offsets[i-1] + media_kernels[i-1].size, 64);
- media_state.cs_offset = ALIGN(media_state.kernel_offsets[MEDIA_KERNEL_NUM-1]
- + media_kernels[MEDIA_KERNEL_NUM-1].size, 64);
-}
-
-static void *offset_to_ptr(unsigned long offset)
+static Status vfe_state()
{
- return media_state.state_ptr + (offset - media_state.state_base);
-}
-
-static void vfe_state()
-{
- struct brw_vfe_state *vfe_state;
- vfe_state = offset_to_ptr(media_state.vfe_state_offset);
+ struct brw_vfe_state tmp, *vfe_state = &tmp;
memset(vfe_state, 0, sizeof(*vfe_state));
vfe_state->vfe0.extend_vfe_state_present = 1;
vfe_state->vfe1.vfe_mode = VFE_VLD_MODE;
@@ -204,59 +232,148 @@ static void vfe_state()
vfe_state->vfe1.children_present = 0;
vfe_state->vfe1.urb_entry_alloc_size = 2;
vfe_state->vfe1.max_threads = 31;
+ vfe_state->vfe2.interface_descriptor_base =
+ media_state.vfe_state.interface.bo->offset >> 4;
+
+ if (media_state.vfe_state.bo)
+ drm_intel_bo_unreference(media_state.vfe_state.bo);
- vfe_state->vfe2.interface_descriptor_base =
- media_state.interface_descriptor_offsets[0] >> 4;
+ media_state.vfe_state.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "vfe state", sizeof(struct brw_vfe_state), 0x1000);
+ if (!media_state.vfe_state.bo)
+ return BadAlloc;
+
+ drm_intel_bo_subdata(media_state.vfe_state.bo, 0, sizeof(tmp), &tmp);
+
+ drm_intel_bo_emit_reloc(media_state.vfe_state.bo,
+ offsetof(struct brw_vfe_state, vfe2),
+ media_state.vfe_state.interface.bo, 0,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
+ return Success;
}
-static void interface_descriptor()
+static Status interface_descriptor()
{
int i;
- struct brw_interface_descriptor *desc;
+ struct brw_interface_descriptor tmp, *desc = &tmp;
+
+ if (media_state.vfe_state.interface.bo)
+ drm_intel_bo_unreference(media_state.vfe_state.interface.bo);
+
+ media_state.vfe_state.interface.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "interfaces", MEDIA_KERNEL_NUM*sizeof(struct brw_interface_descriptor),
+ 0x1000);
+ if (!media_state.vfe_state.interface.bo)
+ return BadAlloc;
+
for (i = 0; i < MEDIA_KERNEL_NUM; i++) {
- desc = offset_to_ptr(media_state.interface_descriptor_offsets[i]);
memset(desc, 0, sizeof(*desc));
desc->desc0.grf_reg_blocks = 15;
- desc->desc0.kernel_start_pointer = media_state.kernel_offsets[i] >> 6;
+ desc->desc0.kernel_start_pointer =
+ media_state.vfe_state.interface.kernels[i].bo->offset >> 6;
desc->desc1.const_urb_entry_read_offset = 0;
desc->desc1.const_urb_entry_read_len = 30;
desc->desc3.binding_table_entry_count = I965_MAX_SURFACES - 1;
- desc->desc3.binding_table_pointer = media_state.binding_table_offset>>5;
+ desc->desc3.binding_table_pointer =
+ media_state.binding_table.bo->offset >> 5;
+
+ drm_intel_bo_subdata(media_state.vfe_state.interface.bo, i*sizeof(tmp), sizeof(tmp), desc);
+
+ drm_intel_bo_emit_reloc(
+ media_state.vfe_state.interface.bo,
+ i * sizeof(*desc) +
+ offsetof(struct brw_interface_descriptor, desc0),
+ media_state.vfe_state.interface.kernels[i].bo,
+ desc->desc0.grf_reg_blocks,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
+
+ drm_intel_bo_emit_reloc(
+ media_state.vfe_state.interface.bo,
+ i * sizeof(*desc) +
+ offsetof(struct brw_interface_descriptor, desc3),
+ media_state.binding_table.bo,
+ desc->desc3.binding_table_entry_count,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
}
+ return Success;
}
-static void setup_media_kernels()
+static int setup_media_kernels()
{
int i;
- void *kernel_ptr;
+
for (i = 0; i < MEDIA_KERNEL_NUM; i++) {
- kernel_ptr = offset_to_ptr(media_state.kernel_offsets[i]);
- memcpy(kernel_ptr, media_kernels[i].bin, media_kernels[i].size);
+ media_state.vfe_state.interface.kernels[i].bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "kernel",
+ media_kernels[i].size, 0x1000);
+ if (!media_state.vfe_state.interface.kernels[i].bo)
+ goto out;
}
+
+ for (i = 0; i < MEDIA_KERNEL_NUM; i++) {
+ dri_bo *bo = media_state.vfe_state.interface.kernels[i].bo;
+ drm_intel_bo_subdata(bo, 0, media_kernels[i].size, media_kernels[i].bin);
+ }
+ return 0;
+out:
+ free_object(&media_state);
+ return BadAlloc;
}
-static void binding_tables()
+static Status binding_tables()
{
- unsigned int *table;
+ unsigned int table[I965_MAX_SURFACES];
int i;
- table = offset_to_ptr(media_state.binding_table_offset);
- for (i = 0; i < I965_MAX_SURFACES; i++)
- table[i] = media_state.surface_state_offsets[i];
+
+ if (media_state.binding_table.bo)
+ drm_intel_bo_unreference(media_state.binding_table.bo);
+ media_state.binding_table.bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "binding_table",
+ I965_MAX_SURFACES*4, 0x1000);
+ if (!media_state.binding_table.bo)
+ return BadAlloc;
+
+ for (i = 0; i < I965_MAX_SURFACES; i++) {
+ table[i] = media_state.binding_table.surface_states[i].bo->offset;
+ drm_intel_bo_emit_reloc(media_state.binding_table.bo,
+ i * sizeof(unsigned int),
+ media_state.binding_table.surface_states[i].bo, 0,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
+ }
+
+ drm_intel_bo_subdata(media_state.binding_table.bo, 0, sizeof(table), table);
+ return Success;
}
-static void cs_init()
+static Status cs_init()
{
- void *buf;
+ char buf[CS_OBJECT_SIZE];
unsigned int *lib_reloc;
int i;
- buf = offset_to_ptr(media_state.cs_offset);
+
+ if (media_state.cs_object.bo)
+ drm_intel_bo_unreference(media_state.cs_object.bo);
+
+ media_state.cs_object.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr, "cs object", CS_OBJECT_SIZE, 64);
+ if (!media_state.cs_object.bo)
+ return BadAlloc;
+
memcpy(buf + 32*4, idct_table, sizeof(idct_table));
/* idct lib relocation */
- lib_reloc = buf + 32*20;
+ lib_reloc = (unsigned int *)(buf + 32*20);
for (i = 0; i < 8; i++)
- lib_reloc[i] = media_state.kernel_offsets[LIB_INTERFACE];
+ lib_reloc[i] = media_state.vfe_state.interface.kernels[LIB_INTERFACE].bo->offset;
+ drm_intel_bo_subdata(media_state.cs_object.bo, 32*4, 32*16 + 8*sizeof(unsigned int), buf + 32*4);
+
+ for (i = 0; i < 8; i++)
+ drm_intel_bo_emit_reloc(media_state.cs_object.bo,
+ 32*20 + sizeof(unsigned int) * i,
+ media_state.vfe_state.interface.kernels[LIB_INTERFACE].bo, 0,
+ I915_GEM_DOMAIN_INSTRUCTION, 0);
+
+ return Success;
}
static Status create_context(Display *display, XvMCContext *context,
@@ -265,18 +382,12 @@ static Status create_context(Display *display, XvMCContext *context,
struct i965_xvmc_context *i965_ctx;
i965_ctx = (struct i965_xvmc_context *)priv_data;
context->privData = priv_data;
- if (map_buffer(&i965_ctx->static_buffer))
- return BadAlloc;
- if (map_buffer(&i965_ctx->slice))
- return BadAlloc;
- media_state.state_base = i965_ctx->static_buffer.offset;
- media_state.state_ptr = i965_ctx->static_buffer.ptr;
- calc_state_layout();
- vfe_state();
- interface_descriptor();
- setup_media_kernels();
- binding_tables();
- cs_init();
+
+ if (alloc_object(&media_state))
+ return BadAlloc;
+
+ if (setup_media_kernels())
+ return BadAlloc;
return Success;
}
@@ -284,34 +395,45 @@ static Status destroy_context(Display *display, XvMCContext *context)
{
struct i965_xvmc_context *i965_ctx;
i965_ctx = context->privData;
- unmap_buffer(&i965_ctx->slice);
- unmap_buffer(&i965_ctx->static_buffer);
Xfree(i965_ctx);
return Success;
}
+#define STRIDE(w) (w)
+#define SIZE_YUV420(w, h) ((h) * (STRIDE(w) + STRIDE((w) >> 1)))
static Status create_surface(Display *display,
XvMCContext *context, XvMCSurface *surface, int priv_count,
CARD32 *priv_data)
{
- struct i965_xvmc_surface *x;
+ struct i965_xvmc_surface *priv_surface =
+ (struct i965_xvmc_surface *)priv_data;
+ size_t size = SIZE_YUV420(priv_surface->w, priv_surface->h);
surface->privData = priv_data;
- x = surface->privData;
+ priv_surface->bo = drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface",
+			size, 0x1000);
+	if (!priv_surface->bo)
+		return BadAlloc;
+
return Success;
}
static Status destroy_surface(Display *display,
XvMCSurface *surface)
{
+ struct i965_xvmc_surface *priv_surface =
+ surface->privData;
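+	/* Wait for the server to finish any queued operation that still
+	 * references this surface before dropping the last reference. */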
+ XSync(display, False);
+ drm_intel_bo_unreference(priv_surface->bo);
return Success;
}
static Status load_qmatrix(Display *display, XvMCContext *context,
const XvMCQMatrix *qmx)
{
- unsigned char *qmatrix;
- qmatrix = offset_to_ptr(media_state.cs_offset);
- memcpy(qmatrix, qmx->intra_quantiser_matrix, 64);
- memcpy(qmatrix + 64, qmx->non_intra_quantiser_matrix, 64);
+ Status ret;
+ ret = cs_init();
+ if (ret != Success)
+ return ret;
+ drm_intel_bo_subdata(media_state.cs_object.bo, 0, 64, qmx->intra_quantiser_matrix);
+ drm_intel_bo_subdata(media_state.cs_object.bo, 64, 64, qmx->non_intra_quantiser_matrix);
+
return Success;
}
@@ -322,12 +444,18 @@ static Status get_surface_status(Display *display, XvMCSurface *surface,
return Success;
}
-static void vld_state(const XvMCMpegControl *control)
+static Status vld_state(const XvMCMpegControl *control)
{
- struct brw_vld_state *vld;
- vld = offset_to_ptr(media_state.vld_state_offset);
- memset(vld, 0, sizeof(*vld));
+ struct brw_vld_state tmp, *vld = &tmp;
+
+ if (media_state.vld_state.bo)
+ drm_intel_bo_unreference(media_state.vld_state.bo);
+ media_state.vld_state.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "vld state", sizeof(struct brw_vld_state), 64);
+ if (!media_state.vld_state.bo)
+ return BadAlloc;
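+
+	/* Stage the VLD state locally and upload it once complete; the
+	 * bo is recreated for every frame. */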
+ memset(vld, 0, sizeof(*vld));
vld->vld0.f_code_0_0 = control->FHMV_range + 1;
vld->vld0.f_code_0_1 = control->FVMV_range + 1;
vld->vld0.f_code_1_0 = control->BHMV_range + 1;
@@ -362,44 +490,80 @@ static void vld_state(const XvMCMpegControl *control)
vld->desc_remap_table1.index_13 = FIELD_BACKWARD_INTERFACE;
vld->desc_remap_table1.index_14 = F_B_INTERFACE;
vld->desc_remap_table1.index_15 = FIELD_F_B_INTERFACE;
+
+ drm_intel_bo_subdata(media_state.vld_state.bo, 0, sizeof(tmp), vld);
+ return Success;
}
-static void setup_media_surface(int binding_table_index,
- unsigned long offset, int w, int h)
+static Status setup_media_surface(int index, dri_bo *bo,
+ unsigned long offset, int w, int h, Bool write)
{
- struct brw_surface_state *ss;
- ss = offset_to_ptr(media_state.surface_state_offsets[binding_table_index]);
+ struct brw_surface_state tmp, *ss = &tmp;
memset(ss, 0, sizeof(*ss));
ss->ss0.surface_type = BRW_SURFACE_2D;
ss->ss0.surface_format = BRW_SURFACEFORMAT_R8_SINT;
- ss->ss1.base_addr = offset;
+ ss->ss1.base_addr = offset + bo->offset;
ss->ss2.width = w - 1;
ss->ss2.height = h - 1;
ss->ss3.pitch = w - 1;
+
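+	/* ss1.base_addr only holds a presumed address; the relocation
+	 * below rewrites it with the real offset at exec time, and
+	 * 'write' marks the target as a render-target write so GEM
+	 * tracks the domain correctly. */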
+ if (media_state.binding_table.surface_states[index].bo)
+ drm_intel_bo_unreference(media_state.binding_table.surface_states[index].bo);
+
+ media_state.binding_table.surface_states[index].bo =
+ drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface_state",
+ sizeof(struct brw_surface_state), 0x1000);
+ if (!media_state.binding_table.surface_states[index].bo)
+ return BadAlloc;
+
+ drm_intel_bo_subdata(
+ media_state.binding_table.surface_states[index].bo,
+ 0, sizeof(*ss), ss);
+ drm_intel_bo_emit_reloc(media_state.binding_table.surface_states[index].bo,
+ offsetof(struct brw_surface_state, ss1),
+ bo, offset,
+ I915_GEM_DOMAIN_RENDER, write?I915_GEM_DOMAIN_RENDER:0);
+ return Success;
}
-static void setup_surface(struct i965_xvmc_surface *target,
+static Status setup_surface(struct i965_xvmc_surface *target,
struct i965_xvmc_surface *past,
struct i965_xvmc_surface *future,
int w, int h)
{
- unsigned long dst_offset, past_offset, future_offset;
- dst_offset = target->buffer.offset;
- setup_media_surface(0, dst_offset, w, h);
- setup_media_surface(1, dst_offset + w*h, w/2, h/2);
- setup_media_surface(2, dst_offset + w*h + w*h/4, w/2, h/2);
+ Status ret;
+ ret = setup_media_surface(0, target->bo, 0, w, h, TRUE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(1, target->bo, w*h, w/2, h/2, TRUE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(2, target->bo, w*h + w*h/4, w/2, h/2, TRUE);
+ if (ret != Success)
+ return ret;
if (past) {
- past_offset = past->buffer.offset;
- setup_media_surface(4, past_offset, w, h);
- setup_media_surface(5, past_offset + w*h, w/2, h/2);
- setup_media_surface(6, past_offset + w*h + w*h/4, w/2, h/2);
+ ret = setup_media_surface(4, past->bo, 0, w, h, FALSE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(5, past->bo, w*h, w/2, h/2, FALSE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(6, past->bo, w*h + w*h/4, w/2, h/2, FALSE);
+ if (ret != Success)
+ return ret;
}
if (future) {
- future_offset = future->buffer.offset;
- setup_media_surface(7, future_offset, w, h);
- setup_media_surface(8, future_offset + w*h, w/2, h/2);
- setup_media_surface(9, future_offset + w*h + w*h/4, w/2, h/2);
+ ret = setup_media_surface(7, future->bo, 0, w, h, FALSE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(8, future->bo, w*h, w/2, h/2, FALSE);
+ if (ret != Success)
+ return ret;
+ ret = setup_media_surface(9, future->bo, w*h + w*h/4, w/2, h/2, FALSE);
+ if (ret != Success)
+ return ret;
}
+ return Success;
}
static Status begin_surface(Display *display, XvMCContext *context,
@@ -411,13 +575,30 @@ static Status begin_surface(Display *display, XvMCContext *context,
struct i965_xvmc_context *i965_ctx;
struct i965_xvmc_surface *priv_target, *priv_past, *priv_future;
intel_xvmc_context_ptr intel_ctx;
+ Status ret;
+
intel_ctx = intel_xvmc_find_context(context->context_id);
priv_target = target->privData;
priv_past = past?past->privData:NULL;
priv_future = future?future->privData:NULL;
- vld_state(control);
- setup_surface(priv_target, priv_past, priv_future,
+
+ ret = vld_state(control);
+ if (ret != Success)
+ return ret;
+ ret = setup_surface(priv_target, priv_past, priv_future,
context->width, context->height);
+ if (ret != Success)
+ return ret;
+ ret = binding_tables();
+ if (ret != Success)
+ return ret;
+ ret = interface_descriptor();
+ if (ret != Success)
+ return ret;
+ ret = vfe_state();
+ if (ret != Success)
+ return ret;
+
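+	/* With all indirect state in GEM buffer objects, the full state
+	 * chain is rebuilt per frame rather than patched in place. */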
LOCK_HARDWARE(intel_ctx->hw_context);
flush();
UNLOCK_HARDWARE(intel_ctx->hw_context);
@@ -455,8 +636,8 @@ static void media_state_pointers()
BATCH_LOCALS;
BEGIN_BATCH(3);
OUT_BATCH(BRW_MEDIA_STATE_POINTERS|1);
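+	/* The delta of 1 on the VLD state reloc preserves the low bit the
+	 * old code OR'ed into the state offset, since the delta is added
+	 * to the bo's relocated address. */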
- OUT_BATCH(media_state.vld_state_offset|1);
- OUT_BATCH(media_state.vfe_state_offset);
+ OUT_RELOC(media_state.vld_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
+ OUT_RELOC(media_state.vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
ADVANCE_BATCH();
}
static void align_urb_fence()
@@ -513,11 +694,11 @@ static void cs_buffer()
BATCH_LOCALS;
BEGIN_BATCH(2);
OUT_BATCH(BRW_CONSTANT_BUFFER|0|(1<<8));
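+	/* CS_SIZE travels in the reloc delta, packing the buffer length
+	 * into the low bits of the relocated address just as the old
+	 * offset|CS_SIZE encoding did. */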
- OUT_BATCH(media_state.cs_offset|CS_SIZE);
+ OUT_RELOC(media_state.cs_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, CS_SIZE);
ADVANCE_BATCH();
}
-static void vld_send_media_object(unsigned long slice_offset,
+static void vld_send_media_object(dri_bo *bo,
int slice_len, int mb_h_pos, int mb_v_pos, int mb_bit_offset,
int mb_count, int q_scale_code)
{
@@ -526,11 +707,12 @@ static void vld_send_media_object(unsigned long slice_offset,
OUT_BATCH(BRW_MEDIA_OBJECT|4);
OUT_BATCH(0);
OUT_BATCH(slice_len);
- OUT_BATCH(slice_offset);
+ OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
OUT_BATCH((mb_h_pos<<24)|(mb_v_pos<<16)|(mb_count<<8)|(mb_bit_offset));
OUT_BATCH(q_scale_code<<24);
ADVANCE_BATCH();
}
+
static Status put_slice2(Display *display, XvMCContext *context,
unsigned char *slice, int nbytes, int sliceCode)
{
@@ -545,9 +727,26 @@ static Status put_slice2(Display *display, XvMCContext *context,
q_scale_code = bit_buf>>27;
- memcpy(i965_ctx->slice.ptr, slice, nbytes);
- intel_ctx = intel_xvmc_find_context(context->context_id);
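+	/* Replace the slice bo on every call: unmap and release the old
+	 * one, then allocate a fresh buffer and map it (through the GTT
+	 * when the kernel handles exec fencing) for the bitstream copy. */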
+ if (media_state.slice_data.bo) {
+ if (xvmc_driver->kernel_exec_fencing)
+ drm_intel_gem_bo_unmap_gtt(media_state.slice_data.bo);
+ else
+ drm_intel_bo_unmap(media_state.slice_data.bo);
+ drm_intel_bo_unreference(media_state.slice_data.bo);
+ }
+ media_state.slice_data.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
+ "slice data", VLD_MAX_SLICE_SIZE, 64);
+ if (!media_state.slice_data.bo)
+ return BadAlloc;
+ if (xvmc_driver->kernel_exec_fencing)
+ drm_intel_gem_bo_map_gtt(media_state.slice_data.bo);
+ else
+ drm_intel_bo_map(media_state.slice_data.bo, 1);
+
+ memcpy(media_state.slice_data.bo->virtual, slice, nbytes);
+
+ intel_ctx = intel_xvmc_find_context(context->context_id);
LOCK_HARDWARE(intel_ctx->hw_context);
state_base_address();
pipeline_select(&media_state);
@@ -555,7 +754,7 @@ static Status put_slice2(Display *display, XvMCContext *context,
urb_layout();
cs_urb_layout();
cs_buffer();
- vld_send_media_object(i965_ctx->slice.offset,
+ vld_send_media_object(media_state.slice_data.bo,
nbytes,
0, mb_row, 6, 127, q_scale_code);
intelFlushBatch(TRUE);
@@ -573,8 +772,10 @@ static Status put_surface(Display *display,XvMCSurface *surface,
{
struct i965_xvmc_surface *private_surface =
surface->privData;
+ uint32_t handle;
- data->surf_offset = private_surface->buffer.offset;
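+	/* GEM objects have no stable offset to report, so flink the bo
+	 * and hand the DDX its global name instead. */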
+ drm_intel_bo_flink(private_surface->bo, &handle);
+ data->handle = handle;
return Success;
}