path: root/src/r6xx_accel.c
author     Dave Airlie <airlied@redhat.com>  2012-06-15 10:05:03 +0100
committer  Dave Airlie <airlied@redhat.com>  2012-06-15 15:41:52 +0100
commit     18d5ae3bd9075ac1a2ee21b071ac133e2e634b62 (patch)
tree       82e60e279e52956df3c4d4f124c8d489d2ed106d  /src/r6xx_accel.c
parent     248e912c487636d7352cfad43c03fc9f19fc2215 (diff)
radeon: drop all UMS/DRI1/XAA/overlay support.
This overhauls the radeon driver and removes all the old UMS-only code: it drops all of the UMS, DRI1, XAA, overlay Xv, video capture and TV tuner support. There are probably a lot more cleanups that will fall out of this afterwards. So far this is compile/build tested. Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'src/r6xx_accel.c')
-rw-r--r--  src/r6xx_accel.c  712
1 file changed, 312 insertions(+), 400 deletions(-)
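The change that repeats through every hunk below is the removal of the drmBufPtr ib parameter: with the UMS command-submission path gone, the command stream is always reached through the per-screen info->cs state, so the emission helpers only need pScrn. A minimal before/after sketch of a call site (illustrative only, based on the accel_state->ib handle the old code threaded through):

    /* before: UMS and KMS callers had to pass the indirect buffer explicitly */
    r600_wait_3d_idle_clean(pScrn, accel_state->ib);
    R600CPFlushIndirect(pScrn, accel_state->ib);

    /* after: KMS-only, the CS is implicit in the per-screen info state */
    r600_wait_3d_idle_clean(pScrn);
    R600CPFlushIndirect(pScrn);    /* now just wraps radeon_cs_flush_indirect() */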
diff --git a/src/r6xx_accel.c b/src/r6xx_accel.c
index 8d254241..5fe643dd 100644
--- a/src/r6xx_accel.c
+++ b/src/r6xx_accel.c
@@ -65,100 +65,56 @@ static const uint32_t R600_ROP[16] = {
#define KMS_MULTI_OP 1
/* Flush the indirect buffer to the kernel for submission to the card */
-void R600CPFlushIndirect(ScrnInfoPtr pScrn, drmBufPtr ib)
+void R600CPFlushIndirect(ScrnInfoPtr pScrn)
{
- RADEONInfoPtr info = RADEONPTR(pScrn);
- drmBufPtr buffer = ib;
- int start = 0;
- drm_radeon_indirect_t indirect;
-
-#if defined(XF86DRM_MODE)
- if (info->cs) {
- radeon_cs_flush_indirect(pScrn);
- return;
- }
-#endif
-
- if (!buffer) return;
-
- //xf86DrvMsg(pScrn->scrnIndex, X_INFO, "Flushing buffer %d\n",
- // buffer->idx);
-
- while (buffer->used & 0x3c){
- BEGIN_BATCH(1);
- E32(buffer, CP_PACKET2()); /* fill up to multiple of 16 dwords */
- END_BATCH();
- }
-
- info->accel_state->vbo.vb_offset = 0;
- info->accel_state->vbo.vb_start_op = -1;
-
- //ErrorF("buffer bytes: %d\n", buffer->used);
-
- indirect.idx = buffer->idx;
- indirect.start = start;
- indirect.end = buffer->used;
- indirect.discard = 1;
-
- drmCommandWriteRead(info->dri->drmFD, DRM_RADEON_INDIRECT,
- &indirect, sizeof(drm_radeon_indirect_t));
-
+ radeon_cs_flush_indirect(pScrn);
}
-void R600IBDiscard(ScrnInfoPtr pScrn, drmBufPtr ib)
+void R600IBDiscard(ScrnInfoPtr pScrn)
{
-#if defined(XF86DRM_MODE)
- RADEONInfoPtr info = RADEONPTR(pScrn);
- if (info->cs) {
- radeon_ib_discard(pScrn);
- }
-#endif
- if (!ib) return;
-
- ib->used = 0;
- R600CPFlushIndirect(pScrn, ib);
+ radeon_ib_discard(pScrn);
}
void
-r600_wait_3d_idle_clean(ScrnInfoPtr pScrn, drmBufPtr ib)
+r600_wait_3d_idle_clean(ScrnInfoPtr pScrn)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
//flush caches, don't generate timestamp
BEGIN_BATCH(5);
- PACK3(ib, IT_EVENT_WRITE, 1);
- E32(ib, CACHE_FLUSH_AND_INV_EVENT);
+ PACK3(IT_EVENT_WRITE, 1);
+ E32(CACHE_FLUSH_AND_INV_EVENT);
// wait for 3D idle clean
- EREG(ib, WAIT_UNTIL, (WAIT_3D_IDLE_bit |
+ EREG(WAIT_UNTIL, (WAIT_3D_IDLE_bit |
WAIT_3D_IDLECLEAN_bit));
END_BATCH();
}
void
-r600_wait_3d_idle(ScrnInfoPtr pScrn, drmBufPtr ib)
+r600_wait_3d_idle(ScrnInfoPtr pScrn)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
BEGIN_BATCH(3);
- EREG(ib, WAIT_UNTIL, WAIT_3D_IDLE_bit);
+ EREG(WAIT_UNTIL, WAIT_3D_IDLE_bit);
END_BATCH();
}
void
-r600_start_3d(ScrnInfoPtr pScrn, drmBufPtr ib)
+r600_start_3d(ScrnInfoPtr pScrn)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
if (info->ChipFamily < CHIP_FAMILY_RV770) {
BEGIN_BATCH(5);
- PACK3(ib, IT_START_3D_CMDBUF, 1);
- E32(ib, 0);
+ PACK3(IT_START_3D_CMDBUF, 1);
+ E32(0);
} else
BEGIN_BATCH(3);
- PACK3(ib, IT_CONTEXT_CONTROL, 2);
- E32(ib, 0x80000000);
- E32(ib, 0x80000000);
+ PACK3(IT_CONTEXT_CONTROL, 2);
+ E32(0x80000000);
+ E32(0x80000000);
END_BATCH();
}
@@ -169,7 +125,7 @@ r600_start_3d(ScrnInfoPtr pScrn, drmBufPtr ib)
// asic stack/thread/gpr limits - need to query the drm
static void
-r600_sq_setup(ScrnInfoPtr pScrn, drmBufPtr ib, sq_config_t *sq_conf)
+r600_sq_setup(ScrnInfoPtr pScrn, sq_config_t *sq_conf)
{
uint32_t sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
uint32_t sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
@@ -209,25 +165,24 @@ r600_sq_setup(ScrnInfoPtr pScrn, drmBufPtr ib, sq_config_t *sq_conf)
(sq_conf->num_es_stack_entries << NUM_ES_STACK_ENTRIES_shift));
BEGIN_BATCH(8);
- PACK0(ib, SQ_CONFIG, 6);
- E32(ib, sq_config);
- E32(ib, sq_gpr_resource_mgmt_1);
- E32(ib, sq_gpr_resource_mgmt_2);
- E32(ib, sq_thread_resource_mgmt);
- E32(ib, sq_stack_resource_mgmt_1);
- E32(ib, sq_stack_resource_mgmt_2);
+ PACK0(SQ_CONFIG, 6);
+ E32(sq_config);
+ E32(sq_gpr_resource_mgmt_1);
+ E32(sq_gpr_resource_mgmt_2);
+ E32(sq_thread_resource_mgmt);
+ E32(sq_stack_resource_mgmt_1);
+ E32(sq_stack_resource_mgmt_2);
END_BATCH();
}
void
-r600_set_render_target(ScrnInfoPtr pScrn, drmBufPtr ib, cb_config_t *cb_conf, uint32_t domain)
+r600_set_render_target(ScrnInfoPtr pScrn, cb_config_t *cb_conf, uint32_t domain)
{
uint32_t cb_color_info, cb_color_control;
unsigned pitch, slice, h, array_mode;
RADEONInfoPtr info = RADEONPTR(pScrn);
-#if defined(XF86DRM_MODE)
if (cb_conf->surface) {
switch (cb_conf->surface->level[0].mode) {
case RADEON_SURF_MODE_1D:
@@ -243,7 +198,6 @@ r600_set_render_target(ScrnInfoPtr pScrn, drmBufPtr ib, cb_config_t *cb_conf, ui
pitch = (cb_conf->surface->level[0].nblk_x >> 3) - 1;
slice = ((cb_conf->surface->level[0].nblk_x * cb_conf->surface->level[0].nblk_y) / 64) - 1;
} else
-#endif
{
array_mode = cb_conf->array_mode;
pitch = (cb_conf->w / 8) - 1;
@@ -277,7 +231,7 @@ r600_set_render_target(ScrnInfoPtr pScrn, drmBufPtr ib, cb_config_t *cb_conf, ui
cb_color_info |= SOURCE_FORMAT_bit;
BEGIN_BATCH(3 + 2);
- EREG(ib, (CB_COLOR0_BASE + (4 * cb_conf->id)), (cb_conf->base >> 8));
+ EREG((CB_COLOR0_BASE + (4 * cb_conf->id)), (cb_conf->base >> 8));
RELOC_BATCH(cb_conf->bo, 0, domain);
END_BATCH();
@@ -285,8 +239,8 @@ r600_set_render_target(ScrnInfoPtr pScrn, drmBufPtr ib, cb_config_t *cb_conf, ui
if ((info->ChipFamily > CHIP_FAMILY_R600) &&
(info->ChipFamily < CHIP_FAMILY_RV770)) {
BEGIN_BATCH(2);
- PACK3(ib, IT_SURFACE_BASE_UPDATE, 1);
- E32(ib, (2 << cb_conf->id));
+ PACK3(IT_SURFACE_BASE_UPDATE, 1);
+ E32((2 << cb_conf->id));
END_BATCH();
}
/* Set CMASK & TILE buffer to the offset of color buffer as
@@ -294,47 +248,47 @@ r600_set_render_target(ScrnInfoPtr pScrn, drmBufPtr ib, cb_config_t *cb_conf, ui
* then have a valid cmd stream
*/
BEGIN_BATCH(3 + 2);
- EREG(ib, (CB_COLOR0_TILE + (4 * cb_conf->id)), (0 >> 8)); // CMASK per-tile data base/256
+ EREG((CB_COLOR0_TILE + (4 * cb_conf->id)), (0 >> 8)); // CMASK per-tile data base/256
RELOC_BATCH(cb_conf->bo, 0, domain);
END_BATCH();
BEGIN_BATCH(3 + 2);
- EREG(ib, (CB_COLOR0_FRAG + (4 * cb_conf->id)), (0 >> 8)); // FMASK per-tile data base/256
+ EREG((CB_COLOR0_FRAG + (4 * cb_conf->id)), (0 >> 8)); // FMASK per-tile data base/256
RELOC_BATCH(cb_conf->bo, 0, domain);
END_BATCH();
BEGIN_BATCH(9);
// pitch only for ARRAY_LINEAR_GENERAL, other tiling modes require addrlib
- EREG(ib, (CB_COLOR0_SIZE + (4 * cb_conf->id)), ((pitch << PITCH_TILE_MAX_shift) |
+ EREG((CB_COLOR0_SIZE + (4 * cb_conf->id)), ((pitch << PITCH_TILE_MAX_shift) |
(slice << SLICE_TILE_MAX_shift)));
- EREG(ib, (CB_COLOR0_VIEW + (4 * cb_conf->id)), ((0 << SLICE_START_shift) |
+ EREG((CB_COLOR0_VIEW + (4 * cb_conf->id)), ((0 << SLICE_START_shift) |
(0 << SLICE_MAX_shift)));
- EREG(ib, (CB_COLOR0_MASK + (4 * cb_conf->id)), ((0 << CMASK_BLOCK_MAX_shift) |
+ EREG((CB_COLOR0_MASK + (4 * cb_conf->id)), ((0 << CMASK_BLOCK_MAX_shift) |
(0 << FMASK_TILE_MAX_shift)));
END_BATCH();
BEGIN_BATCH(3 + 2);
- EREG(ib, (CB_COLOR0_INFO + (4 * cb_conf->id)), cb_color_info);
+ EREG((CB_COLOR0_INFO + (4 * cb_conf->id)), cb_color_info);
RELOC_BATCH(cb_conf->bo, 0, domain);
END_BATCH();
BEGIN_BATCH(9);
- EREG(ib, CB_TARGET_MASK, (cb_conf->pmask << TARGET0_ENABLE_shift));
+ EREG(CB_TARGET_MASK, (cb_conf->pmask << TARGET0_ENABLE_shift));
cb_color_control = R600_ROP[cb_conf->rop] |
(cb_conf->blend_enable << TARGET_BLEND_ENABLE_shift);
if (info->ChipFamily == CHIP_FAMILY_R600) {
/* no per-MRT blend on R600 */
- EREG(ib, CB_COLOR_CONTROL, cb_color_control);
- EREG(ib, CB_BLEND_CONTROL, cb_conf->blendcntl);
+ EREG(CB_COLOR_CONTROL, cb_color_control);
+ EREG(CB_BLEND_CONTROL, cb_conf->blendcntl);
} else {
if (cb_conf->blend_enable)
cb_color_control |= PER_MRT_BLEND_bit;
- EREG(ib, CB_COLOR_CONTROL, cb_color_control);
- EREG(ib, CB_BLEND0_CONTROL, cb_conf->blendcntl);
+ EREG(CB_COLOR_CONTROL, cb_color_control);
+ EREG(CB_BLEND0_CONTROL, cb_conf->blendcntl);
}
END_BATCH();
}
static void
-r600_cp_set_surface_sync(ScrnInfoPtr pScrn, drmBufPtr ib, uint32_t sync_type,
+r600_cp_set_surface_sync(ScrnInfoPtr pScrn, uint32_t sync_type,
uint32_t size, uint64_t mc_addr,
struct radeon_bo *bo, uint32_t rdomains, uint32_t wdomain)
{
@@ -346,22 +300,22 @@ r600_cp_set_surface_sync(ScrnInfoPtr pScrn, drmBufPtr ib, uint32_t sync_type,
cp_coher_size = ((size + 255) >> 8);
BEGIN_BATCH(5 + 2);
- PACK3(ib, IT_SURFACE_SYNC, 4);
- E32(ib, sync_type);
- E32(ib, cp_coher_size);
- E32(ib, (mc_addr >> 8));
- E32(ib, 10); /* poll interval */
+ PACK3(IT_SURFACE_SYNC, 4);
+ E32(sync_type);
+ E32(cp_coher_size);
+ E32((mc_addr >> 8));
+ E32(10); /* poll interval */
RELOC_BATCH(bo, rdomains, wdomain);
END_BATCH();
}
/* inserts a wait for vline in the command stream */
void
-r600_cp_wait_vline_sync(ScrnInfoPtr pScrn, drmBufPtr ib, PixmapPtr pPix,
+r600_cp_wait_vline_sync(ScrnInfoPtr pScrn, PixmapPtr pPix,
xf86CrtcPtr crtc, int start, int stop)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
- uint32_t offset;
+ drmmode_crtc_private_ptr drmmode_crtc;
if (!crtc)
return;
@@ -369,21 +323,8 @@ r600_cp_wait_vline_sync(ScrnInfoPtr pScrn, drmBufPtr ib, PixmapPtr pPix,
if (!crtc->enabled)
return;
- if (info->cs) {
- if (pPix != pScrn->pScreen->GetScreenPixmap(pScrn->pScreen))
- return;
- } else {
-#ifdef USE_EXA
- if (info->useEXA)
- offset = exaGetPixmapOffset(pPix);
- else
-#endif
- offset = pPix->devPrivate.ptr - info->FB;
-
- /* if drawing to front buffer */
- if (offset != 0)
- return;
- }
+ if (pPix != pScrn->pScreen->GetScreenPixmap(pScrn->pScreen))
+ return;
start = max(start, crtc->y);
stop = min(stop, crtc->y + crtc->mode.VDisplay);
@@ -391,68 +332,45 @@ r600_cp_wait_vline_sync(ScrnInfoPtr pScrn, drmBufPtr ib, PixmapPtr pPix,
if (start >= stop)
return;
-#if defined(XF86DRM_MODE)
- if (info->cs) {
- drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
-
- BEGIN_BATCH(11);
- /* set the VLINE range */
- EREG(ib, AVIVO_D1MODE_VLINE_START_END, /* this is just a marker */
- (start << AVIVO_D1MODE_VLINE_START_SHIFT) |
- (stop << AVIVO_D1MODE_VLINE_END_SHIFT));
-
- /* tell the CP to poll the VLINE state register */
- PACK3(ib, IT_WAIT_REG_MEM, 6);
- E32(ib, IT_WAIT_REG | IT_WAIT_EQ);
- E32(ib, IT_WAIT_ADDR(AVIVO_D1MODE_VLINE_STATUS));
- E32(ib, 0);
- E32(ib, 0); // Ref value
- E32(ib, AVIVO_D1MODE_VLINE_STAT); // Mask
- E32(ib, 10); // Wait interval
- /* add crtc reloc */
- PACK3(ib, IT_NOP, 1);
- E32(ib, drmmode_crtc->mode_crtc->crtc_id);
- END_BATCH();
- } else
-#endif
- {
- RADEONCrtcPrivatePtr radeon_crtc = crtc->driver_private;
-
- BEGIN_BATCH(9);
- /* set the VLINE range */
- EREG(ib, AVIVO_D1MODE_VLINE_START_END + radeon_crtc->crtc_offset,
- (start << AVIVO_D1MODE_VLINE_START_SHIFT) |
- (stop << AVIVO_D1MODE_VLINE_END_SHIFT));
-
- /* tell the CP to poll the VLINE state register */
- PACK3(ib, IT_WAIT_REG_MEM, 6);
- E32(ib, IT_WAIT_REG | IT_WAIT_EQ);
- E32(ib, IT_WAIT_ADDR(AVIVO_D1MODE_VLINE_STATUS + radeon_crtc->crtc_offset));
- E32(ib, 0);
- E32(ib, 0); // Ref value
- E32(ib, AVIVO_D1MODE_VLINE_STAT); // Mask
- E32(ib, 10); // Wait interval
- END_BATCH();
- }
+ drmmode_crtc = crtc->driver_private;
+
+ BEGIN_BATCH(11);
+ /* set the VLINE range */
+ EREG(AVIVO_D1MODE_VLINE_START_END, /* this is just a marker */
+ (start << AVIVO_D1MODE_VLINE_START_SHIFT) |
+ (stop << AVIVO_D1MODE_VLINE_END_SHIFT));
+
+ /* tell the CP to poll the VLINE state register */
+ PACK3(IT_WAIT_REG_MEM, 6);
+ E32(IT_WAIT_REG | IT_WAIT_EQ);
+ E32(IT_WAIT_ADDR(AVIVO_D1MODE_VLINE_STATUS));
+ E32(0);
+ E32(0); // Ref value
+ E32(AVIVO_D1MODE_VLINE_STAT); // Mask
+ E32(10); // Wait interval
+ /* add crtc reloc */
+ PACK3(IT_NOP, 1);
+ E32(drmmode_crtc->mode_crtc->crtc_id);
+ END_BATCH();
}
void
-r600_set_spi(ScrnInfoPtr pScrn, drmBufPtr ib, int vs_export_count, int num_interp)
+r600_set_spi(ScrnInfoPtr pScrn, int vs_export_count, int num_interp)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
BEGIN_BATCH(8);
/* Interpolator setup */
- EREG(ib, SPI_VS_OUT_CONFIG, (vs_export_count << VS_EXPORT_COUNT_shift));
- PACK0(ib, SPI_PS_IN_CONTROL_0, 3);
- E32(ib, (num_interp << NUM_INTERP_shift));
- E32(ib, 0);
- E32(ib, 0);
+ EREG(SPI_VS_OUT_CONFIG, (vs_export_count << VS_EXPORT_COUNT_shift));
+ PACK0(SPI_PS_IN_CONTROL_0, 3);
+ E32((num_interp << NUM_INTERP_shift));
+ E32(0);
+ E32(0);
END_BATCH();
}
void
-r600_fs_setup(ScrnInfoPtr pScrn, drmBufPtr ib, shader_config_t *fs_conf, uint32_t domain)
+r600_fs_setup(ScrnInfoPtr pScrn, shader_config_t *fs_conf, uint32_t domain)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
uint32_t sq_pgm_resources;
@@ -464,18 +382,18 @@ r600_fs_setup(ScrnInfoPtr pScrn, drmBufPtr ib, shader_config_t *fs_conf, uint32_
sq_pgm_resources |= SQ_PGM_RESOURCES_FS__DX10_CLAMP_bit;
BEGIN_BATCH(3 + 2);
- EREG(ib, SQ_PGM_START_FS, fs_conf->shader_addr >> 8);
+ EREG(SQ_PGM_START_FS, fs_conf->shader_addr >> 8);
RELOC_BATCH(fs_conf->bo, domain, 0);
END_BATCH();
BEGIN_BATCH(6);
- EREG(ib, SQ_PGM_RESOURCES_FS, sq_pgm_resources);
- EREG(ib, SQ_PGM_CF_OFFSET_FS, 0);
+ EREG(SQ_PGM_RESOURCES_FS, sq_pgm_resources);
+ EREG(SQ_PGM_CF_OFFSET_FS, 0);
END_BATCH();
}
void
-r600_vs_setup(ScrnInfoPtr pScrn, drmBufPtr ib, shader_config_t *vs_conf, uint32_t domain)
+r600_vs_setup(ScrnInfoPtr pScrn, shader_config_t *vs_conf, uint32_t domain)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
uint32_t sq_pgm_resources;
@@ -491,23 +409,23 @@ r600_vs_setup(ScrnInfoPtr pScrn, drmBufPtr ib, shader_config_t *vs_conf, uint32_
sq_pgm_resources |= UNCACHED_FIRST_INST_bit;
/* flush SQ cache */
- r600_cp_set_surface_sync(pScrn, ib, SH_ACTION_ENA_bit,
+ r600_cp_set_surface_sync(pScrn, SH_ACTION_ENA_bit,
vs_conf->shader_size, vs_conf->shader_addr,
vs_conf->bo, domain, 0);
BEGIN_BATCH(3 + 2);
- EREG(ib, SQ_PGM_START_VS, vs_conf->shader_addr >> 8);
+ EREG(SQ_PGM_START_VS, vs_conf->shader_addr >> 8);
RELOC_BATCH(vs_conf->bo, domain, 0);
END_BATCH();
BEGIN_BATCH(6);
- EREG(ib, SQ_PGM_RESOURCES_VS, sq_pgm_resources);
- EREG(ib, SQ_PGM_CF_OFFSET_VS, 0);
+ EREG(SQ_PGM_RESOURCES_VS, sq_pgm_resources);
+ EREG(SQ_PGM_CF_OFFSET_VS, 0);
END_BATCH();
}
void
-r600_ps_setup(ScrnInfoPtr pScrn, drmBufPtr ib, shader_config_t *ps_conf, uint32_t domain)
+r600_ps_setup(ScrnInfoPtr pScrn, shader_config_t *ps_conf, uint32_t domain)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
uint32_t sq_pgm_resources;
@@ -525,50 +443,50 @@ r600_ps_setup(ScrnInfoPtr pScrn, drmBufPtr ib, shader_config_t *ps_conf, uint32_
sq_pgm_resources |= CLAMP_CONSTS_bit;
/* flush SQ cache */
- r600_cp_set_surface_sync(pScrn, ib, SH_ACTION_ENA_bit,
+ r600_cp_set_surface_sync(pScrn, SH_ACTION_ENA_bit,
ps_conf->shader_size, ps_conf->shader_addr,
ps_conf->bo, domain, 0);
BEGIN_BATCH(3 + 2);
- EREG(ib, SQ_PGM_START_PS, ps_conf->shader_addr >> 8);
+ EREG(SQ_PGM_START_PS, ps_conf->shader_addr >> 8);
RELOC_BATCH(ps_conf->bo, domain, 0);
END_BATCH();
BEGIN_BATCH(9);
- EREG(ib, SQ_PGM_RESOURCES_PS, sq_pgm_resources);
- EREG(ib, SQ_PGM_EXPORTS_PS, ps_conf->export_mode);
- EREG(ib, SQ_PGM_CF_OFFSET_PS, 0);
+ EREG(SQ_PGM_RESOURCES_PS, sq_pgm_resources);
+ EREG(SQ_PGM_EXPORTS_PS, ps_conf->export_mode);
+ EREG(SQ_PGM_CF_OFFSET_PS, 0);
END_BATCH();
}
void
-r600_set_alu_consts(ScrnInfoPtr pScrn, drmBufPtr ib, int offset, int count, float *const_buf)
+r600_set_alu_consts(ScrnInfoPtr pScrn, int offset, int count, float *const_buf)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
int i;
const int countreg = count * (SQ_ALU_CONSTANT_offset >> 2);
BEGIN_BATCH(2 + countreg);
- PACK0(ib, SQ_ALU_CONSTANT + offset * SQ_ALU_CONSTANT_offset, countreg);
+ PACK0(SQ_ALU_CONSTANT + offset * SQ_ALU_CONSTANT_offset, countreg);
for (i = 0; i < countreg; i++)
- EFLOAT(ib, const_buf[i]);
+ EFLOAT(const_buf[i]);
END_BATCH();
}
void
-r600_set_bool_consts(ScrnInfoPtr pScrn, drmBufPtr ib, int offset, uint32_t val)
+r600_set_bool_consts(ScrnInfoPtr pScrn, int offset, uint32_t val)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
/* bool register order is: ps, vs, gs; one register each
* 1 bits per bool; 32 bools each for ps, vs, gs.
*/
BEGIN_BATCH(3);
- EREG(ib, SQ_BOOL_CONST + offset * SQ_BOOL_CONST_offset, val);
+ EREG(SQ_BOOL_CONST + offset * SQ_BOOL_CONST_offset, val);
END_BATCH();
}
static void
-r600_set_vtx_resource(ScrnInfoPtr pScrn, drmBufPtr ib, vtx_resource_t *res, uint32_t domain)
+r600_set_vtx_resource(ScrnInfoPtr pScrn, vtx_resource_t *res, uint32_t domain)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
struct radeon_accel_state *accel_state = info->accel_state;
@@ -594,38 +512,37 @@ r600_set_vtx_resource(ScrnInfoPtr pScrn, drmBufPtr ib, vtx_resource_t *res, uint
(info->ChipFamily == CHIP_FAMILY_RS780) ||
(info->ChipFamily == CHIP_FAMILY_RS880) ||
(info->ChipFamily == CHIP_FAMILY_RV710))
- r600_cp_set_surface_sync(pScrn, ib, TC_ACTION_ENA_bit,
+ r600_cp_set_surface_sync(pScrn, TC_ACTION_ENA_bit,
accel_state->vbo.vb_offset, accel_state->vbo.vb_mc_addr,
res->bo,
domain, 0);
else
- r600_cp_set_surface_sync(pScrn, ib, VC_ACTION_ENA_bit,
+ r600_cp_set_surface_sync(pScrn, VC_ACTION_ENA_bit,
accel_state->vbo.vb_offset, accel_state->vbo.vb_mc_addr,
res->bo,
domain, 0);
BEGIN_BATCH(9 + 2);
- PACK0(ib, SQ_VTX_RESOURCE + res->id * SQ_VTX_RESOURCE_offset, 7);
- E32(ib, res->vb_addr & 0xffffffff); // 0: BASE_ADDRESS
- E32(ib, (res->vtx_num_entries << 2) - 1); // 1: SIZE
- E32(ib, sq_vtx_constant_word2); // 2: BASE_HI, STRIDE, CLAMP, FORMAT, ENDIAN
- E32(ib, res->mem_req_size << MEM_REQUEST_SIZE_shift); // 3: MEM_REQUEST_SIZE ?!?
- E32(ib, 0); // 4: n/a
- E32(ib, 0); // 5: n/a
- E32(ib, SQ_TEX_VTX_VALID_BUFFER << SQ_VTX_CONSTANT_WORD6_0__TYPE_shift); // 6: TYPE
+ PACK0(SQ_VTX_RESOURCE + res->id * SQ_VTX_RESOURCE_offset, 7);
+ E32(res->vb_addr & 0xffffffff); // 0: BASE_ADDRESS
+ E32((res->vtx_num_entries << 2) - 1); // 1: SIZE
+ E32(sq_vtx_constant_word2); // 2: BASE_HI, STRIDE, CLAMP, FORMAT, ENDIAN
+ E32(res->mem_req_size << MEM_REQUEST_SIZE_shift); // 3: MEM_REQUEST_SIZE ?!?
+ E32(0); // 4: n/a
+ E32(0); // 5: n/a
+ E32(SQ_TEX_VTX_VALID_BUFFER << SQ_VTX_CONSTANT_WORD6_0__TYPE_shift); // 6: TYPE
RELOC_BATCH(res->bo, domain, 0);
END_BATCH();
}
void
-r600_set_tex_resource(ScrnInfoPtr pScrn, drmBufPtr ib, tex_resource_t *tex_res, uint32_t domain)
+r600_set_tex_resource(ScrnInfoPtr pScrn, tex_resource_t *tex_res, uint32_t domain)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
uint32_t sq_tex_resource_word5, sq_tex_resource_word6;
uint32_t array_mode, pitch;
-#if defined(XF86DRM_MODE)
if (tex_res->surface) {
switch (tex_res->surface->level[0].mode) {
case RADEON_SURF_MODE_1D:
@@ -640,7 +557,6 @@ r600_set_tex_resource(ScrnInfoPtr pScrn, drmBufPtr ib, tex_resource_t *tex_res,
}
pitch = tex_res->surface->level[0].nblk_x >> 3;
} else
-#endif
{
array_mode = tex_res->tile_mode;
pitch = (tex_res->pitch + 7) >> 3;
@@ -693,26 +609,26 @@ r600_set_tex_resource(ScrnInfoPtr pScrn, drmBufPtr ib, tex_resource_t *tex_res,
sq_tex_resource_word6 |= INTERLACED_bit;
/* flush texture cache */
- r600_cp_set_surface_sync(pScrn, ib, TC_ACTION_ENA_bit,
+ r600_cp_set_surface_sync(pScrn, TC_ACTION_ENA_bit,
tex_res->size, tex_res->base,
tex_res->bo, domain, 0);
BEGIN_BATCH(9 + 4);
- PACK0(ib, SQ_TEX_RESOURCE + tex_res->id * SQ_TEX_RESOURCE_offset, 7);
- E32(ib, sq_tex_resource_word0);
- E32(ib, sq_tex_resource_word1);
- E32(ib, ((tex_res->base) >> 8));
- E32(ib, ((tex_res->mip_base) >> 8));
- E32(ib, sq_tex_resource_word4);
- E32(ib, sq_tex_resource_word5);
- E32(ib, sq_tex_resource_word6);
+ PACK0(SQ_TEX_RESOURCE + tex_res->id * SQ_TEX_RESOURCE_offset, 7);
+ E32(sq_tex_resource_word0);
+ E32(sq_tex_resource_word1);
+ E32(((tex_res->base) >> 8));
+ E32(((tex_res->mip_base) >> 8));
+ E32(sq_tex_resource_word4);
+ E32(sq_tex_resource_word5);
+ E32(sq_tex_resource_word6);
RELOC_BATCH(tex_res->bo, domain, 0);
RELOC_BATCH(tex_res->mip_bo, domain, 0);
END_BATCH();
}
void
-r600_set_tex_sampler (ScrnInfoPtr pScrn, drmBufPtr ib, tex_sampler_t *s)
+r600_set_tex_sampler (ScrnInfoPtr pScrn, tex_sampler_t *s)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
uint32_t sq_tex_sampler_word0, sq_tex_sampler_word1, sq_tex_sampler_word2;
@@ -755,83 +671,83 @@ r600_set_tex_sampler (ScrnInfoPtr pScrn, drmBufPtr ib, tex_sampler_t *s)
sq_tex_sampler_word2 |= SQ_TEX_SAMPLER_WORD2_0__TYPE_bit;
BEGIN_BATCH(5);
- PACK0(ib, SQ_TEX_SAMPLER_WORD + s->id * SQ_TEX_SAMPLER_WORD_offset, 3);
- E32(ib, sq_tex_sampler_word0);
- E32(ib, sq_tex_sampler_word1);
- E32(ib, sq_tex_sampler_word2);
+ PACK0(SQ_TEX_SAMPLER_WORD + s->id * SQ_TEX_SAMPLER_WORD_offset, 3);
+ E32(sq_tex_sampler_word0);
+ E32(sq_tex_sampler_word1);
+ E32(sq_tex_sampler_word2);
END_BATCH();
}
//XXX deal with clip offsets in clip setup
void
-r600_set_screen_scissor(ScrnInfoPtr pScrn, drmBufPtr ib, int x1, int y1, int x2, int y2)
+r600_set_screen_scissor(ScrnInfoPtr pScrn, int x1, int y1, int x2, int y2)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
BEGIN_BATCH(4);
- PACK0(ib, PA_SC_SCREEN_SCISSOR_TL, 2);
- E32(ib, ((x1 << PA_SC_SCREEN_SCISSOR_TL__TL_X_shift) |
+ PACK0(PA_SC_SCREEN_SCISSOR_TL, 2);
+ E32(((x1 << PA_SC_SCREEN_SCISSOR_TL__TL_X_shift) |
(y1 << PA_SC_SCREEN_SCISSOR_TL__TL_Y_shift)));
- E32(ib, ((x2 << PA_SC_SCREEN_SCISSOR_BR__BR_X_shift) |
+ E32(((x2 << PA_SC_SCREEN_SCISSOR_BR__BR_X_shift) |
(y2 << PA_SC_SCREEN_SCISSOR_BR__BR_Y_shift)));
END_BATCH();
}
void
-r600_set_vport_scissor(ScrnInfoPtr pScrn, drmBufPtr ib, int id, int x1, int y1, int x2, int y2)
+r600_set_vport_scissor(ScrnInfoPtr pScrn, int id, int x1, int y1, int x2, int y2)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
BEGIN_BATCH(4);
- PACK0(ib, PA_SC_VPORT_SCISSOR_0_TL + id * PA_SC_VPORT_SCISSOR_0_TL_offset, 2);
- E32(ib, ((x1 << PA_SC_VPORT_SCISSOR_0_TL__TL_X_shift) |
+ PACK0(PA_SC_VPORT_SCISSOR_0_TL + id * PA_SC_VPORT_SCISSOR_0_TL_offset, 2);
+ E32(((x1 << PA_SC_VPORT_SCISSOR_0_TL__TL_X_shift) |
(y1 << PA_SC_VPORT_SCISSOR_0_TL__TL_Y_shift) |
WINDOW_OFFSET_DISABLE_bit));
- E32(ib, ((x2 << PA_SC_VPORT_SCISSOR_0_BR__BR_X_shift) |
+ E32(((x2 << PA_SC_VPORT_SCISSOR_0_BR__BR_X_shift) |
(y2 << PA_SC_VPORT_SCISSOR_0_BR__BR_Y_shift)));
END_BATCH();
}
void
-r600_set_generic_scissor(ScrnInfoPtr pScrn, drmBufPtr ib, int x1, int y1, int x2, int y2)
+r600_set_generic_scissor(ScrnInfoPtr pScrn, int x1, int y1, int x2, int y2)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
BEGIN_BATCH(4);
- PACK0(ib, PA_SC_GENERIC_SCISSOR_TL, 2);
- E32(ib, ((x1 << PA_SC_GENERIC_SCISSOR_TL__TL_X_shift) |
+ PACK0(PA_SC_GENERIC_SCISSOR_TL, 2);
+ E32(((x1 << PA_SC_GENERIC_SCISSOR_TL__TL_X_shift) |
(y1 << PA_SC_GENERIC_SCISSOR_TL__TL_Y_shift) |
WINDOW_OFFSET_DISABLE_bit));
- E32(ib, ((x2 << PA_SC_GENERIC_SCISSOR_BR__BR_X_shift) |
+ E32(((x2 << PA_SC_GENERIC_SCISSOR_BR__BR_X_shift) |
(y2 << PA_SC_GENERIC_SCISSOR_TL__TL_Y_shift)));
END_BATCH();
}
void
-r600_set_window_scissor(ScrnInfoPtr pScrn, drmBufPtr ib, int x1, int y1, int x2, int y2)
+r600_set_window_scissor(ScrnInfoPtr pScrn, int x1, int y1, int x2, int y2)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
BEGIN_BATCH(4);
- PACK0(ib, PA_SC_WINDOW_SCISSOR_TL, 2);
- E32(ib, ((x1 << PA_SC_WINDOW_SCISSOR_TL__TL_X_shift) |
+ PACK0(PA_SC_WINDOW_SCISSOR_TL, 2);
+ E32(((x1 << PA_SC_WINDOW_SCISSOR_TL__TL_X_shift) |
(y1 << PA_SC_WINDOW_SCISSOR_TL__TL_Y_shift) |
WINDOW_OFFSET_DISABLE_bit));
- E32(ib, ((x2 << PA_SC_WINDOW_SCISSOR_BR__BR_X_shift) |
+ E32(((x2 << PA_SC_WINDOW_SCISSOR_BR__BR_X_shift) |
(y2 << PA_SC_WINDOW_SCISSOR_BR__BR_Y_shift)));
END_BATCH();
}
void
-r600_set_clip_rect(ScrnInfoPtr pScrn, drmBufPtr ib, int id, int x1, int y1, int x2, int y2)
+r600_set_clip_rect(ScrnInfoPtr pScrn, int id, int x1, int y1, int x2, int y2)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
BEGIN_BATCH(4);
- PACK0(ib, PA_SC_CLIPRECT_0_TL + id * PA_SC_CLIPRECT_0_TL_offset, 2);
- E32(ib, ((x1 << PA_SC_CLIPRECT_0_TL__TL_X_shift) |
+ PACK0(PA_SC_CLIPRECT_0_TL + id * PA_SC_CLIPRECT_0_TL_offset, 2);
+ E32(((x1 << PA_SC_CLIPRECT_0_TL__TL_X_shift) |
(y1 << PA_SC_CLIPRECT_0_TL__TL_Y_shift)));
- E32(ib, ((x2 << PA_SC_CLIPRECT_0_BR__BR_X_shift) |
+ E32(((x2 << PA_SC_CLIPRECT_0_BR__BR_X_shift) |
(y2 << PA_SC_CLIPRECT_0_BR__BR_Y_shift)));
END_BATCH();
}
@@ -841,7 +757,7 @@ r600_set_clip_rect(ScrnInfoPtr pScrn, drmBufPtr ib, int id, int x1, int y1, int
*/
void
-r600_set_default_state(ScrnInfoPtr pScrn, drmBufPtr ib)
+r600_set_default_state(ScrnInfoPtr pScrn)
{
tex_resource_t tex_res;
shader_config_t fs_conf;
@@ -858,7 +774,7 @@ r600_set_default_state(ScrnInfoPtr pScrn, drmBufPtr ib)
accel_state->XInited3D = TRUE;
- r600_start_3d(pScrn, accel_state->ib);
+ r600_start_3d(pScrn);
// SQ
sq_conf.ps_prio = 0;
@@ -982,34 +898,34 @@ r600_set_default_state(ScrnInfoPtr pScrn, drmBufPtr ib)
break;
}
- r600_sq_setup(pScrn, ib, &sq_conf);
+ r600_sq_setup(pScrn, &sq_conf);
/* set fake reloc for unused depth */
BEGIN_BATCH(3 + 2);
- EREG(ib, DB_DEPTH_INFO, 0);
+ EREG(DB_DEPTH_INFO, 0);
RELOC_BATCH(accel_state->shaders_bo, RADEON_GEM_DOMAIN_VRAM, 0);
END_BATCH();
BEGIN_BATCH(80);
if (info->ChipFamily < CHIP_FAMILY_RV770) {
- EREG(ib, TA_CNTL_AUX, (( 3 << GRADIENT_CREDIT_shift) |
+ EREG(TA_CNTL_AUX, (( 3 << GRADIENT_CREDIT_shift) |
(28 << TD_FIFO_CREDIT_shift)));
- EREG(ib, VC_ENHANCE, 0);
- EREG(ib, R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
- EREG(ib, DB_DEBUG, 0x82000000); /* ? */
- EREG(ib, DB_WATERMARKS, ((4 << DEPTH_FREE_shift) |
+ EREG(VC_ENHANCE, 0);
+ EREG(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+ EREG(DB_DEBUG, 0x82000000); /* ? */
+ EREG(DB_WATERMARKS, ((4 << DEPTH_FREE_shift) |
(16 << DEPTH_FLUSH_shift) |
(0 << FORCE_SUMMARIZE_shift) |
(4 << DEPTH_PENDING_FREE_shift) |
(16 << DEPTH_CACHELINE_FREE_shift) |
0));
} else {
- EREG(ib, TA_CNTL_AUX, (( 2 << GRADIENT_CREDIT_shift) |
+ EREG(TA_CNTL_AUX, (( 2 << GRADIENT_CREDIT_shift) |
(28 << TD_FIFO_CREDIT_shift)));
- EREG(ib, VC_ENHANCE, 0);
- EREG(ib, R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, VS_PC_LIMIT_ENABLE_bit);
- EREG(ib, DB_DEBUG, 0);
- EREG(ib, DB_WATERMARKS, ((4 << DEPTH_FREE_shift) |
+ EREG(VC_ENHANCE, 0);
+ EREG(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, VS_PC_LIMIT_ENABLE_bit);
+ EREG(DB_DEBUG, 0);
+ EREG(DB_WATERMARKS, ((4 << DEPTH_FREE_shift) |
(16 << DEPTH_FLUSH_shift) |
(0 << FORCE_SUMMARIZE_shift) |
(4 << DEPTH_PENDING_FREE_shift) |
@@ -1017,190 +933,190 @@ r600_set_default_state(ScrnInfoPtr pScrn, drmBufPtr ib)
0));
}
- PACK0(ib, SQ_VTX_BASE_VTX_LOC, 2);
- E32(ib, 0);
- E32(ib, 0);
-
- PACK0(ib, SQ_ESGS_RING_ITEMSIZE, 9);
- E32(ib, 0); // SQ_ESGS_RING_ITEMSIZE
- E32(ib, 0); // SQ_GSVS_RING_ITEMSIZE
- E32(ib, 0); // SQ_ESTMP_RING_ITEMSIZE
- E32(ib, 0); // SQ_GSTMP_RING_ITEMSIZE
- E32(ib, 0); // SQ_VSTMP_RING_ITEMSIZE
- E32(ib, 0); // SQ_PSTMP_RING_ITEMSIZE
- E32(ib, 0); // SQ_FBUF_RING_ITEMSIZE
- E32(ib, 0); // SQ_REDUC_RING_ITEMSIZE
- E32(ib, 0); // SQ_GS_VERT_ITEMSIZE
+ PACK0(SQ_VTX_BASE_VTX_LOC, 2);
+ E32(0);
+ E32(0);
+
+ PACK0(SQ_ESGS_RING_ITEMSIZE, 9);
+ E32(0); // SQ_ESGS_RING_ITEMSIZE
+ E32(0); // SQ_GSVS_RING_ITEMSIZE
+ E32(0); // SQ_ESTMP_RING_ITEMSIZE
+ E32(0); // SQ_GSTMP_RING_ITEMSIZE
+ E32(0); // SQ_VSTMP_RING_ITEMSIZE
+ E32(0); // SQ_PSTMP_RING_ITEMSIZE
+ E32(0); // SQ_FBUF_RING_ITEMSIZE
+ E32(0); // SQ_REDUC_RING_ITEMSIZE
+ E32(0); // SQ_GS_VERT_ITEMSIZE
// DB
- EREG(ib, DB_DEPTH_CONTROL, 0);
- PACK0(ib, DB_RENDER_CONTROL, 2);
- E32(ib, STENCIL_COMPRESS_DISABLE_bit | DEPTH_COMPRESS_DISABLE_bit);
+ EREG(DB_DEPTH_CONTROL, 0);
+ PACK0(DB_RENDER_CONTROL, 2);
+ E32(STENCIL_COMPRESS_DISABLE_bit | DEPTH_COMPRESS_DISABLE_bit);
if (info->ChipFamily < CHIP_FAMILY_RV770)
- E32(ib, FORCE_SHADER_Z_ORDER_bit);
+ E32(FORCE_SHADER_Z_ORDER_bit);
else
- E32(ib, 0);
- EREG(ib, DB_ALPHA_TO_MASK, ((2 << ALPHA_TO_MASK_OFFSET0_shift) |
+ E32(0);
+ EREG(DB_ALPHA_TO_MASK, ((2 << ALPHA_TO_MASK_OFFSET0_shift) |
(2 << ALPHA_TO_MASK_OFFSET1_shift) |
(2 << ALPHA_TO_MASK_OFFSET2_shift) |
(2 << ALPHA_TO_MASK_OFFSET3_shift)));
- EREG(ib, DB_SHADER_CONTROL, ((1 << Z_ORDER_shift) | /* EARLY_Z_THEN_LATE_Z */
+ EREG(DB_SHADER_CONTROL, ((1 << Z_ORDER_shift) | /* EARLY_Z_THEN_LATE_Z */
DUAL_EXPORT_ENABLE_bit)); /* Only useful if no depth export */
- PACK0(ib, DB_STENCIL_CLEAR, 2);
- E32(ib, 0); // DB_STENCIL_CLEAR
- E32(ib, 0); // DB_DEPTH_CLEAR
+ PACK0(DB_STENCIL_CLEAR, 2);
+ E32(0); // DB_STENCIL_CLEAR
+ E32(0); // DB_DEPTH_CLEAR
- PACK0(ib, DB_STENCILREFMASK, 3);
- E32(ib, 0); // DB_STENCILREFMASK
- E32(ib, 0); // DB_STENCILREFMASK_BF
- E32(ib, 0); // SX_ALPHA_REF
+ PACK0(DB_STENCILREFMASK, 3);
+ E32(0); // DB_STENCILREFMASK
+ E32(0); // DB_STENCILREFMASK_BF
+ E32(0); // SX_ALPHA_REF
- PACK0(ib, CB_CLRCMP_CONTROL, 4);
- E32(ib, 1 << CLRCMP_FCN_SEL_shift); // CB_CLRCMP_CONTROL: use CLRCMP_FCN_SRC
- E32(ib, 0); // CB_CLRCMP_SRC
- E32(ib, 0); // CB_CLRCMP_DST
- E32(ib, 0); // CB_CLRCMP_MSK
+ PACK0(CB_CLRCMP_CONTROL, 4);
+ E32(1 << CLRCMP_FCN_SEL_shift); // CB_CLRCMP_CONTROL: use CLRCMP_FCN_SRC
+ E32(0); // CB_CLRCMP_SRC
+ E32(0); // CB_CLRCMP_DST
+ E32(0); // CB_CLRCMP_MSK
- EREG(ib, CB_SHADER_MASK, OUTPUT0_ENABLE_mask);
- EREG(ib, R7xx_CB_SHADER_CONTROL, (RT0_ENABLE_bit));
+ EREG(CB_SHADER_MASK, OUTPUT0_ENABLE_mask);
+ EREG(R7xx_CB_SHADER_CONTROL, (RT0_ENABLE_bit));
- PACK0(ib, SX_ALPHA_TEST_CONTROL, 5);
- E32(ib, 0); // SX_ALPHA_TEST_CONTROL
- E32(ib, 0x00000000); // CB_BLEND_RED
- E32(ib, 0x00000000); // CB_BLEND_GREEN
- E32(ib, 0x00000000); // CB_BLEND_BLUE
- E32(ib, 0x00000000); // CB_BLEND_ALPHA
+ PACK0(SX_ALPHA_TEST_CONTROL, 5);
+ E32(0); // SX_ALPHA_TEST_CONTROL
+ E32(0x00000000); // CB_BLEND_RED
+ E32(0x00000000); // CB_BLEND_GREEN
+ E32(0x00000000); // CB_BLEND_BLUE
+ E32(0x00000000); // CB_BLEND_ALPHA
- EREG(ib, PA_SC_WINDOW_OFFSET, ((0 << WINDOW_X_OFFSET_shift) |
+ EREG(PA_SC_WINDOW_OFFSET, ((0 << WINDOW_X_OFFSET_shift) |
(0 << WINDOW_Y_OFFSET_shift)));
if (info->ChipFamily < CHIP_FAMILY_RV770)
- EREG(ib, R7xx_PA_SC_EDGERULE, 0x00000000);
+ EREG(R7xx_PA_SC_EDGERULE, 0x00000000);
else
- EREG(ib, R7xx_PA_SC_EDGERULE, 0xAAAAAAAA);
+ EREG(R7xx_PA_SC_EDGERULE, 0xAAAAAAAA);
- EREG(ib, PA_SC_CLIPRECT_RULE, CLIP_RULE_mask);
+ EREG(PA_SC_CLIPRECT_RULE, CLIP_RULE_mask);
END_BATCH();
/* clip boolean is set to always visible -> doesn't matter */
for (i = 0; i < PA_SC_CLIPRECT_0_TL_num; i++)
- r600_set_clip_rect(pScrn, ib, i, 0, 0, 8192, 8192);
+ r600_set_clip_rect(pScrn, i, 0, 0, 8192, 8192);
for (i = 0; i < PA_SC_VPORT_SCISSOR_0_TL_num; i++)
- r600_set_vport_scissor(pScrn, ib, i, 0, 0, 8192, 8192);
+ r600_set_vport_scissor(pScrn, i, 0, 0, 8192, 8192);
BEGIN_BATCH(49);
- PACK0(ib, PA_SC_MPASS_PS_CNTL, 2);
- E32(ib, 0);
+ PACK0(PA_SC_MPASS_PS_CNTL, 2);
+ E32(0);
if (info->ChipFamily < CHIP_FAMILY_RV770)
- E32(ib, (WALK_ORDER_ENABLE_bit | FORCE_EOV_CNTDWN_ENABLE_bit));
+ E32((WALK_ORDER_ENABLE_bit | FORCE_EOV_CNTDWN_ENABLE_bit));
else
- E32(ib, (FORCE_EOV_CNTDWN_ENABLE_bit | FORCE_EOV_REZ_ENABLE_bit |
+ E32((FORCE_EOV_CNTDWN_ENABLE_bit | FORCE_EOV_REZ_ENABLE_bit |
0x00500000)); /* ? */
- PACK0(ib, PA_SC_LINE_CNTL, 9);
- E32(ib, 0); // PA_SC_LINE_CNTL
- E32(ib, 0); // PA_SC_AA_CONFIG
- E32(ib, ((2 << PA_SU_VTX_CNTL__ROUND_MODE_shift) | PIX_CENTER_bit | // PA_SU_VTX_CNTL
+ PACK0(PA_SC_LINE_CNTL, 9);
+ E32(0); // PA_SC_LINE_CNTL
+ E32(0); // PA_SC_AA_CONFIG
+ E32(((2 << PA_SU_VTX_CNTL__ROUND_MODE_shift) | PIX_CENTER_bit | // PA_SU_VTX_CNTL
(5 << QUANT_MODE_shift))); /* Round to Even, fixed point 1/256 */
- EFLOAT(ib, 1.0); // PA_CL_GB_VERT_CLIP_ADJ
- EFLOAT(ib, 1.0); // PA_CL_GB_VERT_DISC_ADJ
- EFLOAT(ib, 1.0); // PA_CL_GB_HORZ_CLIP_ADJ
- EFLOAT(ib, 1.0); // PA_CL_GB_HORZ_DISC_ADJ
- E32(ib, 0); // PA_SC_AA_SAMPLE_LOCS_MCTX
- E32(ib, 0); // PA_SC_AA_SAMPLE_LOCS_8S_WD1_M
-
- EREG(ib, PA_SC_AA_MASK, 0xFFFFFFFF);
-
- PACK0(ib, PA_CL_CLIP_CNTL, 5);
- E32(ib, CLIP_DISABLE_bit); // PA_CL_CLIP_CNTL
- E32(ib, FACE_bit); // PA_SU_SC_MODE_CNTL
- E32(ib, VTX_XY_FMT_bit); // PA_CL_VTE_CNTL
- E32(ib, 0); // PA_CL_VS_OUT_CNTL
- E32(ib, 0); // PA_CL_NANINF_CNTL
-
- PACK0(ib, PA_SU_POLY_OFFSET_DB_FMT_CNTL, 6);
- E32(ib, 0); // PA_SU_POLY_OFFSET_DB_FMT_CNTL
- E32(ib, 0); // PA_SU_POLY_OFFSET_CLAMP
- E32(ib, 0); // PA_SU_POLY_OFFSET_FRONT_SCALE
- E32(ib, 0); // PA_SU_POLY_OFFSET_FRONT_OFFSET
- E32(ib, 0); // PA_SU_POLY_OFFSET_BACK_SCALE
- E32(ib, 0); // PA_SU_POLY_OFFSET_BACK_OFFSET
+ EFLOAT(1.0); // PA_CL_GB_VERT_CLIP_ADJ
+ EFLOAT(1.0); // PA_CL_GB_VERT_DISC_ADJ
+ EFLOAT(1.0); // PA_CL_GB_HORZ_CLIP_ADJ
+ EFLOAT(1.0); // PA_CL_GB_HORZ_DISC_ADJ
+ E32(0); // PA_SC_AA_SAMPLE_LOCS_MCTX
+ E32(0); // PA_SC_AA_SAMPLE_LOCS_8S_WD1_M
+
+ EREG(PA_SC_AA_MASK, 0xFFFFFFFF);
+
+ PACK0(PA_CL_CLIP_CNTL, 5);
+ E32(CLIP_DISABLE_bit); // PA_CL_CLIP_CNTL
+ E32(FACE_bit); // PA_SU_SC_MODE_CNTL
+ E32(VTX_XY_FMT_bit); // PA_CL_VTE_CNTL
+ E32(0); // PA_CL_VS_OUT_CNTL
+ E32(0); // PA_CL_NANINF_CNTL
+
+ PACK0(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 6);
+ E32(0); // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ E32(0); // PA_SU_POLY_OFFSET_CLAMP
+ E32(0); // PA_SU_POLY_OFFSET_FRONT_SCALE
+ E32(0); // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ E32(0); // PA_SU_POLY_OFFSET_BACK_SCALE
+ E32(0); // PA_SU_POLY_OFFSET_BACK_OFFSET
// SPI
if (info->ChipFamily < CHIP_FAMILY_RV770)
- EREG(ib, R7xx_SPI_THREAD_GROUPING, 0);
+ EREG(R7xx_SPI_THREAD_GROUPING, 0);
else
- EREG(ib, R7xx_SPI_THREAD_GROUPING, (1 << PS_GROUPING_shift));
+ EREG(R7xx_SPI_THREAD_GROUPING, (1 << PS_GROUPING_shift));
/* default Interpolator setup */
- EREG(ib, SPI_VS_OUT_ID_0, ((0 << SEMANTIC_0_shift) |
+ EREG(SPI_VS_OUT_ID_0, ((0 << SEMANTIC_0_shift) |
(1 << SEMANTIC_1_shift)));
- PACK0(ib, SPI_PS_INPUT_CNTL_0 + (0 << 2), 2);
+ PACK0(SPI_PS_INPUT_CNTL_0 + (0 << 2), 2);
/* SPI_PS_INPUT_CNTL_0 maps to GPR[0] - load with semantic id 0 */
- E32(ib, ((0 << SEMANTIC_shift) |
+ E32(((0 << SEMANTIC_shift) |
(0x01 << DEFAULT_VAL_shift) |
SEL_CENTROID_bit));
/* SPI_PS_INPUT_CNTL_1 maps to GPR[1] - load with semantic id 1 */
- E32(ib, ((1 << SEMANTIC_shift) |
+ E32(((1 << SEMANTIC_shift) |
(0x01 << DEFAULT_VAL_shift) |
SEL_CENTROID_bit));
- PACK0(ib, SPI_INPUT_Z, 4);
- E32(ib, 0); // SPI_INPUT_Z
- E32(ib, 0); // SPI_FOG_CNTL
- E32(ib, 0); // SPI_FOG_FUNC_SCALE
- E32(ib, 0); // SPI_FOG_FUNC_BIAS
+ PACK0(SPI_INPUT_Z, 4);
+ E32(0); // SPI_INPUT_Z
+ E32(0); // SPI_FOG_CNTL
+ E32(0); // SPI_FOG_FUNC_SCALE
+ E32(0); // SPI_FOG_FUNC_BIAS
END_BATCH();
// clear FS
fs_conf.bo = accel_state->shaders_bo;
- r600_fs_setup(pScrn, ib, &fs_conf, RADEON_GEM_DOMAIN_VRAM);
+ r600_fs_setup(pScrn, &fs_conf, RADEON_GEM_DOMAIN_VRAM);
// VGT
BEGIN_BATCH(46);
- PACK0(ib, VGT_MAX_VTX_INDX, 4);
- E32(ib, 0xffffff); // VGT_MAX_VTX_INDX
- E32(ib, 0); // VGT_MIN_VTX_INDX
- E32(ib, 0); // VGT_INDX_OFFSET
- E32(ib, 0); // VGT_MULTI_PRIM_IB_RESET_INDX
-
- EREG(ib, VGT_PRIMITIVEID_EN, 0);
- EREG(ib, VGT_MULTI_PRIM_IB_RESET_EN, 0);
-
- PACK0(ib, VGT_INSTANCE_STEP_RATE_0, 2);
- E32(ib, 0); // VGT_INSTANCE_STEP_RATE_0
- E32(ib, 0); // VGT_INSTANCE_STEP_RATE_1
-
- PACK0(ib, PA_SU_POINT_SIZE, 17);
- E32(ib, 0); // PA_SU_POINT_SIZE
- E32(ib, 0); // PA_SU_POINT_MINMAX
- E32(ib, (8 << PA_SU_LINE_CNTL__WIDTH_shift)); /* Line width 1 pixel */ // PA_SU_LINE_CNTL
- E32(ib, 0); // PA_SC_LINE_STIPPLE
- E32(ib, 0); // VGT_OUTPUT_PATH_CNTL
- E32(ib, 0); // VGT_HOS_CNTL
- E32(ib, 0); // VGT_HOS_MAX_TESS_LEVEL
- E32(ib, 0); // VGT_HOS_MIN_TESS_LEVEL
- E32(ib, 0); // VGT_HOS_REUSE_DEPTH
- E32(ib, 0); // VGT_GROUP_PRIM_TYPE
- E32(ib, 0); // VGT_GROUP_FIRST_DECR
- E32(ib, 0); // VGT_GROUP_DECR
- E32(ib, 0); // VGT_GROUP_VECT_0_CNTL
- E32(ib, 0); // VGT_GROUP_VECT_1_CNTL
- E32(ib, 0); // VGT_GROUP_VECT_0_FMT_CNTL
- E32(ib, 0); // VGT_GROUP_VECT_1_FMT_CNTL
- E32(ib, 0); // VGT_GS_MODE
-
- PACK0(ib, VGT_STRMOUT_EN, 3);
- E32(ib, 0); // VGT_STRMOUT_EN
- E32(ib, 0); // VGT_REUSE_OFF
- E32(ib, 0); // VGT_VTX_CNT_EN
-
- EREG(ib, VGT_STRMOUT_BUFFER_EN, 0);
- EREG(ib, SX_MISC, 0);
+ PACK0(VGT_MAX_VTX_INDX, 4);
+ E32(0xffffff); // VGT_MAX_VTX_INDX
+ E32(0); // VGT_MIN_VTX_INDX
+ E32(0); // VGT_INDX_OFFSET
+ E32(0); // VGT_MULTI_PRIM_IB_RESET_INDX
+
+ EREG(VGT_PRIMITIVEID_EN, 0);
+ EREG(VGT_MULTI_PRIM_IB_RESET_EN, 0);
+
+ PACK0(VGT_INSTANCE_STEP_RATE_0, 2);
+ E32(0); // VGT_INSTANCE_STEP_RATE_0
+ E32(0); // VGT_INSTANCE_STEP_RATE_1
+
+ PACK0(PA_SU_POINT_SIZE, 17);
+ E32(0); // PA_SU_POINT_SIZE
+ E32(0); // PA_SU_POINT_MINMAX
+ E32((8 << PA_SU_LINE_CNTL__WIDTH_shift)); /* Line width 1 pixel */ // PA_SU_LINE_CNTL
+ E32(0); // PA_SC_LINE_STIPPLE
+ E32(0); // VGT_OUTPUT_PATH_CNTL
+ E32(0); // VGT_HOS_CNTL
+ E32(0); // VGT_HOS_MAX_TESS_LEVEL
+ E32(0); // VGT_HOS_MIN_TESS_LEVEL
+ E32(0); // VGT_HOS_REUSE_DEPTH
+ E32(0); // VGT_GROUP_PRIM_TYPE
+ E32(0); // VGT_GROUP_FIRST_DECR
+ E32(0); // VGT_GROUP_DECR
+ E32(0); // VGT_GROUP_VECT_0_CNTL
+ E32(0); // VGT_GROUP_VECT_1_CNTL
+ E32(0); // VGT_GROUP_VECT_0_FMT_CNTL
+ E32(0); // VGT_GROUP_VECT_1_FMT_CNTL
+ E32(0); // VGT_GS_MODE
+
+ PACK0(VGT_STRMOUT_EN, 3);
+ E32(0); // VGT_STRMOUT_EN
+ E32(0); // VGT_REUSE_OFF
+ E32(0); // VGT_VTX_CNT_EN
+
+ EREG(VGT_STRMOUT_BUFFER_EN, 0);
+ EREG(SX_MISC, 0);
END_BATCH();
}
@@ -1210,7 +1126,7 @@ r600_set_default_state(ScrnInfoPtr pScrn, drmBufPtr ib)
*/
void
-r600_draw_immd(ScrnInfoPtr pScrn, drmBufPtr ib, draw_config_t *draw_conf, uint32_t *indices)
+r600_draw_immd(ScrnInfoPtr pScrn, draw_config_t *draw_conf, uint32_t *indices)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
uint32_t i, count;
@@ -1223,52 +1139,52 @@ r600_draw_immd(ScrnInfoPtr pScrn, drmBufPtr ib, draw_config_t *draw_conf, uint32
count += draw_conf->num_indices;
BEGIN_BATCH(8 + count);
- EREG(ib, VGT_PRIMITIVE_TYPE, draw_conf->prim_type);
- PACK3(ib, IT_INDEX_TYPE, 1);
+ EREG(VGT_PRIMITIVE_TYPE, draw_conf->prim_type);
+ PACK3(IT_INDEX_TYPE, 1);
#if X_BYTE_ORDER == X_BIG_ENDIAN
- E32(ib, IT_INDEX_TYPE_SWAP_MODE(ENDIAN_8IN32) | draw_conf->index_type);
+ E32(IT_INDEX_TYPE_SWAP_MODE(ENDIAN_8IN32) | draw_conf->index_type);
#else
- E32(ib, draw_conf->index_type);
+ E32(draw_conf->index_type);
#endif
- PACK3(ib, IT_NUM_INSTANCES, 1);
- E32(ib, draw_conf->num_instances);
+ PACK3(IT_NUM_INSTANCES, 1);
+ E32(draw_conf->num_instances);
- PACK3(ib, IT_DRAW_INDEX_IMMD, count);
- E32(ib, draw_conf->num_indices);
- E32(ib, draw_conf->vgt_draw_initiator);
+ PACK3(IT_DRAW_INDEX_IMMD, count);
+ E32(draw_conf->num_indices);
+ E32(draw_conf->vgt_draw_initiator);
if (draw_conf->index_type == DI_INDEX_SIZE_16_BIT) {
for (i = 0; i < draw_conf->num_indices; i += 2) {
if ((i + 1) == draw_conf->num_indices)
- E32(ib, indices[i]);
+ E32(indices[i]);
else
- E32(ib, (indices[i] | (indices[i + 1] << 16)));
+ E32((indices[i] | (indices[i + 1] << 16)));
}
} else {
for (i = 0; i < draw_conf->num_indices; i++)
- E32(ib, indices[i]);
+ E32(indices[i]);
}
END_BATCH();
}
void
-r600_draw_auto(ScrnInfoPtr pScrn, drmBufPtr ib, draw_config_t *draw_conf)
+r600_draw_auto(ScrnInfoPtr pScrn, draw_config_t *draw_conf)
{
RADEONInfoPtr info = RADEONPTR(pScrn);
BEGIN_BATCH(10);
- EREG(ib, VGT_PRIMITIVE_TYPE, draw_conf->prim_type);
- PACK3(ib, IT_INDEX_TYPE, 1);
+ EREG(VGT_PRIMITIVE_TYPE, draw_conf->prim_type);
+ PACK3(IT_INDEX_TYPE, 1);
#if X_BYTE_ORDER == X_BIG_ENDIAN
- E32(ib, IT_INDEX_TYPE_SWAP_MODE(ENDIAN_8IN32) | draw_conf->index_type);
+ E32(IT_INDEX_TYPE_SWAP_MODE(ENDIAN_8IN32) | draw_conf->index_type);
#else
- E32(ib, draw_conf->index_type);
+ E32(draw_conf->index_type);
#endif
- PACK3(ib, IT_NUM_INSTANCES, 1);
- E32(ib, draw_conf->num_instances);
- PACK3(ib, IT_DRAW_INDEX_AUTO, 2);
- E32(ib, draw_conf->num_indices);
- E32(ib, draw_conf->vgt_draw_initiator);
+ PACK3(IT_NUM_INSTANCES, 1);
+ E32(draw_conf->num_instances);
+ PACK3(IT_DRAW_INDEX_AUTO, 2);
+ E32(draw_conf->num_indices);
+ E32(draw_conf->vgt_draw_initiator);
END_BATCH();
}
@@ -1286,7 +1202,7 @@ void r600_finish_op(ScrnInfoPtr pScrn, int vtx_size)
CLEAR (vtx_res);
if (accel_state->vbo.vb_offset == accel_state->vbo.vb_start_op) {
- R600IBDiscard(pScrn, accel_state->ib);
+ R600IBDiscard(pScrn);
return;
}
@@ -1301,7 +1217,7 @@ void r600_finish_op(ScrnInfoPtr pScrn, int vtx_size)
#if X_BYTE_ORDER == X_BIG_ENDIAN
vtx_res.endian = SQ_ENDIAN_8IN32;
#endif
- r600_set_vtx_resource(pScrn, accel_state->ib, &vtx_res, RADEON_GEM_DOMAIN_GTT);
+ r600_set_vtx_resource(pScrn, &vtx_res, RADEON_GEM_DOMAIN_GTT);
/* Draw */
draw_conf.prim_type = DI_PT_RECTLIST;
@@ -1310,22 +1226,18 @@ void r600_finish_op(ScrnInfoPtr pScrn, int vtx_size)
draw_conf.num_indices = vtx_res.vtx_num_entries / vtx_res.vtx_size_dw;
draw_conf.index_type = DI_INDEX_SIZE_16_BIT;
- r600_draw_auto(pScrn, accel_state->ib, &draw_conf);
+ r600_draw_auto(pScrn, &draw_conf);
/* XXX drm should handle this in fence submit */
- r600_wait_3d_idle_clean(pScrn, accel_state->ib);
+ r600_wait_3d_idle_clean(pScrn);
/* sync dst surface */
- r600_cp_set_surface_sync(pScrn, accel_state->ib, (CB_ACTION_ENA_bit | CB0_DEST_BASE_ENA_bit),
+ r600_cp_set_surface_sync(pScrn, (CB_ACTION_ENA_bit | CB0_DEST_BASE_ENA_bit),
accel_state->dst_size, accel_state->dst_obj.offset,
accel_state->dst_obj.bo, 0, accel_state->dst_obj.domain);
accel_state->vbo.vb_start_op = -1;
accel_state->ib_reset_op = 0;
-#if KMS_MULTI_OP
- if (!info->cs)
-#endif
- R600CPFlushIndirect(pScrn, accel_state->ib);
}