Diffstat (limited to 'lib/mesa/src/gallium/drivers/r600/r600_state_common.c')
-rw-r--r--  lib/mesa/src/gallium/drivers/r600/r600_state_common.c | 211
1 file changed, 139 insertions(+), 72 deletions(-)
diff --git a/lib/mesa/src/gallium/drivers/r600/r600_state_common.c b/lib/mesa/src/gallium/drivers/r600/r600_state_common.c
index 4718286bd..2ded6c822 100644
--- a/lib/mesa/src/gallium/drivers/r600/r600_state_common.c
+++ b/lib/mesa/src/gallium/drivers/r600/r600_state_common.c
@@ -29,6 +29,7 @@
#include "r600d.h"
#include "util/format/u_format_s3tc.h"
+#include "util/u_draw.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
@@ -37,6 +38,10 @@
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_ureg.h"
+#include "nir.h"
+#include "nir/nir_to_tgsi_info.h"
+#include "tgsi/tgsi_from_mesa.h"
+
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
assert(!cb->buf);
@@ -72,12 +77,12 @@ void r600_init_atom(struct r600_context *rctx,
void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
- r600_emit_command_buffer(rctx->b.gfx.cs, ((struct r600_cso_state*)atom)->cb);
+ r600_emit_command_buffer(&rctx->b.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}
void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
unsigned alpha_ref = a->sx_alpha_ref;
@@ -245,7 +250,7 @@ static void r600_set_blend_color(struct pipe_context *ctx,
void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
struct pipe_blend_color *state = &rctx->blend_color.state;
radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
@@ -257,7 +262,7 @@ void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
struct r600_vgt_state *a = (struct r600_vgt_state *)atom;
radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
@@ -281,17 +286,17 @@ static void r600_set_clip_state(struct pipe_context *ctx,
}
static void r600_set_stencil_ref(struct pipe_context *ctx,
- const struct r600_stencil_ref *state)
+ const struct r600_stencil_ref state)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- rctx->stencil_ref.state = *state;
+ rctx->stencil_ref.state = state;
r600_mark_atom_dirty(rctx, &rctx->stencil_ref.atom);
}
void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;
radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
@@ -306,25 +311,25 @@ void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
}
static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
- const struct pipe_stencil_ref *state)
+ const struct pipe_stencil_ref state)
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_dsa_state *dsa = (struct r600_dsa_state*)rctx->dsa_state.cso;
struct r600_stencil_ref ref;
- rctx->stencil_ref.pipe_state = *state;
+ rctx->stencil_ref.pipe_state = state;
if (!dsa)
return;
- ref.ref_value[0] = state->ref_value[0];
- ref.ref_value[1] = state->ref_value[1];
+ ref.ref_value[0] = state.ref_value[0];
+ ref.ref_value[1] = state.ref_value[1];
ref.valuemask[0] = dsa->valuemask[0];
ref.valuemask[1] = dsa->valuemask[1];
ref.writemask[0] = dsa->writemask[0];
ref.writemask[1] = dsa->writemask[1];
- r600_set_stencil_ref(ctx, &ref);
+ r600_set_stencil_ref(ctx, ref);
}
static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
@@ -357,7 +362,7 @@ static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
}
}
- r600_set_stencil_ref(ctx, &ref);
+ r600_set_stencil_ref(ctx, ref);
/* Update alphatest state. */
if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
@@ -562,6 +567,8 @@ void r600_vertex_buffers_dirty(struct r600_context *rctx)
static void r600_set_vertex_buffers(struct pipe_context *ctx,
unsigned start_slot, unsigned count,
+ unsigned unbind_num_trailing_slots,
+ bool take_ownership,
const struct pipe_vertex_buffer *input)
{
struct r600_context *rctx = (struct r600_context *)ctx;
@@ -582,7 +589,13 @@ static void r600_set_vertex_buffers(struct pipe_context *ctx,
if (input[i].buffer.resource) {
vb[i].stride = input[i].stride;
vb[i].buffer_offset = input[i].buffer_offset;
- pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
+ if (take_ownership) {
+ pipe_resource_reference(&vb[i].buffer.resource, NULL);
+ vb[i].buffer.resource = input[i].buffer.resource;
+ } else {
+ pipe_resource_reference(&vb[i].buffer.resource,
+ input[i].buffer.resource);
+ }
new_buffer_mask |= 1 << i;
r600_context_add_resource_size(ctx, input[i].buffer.resource);
} else {
@@ -598,6 +611,11 @@ static void r600_set_vertex_buffers(struct pipe_context *ctx,
disable_mask = ((1ull << count) - 1);
}
+ for (i = 0; i < unbind_num_trailing_slots; i++) {
+ pipe_resource_reference(&vb[count + i].buffer.resource, NULL);
+ }
+ disable_mask |= ((1ull << unbind_num_trailing_slots) - 1) << count;
+
disable_mask <<= start_slot;
new_buffer_mask <<= start_slot;
@@ -622,6 +640,7 @@ void r600_sampler_views_dirty(struct r600_context *rctx,
static void r600_set_sampler_views(struct pipe_context *pipe,
enum pipe_shader_type shader,
unsigned start, unsigned count,
+ unsigned unbind_num_trailing_slots,
struct pipe_sampler_view **views)
{
struct r600_context *rctx = (struct r600_context *) pipe;
@@ -815,9 +834,12 @@ static inline void r600_shader_selector_key(const struct pipe_context *ctx,
rctx->rasterizer && rctx->rasterizer->multisample_enable &&
!rctx->framebuffer.cb0_is_integer;
key->ps.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
+ key->ps.apply_sample_id_mask = (rctx->ps_iter_samples > 1) || !rctx->rasterizer->multisample_enable;
/* Dual-source blending only makes sense with nr_cbufs == 1. */
- if (key->ps.nr_cbufs == 1 && rctx->dual_src_blend)
+ if (key->ps.nr_cbufs == 1 && rctx->dual_src_blend) {
key->ps.nr_cbufs = 2;
+ key->ps.dual_source_blend = 1;
+ }
break;
}
case PIPE_SHADER_TESS_EVAL:
@@ -906,14 +928,19 @@ int r600_shader_select(struct pipe_context *ctx,
}
struct r600_pipe_shader_selector *r600_create_shader_state_tokens(struct pipe_context *ctx,
- const struct tgsi_token *tokens,
+ const void *prog, enum pipe_shader_ir ir,
unsigned pipe_shader_type)
{
struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
sel->type = pipe_shader_type;
- sel->tokens = tgsi_dup_tokens(tokens);
- tgsi_scan_shader(tokens, &sel->info);
+ if (ir == PIPE_SHADER_IR_TGSI) {
+ sel->tokens = tgsi_dup_tokens((const struct tgsi_token *)prog);
+ tgsi_scan_shader(sel->tokens, &sel->info);
+ } else if (ir == PIPE_SHADER_IR_NIR){
+ sel->nir = nir_shader_clone(NULL, (const nir_shader *)prog);
+ nir_tgsi_scan_shader(sel->nir, &sel->info, true);
+ }
return sel;
}
@@ -922,8 +949,16 @@ static void *r600_create_shader_state(struct pipe_context *ctx,
unsigned pipe_shader_type)
{
int i;
- struct r600_pipe_shader_selector *sel = r600_create_shader_state_tokens(ctx, state->tokens, pipe_shader_type);
-
+ struct r600_pipe_shader_selector *sel;
+
+ if (state->type == PIPE_SHADER_IR_TGSI)
+ sel = r600_create_shader_state_tokens(ctx, state->tokens, state->type, pipe_shader_type);
+ else if (state->type == PIPE_SHADER_IR_NIR) {
+ sel = r600_create_shader_state_tokens(ctx, state->ir.nir, state->type, pipe_shader_type);
+ } else
+ assert(0 && "Unknown shader type\n");
+
+ sel->ir_type = state->type;
sel->so = state->stream_output;
switch (pipe_shader_type) {
@@ -1082,7 +1117,14 @@ void r600_delete_shader_selector(struct pipe_context *ctx,
p = c;
}
- free(sel->tokens);
+ if (sel->ir_type == PIPE_SHADER_IR_TGSI) {
+ free(sel->tokens);
+ /* We might have converted the TGSI shader to a NIR shader */
+ if (sel->nir)
+ ralloc_free(sel->nir);
+ }
+ else if (sel->ir_type == PIPE_SHADER_IR_NIR)
+ ralloc_free(sel->nir);
free(sel);
}
@@ -1159,6 +1201,7 @@ void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf
static void r600_set_constant_buffer(struct pipe_context *ctx,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *input)
{
struct r600_context *rctx = (struct r600_context *)ctx;
@@ -1166,7 +1209,7 @@ static void r600_set_constant_buffer(struct pipe_context *ctx,
struct pipe_constant_buffer *cb;
const uint8_t *ptr;
- /* Note that the state tracker can unbind constant buffers by
+ /* Note that the gallium frontend can unbind constant buffers by
* passing NULL here.
*/
if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
@@ -1209,7 +1252,12 @@ static void r600_set_constant_buffer(struct pipe_context *ctx,
} else {
/* Setup the hw buffer. */
cb->buffer_offset = input->buffer_offset;
- pipe_resource_reference(&cb->buffer, input->buffer);
+ if (take_ownership) {
+ pipe_resource_reference(&cb->buffer, NULL);
+ cb->buffer = input->buffer;
+ } else {
+ pipe_resource_reference(&cb->buffer, input->buffer);
+ }
r600_context_add_resource_size(ctx, input->buffer);
}
@@ -1315,7 +1363,7 @@ void r600_update_driver_const_buffers(struct r600_context *rctx, bool compute_on
cb.user_buffer = ptr;
cb.buffer_offset = 0;
cb.buffer_size = size;
- rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, &cb);
+ rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, &cb);
pipe_resource_reference(&cb.buffer, NULL);
}
}
@@ -1504,21 +1552,21 @@ static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
if (enable) {
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
- R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
+ R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.esgs_ring);
if (rctx->tes_shader) {
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
- R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
+ R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.gsvs_ring);
} else {
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
- R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
+ R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.gsvs_ring);
}
} else {
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
- R600_GS_RING_CONST_BUFFER, NULL);
+ R600_GS_RING_CONST_BUFFER, false, NULL);
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
- R600_GS_RING_CONST_BUFFER, NULL);
+ R600_GS_RING_CONST_BUFFER, false, NULL);
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
- R600_GS_RING_CONST_BUFFER, NULL);
+ R600_GS_RING_CONST_BUFFER, false, NULL);
}
}
}
@@ -1638,7 +1686,7 @@ void r600_setup_scratch_area_for_shader(struct r600_context *rctx,
if (scratch->dirty ||
unlikely(shader->scratch_space_needed != scratch->item_size ||
size > scratch->size)) {
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
scratch->dirty = false;
@@ -1846,7 +1894,7 @@ static bool r600_update_derived_state(struct r600_context *rctx)
* to LS slots and won't reflect what is dirty as VS stage even if the
* TES didn't overwrite it. The story for re-enabled TES is similar.
* In any case, we're not allowed to submit any TES state when
- * TES is disabled (the state tracker may not do this but this looks
+ * TES is disabled (the gallium frontend may not do this but this looks
* like an optimization to me, not something which can be relied on).
*/
@@ -1982,7 +2030,7 @@ static bool r600_update_derived_state(struct r600_context *rctx)
void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
struct r600_clip_misc_state *state = &rctx->clip_misc_state;
radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
@@ -2002,7 +2050,7 @@ void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom
/* rast_prim is the primitive type after GS. */
static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
{
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
enum pipe_prim_type rast_prim = rctx->current_rast_prim;
/* Skip this if not rendering lines. */
@@ -2025,21 +2073,35 @@ static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
rctx->last_rast_prim = rast_prim;
}
-static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
+static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info,
+ const struct pipe_draw_indirect_info *indirect,
+ const struct pipe_draw_start_count *draws,
+ unsigned num_draws)
{
+ if (num_draws > 1) {
+ util_draw_multi(ctx, info, indirect, draws, num_draws);
+ return;
+ }
+
struct r600_context *rctx = (struct r600_context *)ctx;
- struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct pipe_resource *indexbuf = !info->index_size || info->has_user_indices ? NULL : info->index.resource;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
- bool has_user_indices = info->has_user_indices;
+ bool has_user_indices = info->index_size && info->has_user_indices;
uint64_t mask;
unsigned num_patches, dirty_tex_counter, index_offset = 0;
unsigned index_size = info->index_size;
int index_bias;
struct r600_shader_atomic combined_atomics[8];
- uint8_t atomic_used_mask;
+ uint8_t atomic_used_mask = 0;
+ struct pipe_stream_output_target *count_from_so = NULL;
+
+ if (indirect && indirect->count_from_stream_output) {
+ count_from_so = indirect->count_from_stream_output;
+ indirect = NULL;
+ }
- if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) {
+ if (!indirect && !draws[0].count && (index_size || !count_from_so)) {
return;
}
@@ -2054,7 +2116,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
}
/* make sure that the gfx ring is only one active */
- if (radeon_emitted(rctx->b.dma.cs, 0)) {
+ if (radeon_emitted(&rctx->b.dma.cs, 0)) {
rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
}
@@ -2101,7 +2163,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
}
if (index_size) {
- index_offset += info->start * index_size;
+ index_offset += draws[0].start * index_size;
/* Translate 8-bit indices to 16-bit. */
if (unlikely(index_size == 1)) {
@@ -2110,17 +2172,17 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
void *ptr;
unsigned start, count;
- if (likely(!info->indirect)) {
+ if (likely(!indirect)) {
start = 0;
- count = info->count;
+ count = draws[0].count;
}
else {
/* Have to get start/count from indirect buffer, slow path ahead... */
- struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect->buffer;
+ struct r600_resource *indirect_resource = (struct r600_resource *)indirect->buffer;
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
- PIPE_TRANSFER_READ);
+ PIPE_MAP_READ);
if (data) {
- data += info->indirect->offset / sizeof(unsigned);
+ data += indirect->offset / sizeof(unsigned);
start = data[2] * index_size;
count = data[0];
}
@@ -2149,25 +2211,28 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
* and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
* Indirect draws never use immediate indices.
* Note: Instanced rendering in combination with immediate indices hangs. */
- if (has_user_indices && (R600_BIG_ENDIAN || info->indirect ||
+ if (has_user_indices && (R600_BIG_ENDIAN || indirect ||
info->instance_count > 1 ||
- info->count*index_size > 20)) {
+ draws[0].count*index_size > 20)) {
+ unsigned start_offset = draws[0].start * index_size;
indexbuf = NULL;
- u_upload_data(ctx->stream_uploader, 0,
- info->count * index_size, 256,
- info->index.user, &index_offset, &indexbuf);
+ u_upload_data(ctx->stream_uploader, start_offset,
+ draws[0].count * index_size, 256,
+ (char*)info->index.user + start_offset,
+ &index_offset, &indexbuf);
+ index_offset -= start_offset;
has_user_indices = false;
}
index_bias = info->index_bias;
} else {
- index_bias = info->start;
+ index_bias = indirect ? 0 : draws[0].start;
}
/* Set the index offset and primitive restart. */
if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info->primitive_restart ||
rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info->restart_index ||
rctx->vgt_state.vgt_indx_offset != index_bias ||
- (rctx->vgt_state.last_draw_was_indirect && !info->indirect)) {
+ (rctx->vgt_state.last_draw_was_indirect && !indirect)) {
rctx->vgt_state.vgt_multi_prim_ib_reset_en = info->primitive_restart;
rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info->restart_index;
rctx->vgt_state.vgt_indx_offset = index_bias;
@@ -2247,7 +2312,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
}
/* Update start instance. */
- if (!info->indirect && rctx->last_start_instance != info->start_instance) {
+ if (!indirect && rctx->last_start_instance != info->start_instance) {
radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info->start_instance);
rctx->last_start_instance = info->start_instance;
}
@@ -2262,11 +2327,11 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
}
/* Draw packets. */
- if (likely(!info->indirect)) {
+ if (likely(!indirect)) {
radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
radeon_emit(cs, info->instance_count);
} else {
- uint64_t va = r600_resource(info->indirect->buffer)->gpu_address;
+ uint64_t va = r600_resource(indirect->buffer)->gpu_address;
assert(rctx->b.chip_class >= EVERGREEN);
// Invalidate so non-indirect draw calls reset this state
@@ -2280,7 +2345,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)info->indirect->buffer,
+ (struct r600_resource*)indirect->buffer,
RADEON_USAGE_READ,
RADEON_PRIO_DRAW_INDIRECT));
}
@@ -2292,20 +2357,20 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
if (has_user_indices) {
- unsigned size_bytes = info->count*index_size;
+ unsigned size_bytes = draws[0].count*index_size;
unsigned size_dw = align(size_bytes, 4) / 4;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
- radeon_emit(cs, info->count);
+ radeon_emit(cs, draws[0].count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
- radeon_emit_array(cs, info->index.user, size_dw);
+ radeon_emit_array(cs, info->index.user + draws[0].start * index_size, size_dw);
} else {
uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
- if (likely(!info->indirect)) {
+ if (likely(!indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32UL) & 0xFF);
- radeon_emit(cs, info->count);
+ radeon_emit(cs, draws[0].count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
@@ -2330,13 +2395,13 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, max_size);
radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
- radeon_emit(cs, info->indirect->offset);
+ radeon_emit(cs, indirect->offset);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
}
}
} else {
- if (unlikely(info->count_from_stream_output)) {
- struct r600_so_target *t = (struct r600_so_target*)info->count_from_stream_output;
+ if (unlikely(count_from_so)) {
+ struct r600_so_target *t = (struct r600_so_target*)count_from_so;
uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;
radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
@@ -2354,16 +2419,16 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
RADEON_PRIO_SO_FILLED_SIZE));
}
- if (likely(!info->indirect)) {
+ if (likely(!indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
- radeon_emit(cs, info->count);
+ radeon_emit(cs, draws[0].count);
}
else {
radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
- radeon_emit(cs, info->indirect->offset);
+ radeon_emit(cs, indirect->offset);
}
radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
- (info->count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
+ (count_from_so ? S_0287F0_USE_OPAQUE(1) : 0));
}
/* SMX returns CONTEXT_DONE too early workaround */
@@ -2549,7 +2614,7 @@ bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader;
if (!shader)
@@ -2757,6 +2822,7 @@ uint32_t r600_translate_texformat(struct pipe_screen *screen,
case PIPE_FORMAT_RGTC1_SNORM:
case PIPE_FORMAT_LATC1_SNORM:
word4 |= sign_bit[0];
+ FALLTHROUGH;
case PIPE_FORMAT_RGTC1_UNORM:
case PIPE_FORMAT_LATC1_UNORM:
result = FMT_BC4;
@@ -2764,6 +2830,7 @@ uint32_t r600_translate_texformat(struct pipe_screen *screen,
case PIPE_FORMAT_RGTC2_SNORM:
case PIPE_FORMAT_LATC2_SNORM:
word4 |= sign_bit[0] | sign_bit[1];
+ FALLTHROUGH;
case PIPE_FORMAT_RGTC2_UNORM:
case PIPE_FORMAT_LATC2_UNORM:
result = FMT_BC5;
@@ -2809,7 +2876,7 @@ uint32_t r600_translate_texformat(struct pipe_screen *screen,
goto out_word4;
case PIPE_FORMAT_BPTC_RGB_FLOAT:
word4 |= sign_bit[0] | sign_bit[1] | sign_bit[2];
- /* fall through */
+ FALLTHROUGH;
case PIPE_FORMAT_BPTC_RGB_UFLOAT:
result = FMT_BC6;
goto out_word4;