author     Jonathan Gray <jsg@cvs.openbsd.org>    2020-08-26 05:30:39 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>    2020-08-26 05:30:39 +0000
commit     27c93456b58343162f7c4ad20ca6bea0c9a91646 (patch)
tree       945c20b63e0b9975ee40f114c5312f8d8f1a2d0b /lib/mesa/src/gallium/drivers/zink
parent     875b83a3ee95e248388fbf72271acc80f6f97987 (diff)
Import Mesa 20.1.6
Diffstat (limited to 'lib/mesa/src/gallium/drivers/zink')
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/meson.build                        |   65
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c        | 2059
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.h        |   54
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/nir_to_spirv/spirv_builder.c       | 1074
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/nir_to_spirv/spirv_builder.h       |  321
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/nir_to_spirv/zink_nir_algebraic.py |   48
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_batch.c                       |  119
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_batch.h                       |   69
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_blit.c                        |  211
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_compiler.c                    |  303
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_compiler.h                    |   69
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_context.c                     | 1172
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_context.h                     |  161
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_draw.c                        |  312
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_fence.c                       |  106
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_fence.h                       |   60
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_format.c                      |  153
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_framebuffer.c                 |   86
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_framebuffer.h                 |   75
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_helpers.h                     |   37
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_pipeline.c                    |  156
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_pipeline.h                    |   62
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_program.c                     |  250
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_program.h                     |   61
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_public.h                      |   35
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_query.c                       |  262
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_query.h                       |   36
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_render_pass.c                 |  124
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_render_pass.h                 |   75
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_resource.c                    |  555
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_resource.h                    |   71
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_screen.c                      |  824
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_screen.h                      |   76
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_state.c                       |  455
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_state.h                       |   90
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_surface.c                     |  139
-rw-r--r--  lib/mesa/src/gallium/drivers/zink/zink_surface.h                     |   47
37 files changed, 9872 insertions, 0 deletions
diff --git a/lib/mesa/src/gallium/drivers/zink/meson.build b/lib/mesa/src/gallium/drivers/zink/meson.build
new file mode 100644
index 000000000..15f79b623
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/meson.build
@@ -0,0 +1,65 @@
+# Copyright © 2018 Collabora Ltd
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+files_libzink = files(
+ 'nir_to_spirv/nir_to_spirv.c',
+ 'nir_to_spirv/spirv_builder.c',
+ 'zink_batch.c',
+ 'zink_blit.c',
+ 'zink_compiler.c',
+ 'zink_context.c',
+ 'zink_draw.c',
+ 'zink_fence.c',
+ 'zink_format.c',
+ 'zink_framebuffer.c',
+ 'zink_pipeline.c',
+ 'zink_program.c',
+ 'zink_query.c',
+ 'zink_render_pass.c',
+ 'zink_resource.c',
+ 'zink_screen.c',
+ 'zink_state.c',
+ 'zink_surface.c',
+)
+
+zink_nir_algebraic_c = custom_target(
+ 'zink_nir_algebraic.c',
+ input : 'nir_to_spirv/zink_nir_algebraic.py',
+ output : 'zink_nir_algebraic.c',
+ command : [
+ prog_python, '@INPUT@',
+ '-p', join_paths(meson.source_root(), 'src/compiler/nir/'),
+ ],
+ capture : true,
+ depend_files : nir_algebraic_py,
+)
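+# zink_nir_algebraic.py, like Mesa's other nir_algebraic generators, prints
+# the generated C to stdout (hence 'capture : true' above), and the '-p'
+# argument points it at the NIR sources so it can import the nir_algebraic
+# module.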
+
+libzink = static_library(
+ 'zink',
+ [files_libzink, zink_nir_algebraic_c],
+ c_args : c_vis_args,
+ include_directories : [inc_include, inc_src, inc_mapi, inc_mesa, inc_gallium, inc_gallium_aux],
+ dependencies: [dep_vulkan, idep_nir_headers],
+)
+
+driver_zink = declare_dependency(
+ compile_args : '-DGALLIUM_ZINK',
+ link_with : [libzink],
+)
diff --git a/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
new file mode 100644
index 000000000..857c7721f
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
@@ -0,0 +1,2059 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nir_to_spirv.h"
+#include "spirv_builder.h"
+
+#include "nir.h"
+#include "pipe/p_state.h"
+#include "util/u_memory.h"
+#include "util/hash_table.h"
+
+struct ntv_context {
+ struct spirv_builder builder;
+
+ SpvId GLSL_std_450;
+
+ gl_shader_stage stage;
+
+ SpvId ubos[128];
+ size_t num_ubos;
+ SpvId image_types[PIPE_MAX_SAMPLERS];
+ SpvId samplers[PIPE_MAX_SAMPLERS];
+ unsigned samplers_used : PIPE_MAX_SAMPLERS;
+ SpvId entry_ifaces[PIPE_MAX_SHADER_INPUTS * 4 + PIPE_MAX_SHADER_OUTPUTS * 4];
+ size_t num_entry_ifaces;
+
+ SpvId *defs;
+ size_t num_defs;
+
+ SpvId *regs;
+ size_t num_regs;
+
+ struct hash_table *vars; /* nir_variable -> SpvId */
+
+ const SpvId *block_ids;
+ size_t num_blocks;
+ bool block_started;
+ SpvId loop_break, loop_cont;
+
+ SpvId front_face_var, instance_id_var, vertex_id_var;
+};
+
+static SpvId
+get_fvec_constant(struct ntv_context *ctx, unsigned bit_size,
+ unsigned num_components, float value);
+
+static SpvId
+get_uvec_constant(struct ntv_context *ctx, unsigned bit_size,
+ unsigned num_components, uint32_t value);
+
+static SpvId
+get_ivec_constant(struct ntv_context *ctx, unsigned bit_size,
+ unsigned num_components, int32_t value);
+
+static SpvId
+emit_unop(struct ntv_context *ctx, SpvOp op, SpvId type, SpvId src);
+
+static SpvId
+emit_binop(struct ntv_context *ctx, SpvOp op, SpvId type,
+ SpvId src0, SpvId src1);
+
+static SpvId
+emit_triop(struct ntv_context *ctx, SpvOp op, SpvId type,
+ SpvId src0, SpvId src1, SpvId src2);
+
+static SpvId
+get_bvec_type(struct ntv_context *ctx, int num_components)
+{
+ SpvId bool_type = spirv_builder_type_bool(&ctx->builder);
+ if (num_components > 1)
+ return spirv_builder_type_vector(&ctx->builder, bool_type,
+ num_components);
+
+ assert(num_components == 1);
+ return bool_type;
+}
+
+static SpvId
+block_label(struct ntv_context *ctx, nir_block *block)
+{
+ assert(block->index < ctx->num_blocks);
+ return ctx->block_ids[block->index];
+}
+
+static SpvId
+emit_float_const(struct ntv_context *ctx, int bit_size, float value)
+{
+ assert(bit_size == 32);
+ return spirv_builder_const_float(&ctx->builder, bit_size, value);
+}
+
+static SpvId
+emit_uint_const(struct ntv_context *ctx, int bit_size, uint32_t value)
+{
+ assert(bit_size == 32);
+ return spirv_builder_const_uint(&ctx->builder, bit_size, value);
+}
+
+static SpvId
+emit_int_const(struct ntv_context *ctx, int bit_size, int32_t value)
+{
+ assert(bit_size == 32);
+ return spirv_builder_const_int(&ctx->builder, bit_size, value);
+}
+
+static SpvId
+get_fvec_type(struct ntv_context *ctx, unsigned bit_size, unsigned num_components)
+{
+ assert(bit_size == 32); // only 32-bit floats supported so far
+
+ SpvId float_type = spirv_builder_type_float(&ctx->builder, bit_size);
+ if (num_components > 1)
+ return spirv_builder_type_vector(&ctx->builder, float_type,
+ num_components);
+
+ assert(num_components == 1);
+ return float_type;
+}
+
+static SpvId
+get_ivec_type(struct ntv_context *ctx, unsigned bit_size, unsigned num_components)
+{
+ assert(bit_size == 32); // only 32-bit ints supported so far
+
+ SpvId int_type = spirv_builder_type_int(&ctx->builder, bit_size);
+ if (num_components > 1)
+ return spirv_builder_type_vector(&ctx->builder, int_type,
+ num_components);
+
+ assert(num_components == 1);
+ return int_type;
+}
+
+static SpvId
+get_uvec_type(struct ntv_context *ctx, unsigned bit_size, unsigned num_components)
+{
+ assert(bit_size == 32); // only 32-bit uints supported so far
+
+ SpvId uint_type = spirv_builder_type_uint(&ctx->builder, bit_size);
+ if (num_components > 1)
+ return spirv_builder_type_vector(&ctx->builder, uint_type,
+ num_components);
+
+ assert(num_components == 1);
+ return uint_type;
+}
+
+static SpvId
+get_dest_uvec_type(struct ntv_context *ctx, nir_dest *dest)
+{
+ unsigned bit_size = MAX2(nir_dest_bit_size(*dest), 32);
+ return get_uvec_type(ctx, bit_size, nir_dest_num_components(*dest));
+}
+
+static SpvId
+get_glsl_basetype(struct ntv_context *ctx, enum glsl_base_type type)
+{
+ switch (type) {
+ case GLSL_TYPE_BOOL:
+ return spirv_builder_type_bool(&ctx->builder);
+
+ case GLSL_TYPE_FLOAT:
+ return spirv_builder_type_float(&ctx->builder, 32);
+
+ case GLSL_TYPE_INT:
+ return spirv_builder_type_int(&ctx->builder, 32);
+
+ case GLSL_TYPE_UINT:
+ return spirv_builder_type_uint(&ctx->builder, 32);
+ /* TODO: handle more types */
+
+ default:
+ unreachable("unknown GLSL type");
+ }
+}
+
+static SpvId
+get_glsl_type(struct ntv_context *ctx, const struct glsl_type *type)
+{
+ assert(type);
+ if (glsl_type_is_scalar(type))
+ return get_glsl_basetype(ctx, glsl_get_base_type(type));
+
+ if (glsl_type_is_vector(type))
+ return spirv_builder_type_vector(&ctx->builder,
+ get_glsl_basetype(ctx, glsl_get_base_type(type)),
+ glsl_get_vector_elements(type));
+
+ if (glsl_type_is_array(type)) {
+ SpvId ret = spirv_builder_type_array(&ctx->builder,
+ get_glsl_type(ctx, glsl_get_array_element(type)),
+ emit_uint_const(ctx, 32, glsl_get_length(type)));
+ uint32_t stride = glsl_get_explicit_stride(type);
+ if (stride)
+ spirv_builder_emit_array_stride(&ctx->builder, ret, stride);
+ return ret;
+ }
+
+
+   unreachable("unhandled GLSL type");
+}
+
+static void
+emit_input(struct ntv_context *ctx, struct nir_variable *var)
+{
+ SpvId var_type = get_glsl_type(ctx, var->type);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassInput,
+ var_type);
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassInput);
+
+ if (var->name)
+ spirv_builder_emit_name(&ctx->builder, var_id, var->name);
+
+ if (ctx->stage == MESA_SHADER_FRAGMENT) {
+ if (var->data.location >= VARYING_SLOT_VAR0)
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.location -
+ VARYING_SLOT_VAR0 +
+ VARYING_SLOT_TEX0);
+ else if ((var->data.location >= VARYING_SLOT_COL0 &&
+ var->data.location <= VARYING_SLOT_TEX7) ||
+ var->data.location == VARYING_SLOT_BFC0 ||
+ var->data.location == VARYING_SLOT_BFC1) {
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.location);
+ } else {
+ switch (var->data.location) {
+ case VARYING_SLOT_POS:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInFragCoord);
+ break;
+
+ case VARYING_SLOT_PNTC:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInPointCoord);
+ break;
+
+ default:
+ debug_printf("unknown varying slot: %s\n", gl_varying_slot_name(var->data.location));
+ unreachable("unexpected varying slot");
+ }
+ }
+ } else {
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.driver_location);
+ }
+
+ if (var->data.location_frac)
+ spirv_builder_emit_component(&ctx->builder, var_id,
+ var->data.location_frac);
+
+ if (var->data.interpolation == INTERP_MODE_FLAT)
+ spirv_builder_emit_decoration(&ctx->builder, var_id, SpvDecorationFlat);
+
+ _mesa_hash_table_insert(ctx->vars, var, (void *)(intptr_t)var_id);
+
+ assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
+ ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
+}
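+
+/* Note the remapping trick above: generic varyings (VAR0 and up) are moved
+ * down to start at the TEX0 slot number, so they share one contiguous
+ * location namespace with the fixed-function COL/TEX/BFC slots. The same
+ * remapping is applied in emit_output for the producing stage, so the two
+ * sides agree on locations. */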
+
+static void
+emit_output(struct ntv_context *ctx, struct nir_variable *var)
+{
+ SpvId var_type = get_glsl_type(ctx, var->type);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassOutput,
+ var_type);
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassOutput);
+ if (var->name)
+ spirv_builder_emit_name(&ctx->builder, var_id, var->name);
+
+
+ if (ctx->stage == MESA_SHADER_VERTEX) {
+ if (var->data.location >= VARYING_SLOT_VAR0)
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.location -
+ VARYING_SLOT_VAR0 +
+ VARYING_SLOT_TEX0);
+ else if ((var->data.location >= VARYING_SLOT_COL0 &&
+ var->data.location <= VARYING_SLOT_TEX7) ||
+ var->data.location == VARYING_SLOT_BFC0 ||
+ var->data.location == VARYING_SLOT_BFC1) {
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.location);
+ } else {
+ switch (var->data.location) {
+ case VARYING_SLOT_POS:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInPosition);
+ break;
+
+ case VARYING_SLOT_PSIZ:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInPointSize);
+ break;
+
+ case VARYING_SLOT_CLIP_DIST0:
+ assert(glsl_type_is_array(var->type));
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInClipDistance);
+ break;
+
+ default:
+ debug_printf("unknown varying slot: %s\n", gl_varying_slot_name(var->data.location));
+ unreachable("unexpected varying slot");
+ }
+ }
+ } else if (ctx->stage == MESA_SHADER_FRAGMENT) {
+ if (var->data.location >= FRAG_RESULT_DATA0)
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.location - FRAG_RESULT_DATA0);
+ else {
+ switch (var->data.location) {
+ case FRAG_RESULT_COLOR:
+ spirv_builder_emit_location(&ctx->builder, var_id, 0);
+ spirv_builder_emit_index(&ctx->builder, var_id, var->data.index);
+ break;
+
+ case FRAG_RESULT_DEPTH:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInFragDepth);
+ break;
+
+ default:
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.driver_location);
+ }
+ }
+ }
+
+ if (var->data.location_frac)
+ spirv_builder_emit_component(&ctx->builder, var_id,
+ var->data.location_frac);
+
+ _mesa_hash_table_insert(ctx->vars, var, (void *)(intptr_t)var_id);
+
+ assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
+ ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
+}
+
+static SpvDim
+type_to_dim(enum glsl_sampler_dim gdim, bool *is_ms)
+{
+ *is_ms = false;
+ switch (gdim) {
+ case GLSL_SAMPLER_DIM_1D:
+ return SpvDim1D;
+ case GLSL_SAMPLER_DIM_2D:
+ return SpvDim2D;
+ case GLSL_SAMPLER_DIM_3D:
+ return SpvDim3D;
+ case GLSL_SAMPLER_DIM_CUBE:
+ return SpvDimCube;
+ case GLSL_SAMPLER_DIM_RECT:
+ return SpvDim2D;
+ case GLSL_SAMPLER_DIM_BUF:
+ return SpvDimBuffer;
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ return SpvDim2D; /* seems dodgy... */
+ case GLSL_SAMPLER_DIM_MS:
+ *is_ms = true;
+ return SpvDim2D;
+ default:
+ fprintf(stderr, "unknown sampler type %d\n", gdim);
+ break;
+ }
+ return SpvDim2D;
+}
+
+uint32_t
+zink_binding(gl_shader_stage stage, VkDescriptorType type, int index)
+{
+ if (stage == MESA_SHADER_NONE ||
+ stage >= MESA_SHADER_COMPUTE) {
+ unreachable("not supported");
+ } else {
+ uint32_t stage_offset = (uint32_t)stage * (PIPE_MAX_CONSTANT_BUFFERS +
+ PIPE_MAX_SHADER_SAMPLER_VIEWS);
+
+ switch (type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ assert(index < PIPE_MAX_CONSTANT_BUFFERS);
+ return stage_offset + index;
+
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ assert(index < PIPE_MAX_SHADER_SAMPLER_VIEWS);
+ return stage_offset + PIPE_MAX_CONSTANT_BUFFERS + index;
+
+ default:
+ unreachable("unexpected type");
+ }
+ }
+}
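+
+/* Worked example of the flat binding layout above, assuming the usual
+ * gallium limits (PIPE_MAX_CONSTANT_BUFFERS == 32,
+ * PIPE_MAX_SHADER_SAMPLER_VIEWS == 128) and MESA_SHADER_FRAGMENT == 4:
+ *
+ *    zink_binding(fragment, UNIFORM_BUFFER, 0)         = 4 * 160 + 0      = 640
+ *    zink_binding(fragment, COMBINED_IMAGE_SAMPLER, 2) = 4 * 160 + 32 + 2 = 674
+ *
+ * Each stage owns a disjoint binding range, with all UBOs placed before
+ * all samplers. */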
+
+static void
+emit_sampler(struct ntv_context *ctx, struct nir_variable *var)
+{
+ const struct glsl_type *type = glsl_without_array(var->type);
+
+ bool is_ms;
+ SpvDim dimension = type_to_dim(glsl_get_sampler_dim(type), &is_ms);
+
+ SpvId result_type = get_glsl_basetype(ctx, glsl_get_sampler_result_type(type));
+ SpvId image_type = spirv_builder_type_image(&ctx->builder, result_type,
+ dimension, false,
+ glsl_sampler_type_is_array(type),
+ is_ms, 1,
+ SpvImageFormatUnknown);
+
+ SpvId sampled_type = spirv_builder_type_sampled_image(&ctx->builder,
+ image_type);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassUniformConstant,
+ sampled_type);
+
+ if (glsl_type_is_array(var->type)) {
+ for (int i = 0; i < glsl_get_length(var->type); ++i) {
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassUniformConstant);
+
+ if (var->name) {
+ char element_name[100];
+ snprintf(element_name, sizeof(element_name), "%s_%d", var->name, i);
+         spirv_builder_emit_name(&ctx->builder, var_id, element_name);
+ }
+
+ int index = var->data.binding + i;
+ assert(!(ctx->samplers_used & (1 << index)));
+ assert(!ctx->image_types[index]);
+ ctx->image_types[index] = image_type;
+ ctx->samplers[index] = var_id;
+ ctx->samplers_used |= 1 << index;
+
+ spirv_builder_emit_descriptor_set(&ctx->builder, var_id,
+ var->data.descriptor_set);
+ int binding = zink_binding(ctx->stage,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ var->data.binding + i);
+ spirv_builder_emit_binding(&ctx->builder, var_id, binding);
+ }
+ } else {
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassUniformConstant);
+
+ if (var->name)
+ spirv_builder_emit_name(&ctx->builder, var_id, var->name);
+
+ int index = var->data.binding;
+ assert(!(ctx->samplers_used & (1 << index)));
+ assert(!ctx->image_types[index]);
+ ctx->image_types[index] = image_type;
+ ctx->samplers[index] = var_id;
+ ctx->samplers_used |= 1 << index;
+
+ spirv_builder_emit_descriptor_set(&ctx->builder, var_id,
+ var->data.descriptor_set);
+ int binding = zink_binding(ctx->stage,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ var->data.binding);
+ spirv_builder_emit_binding(&ctx->builder, var_id, binding);
+ }
+}
+
+static void
+emit_ubo(struct ntv_context *ctx, struct nir_variable *var)
+{
+ uint32_t size = glsl_count_attribute_slots(var->type, false);
+ SpvId vec4_type = get_uvec_type(ctx, 32, 4);
+ SpvId array_length = emit_uint_const(ctx, 32, size);
+ SpvId array_type = spirv_builder_type_array(&ctx->builder, vec4_type,
+ array_length);
+ spirv_builder_emit_array_stride(&ctx->builder, array_type, 16);
+
+ // wrap UBO-array in a struct
+ SpvId struct_type = spirv_builder_type_struct(&ctx->builder, &array_type, 1);
+ if (var->name) {
+ char struct_name[100];
+ snprintf(struct_name, sizeof(struct_name), "struct_%s", var->name);
+ spirv_builder_emit_name(&ctx->builder, struct_type, struct_name);
+ }
+
+ spirv_builder_emit_decoration(&ctx->builder, struct_type,
+ SpvDecorationBlock);
+ spirv_builder_emit_member_offset(&ctx->builder, struct_type, 0, 0);
+
+
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassUniform,
+ struct_type);
+
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassUniform);
+ if (var->name)
+ spirv_builder_emit_name(&ctx->builder, var_id, var->name);
+
+ assert(ctx->num_ubos < ARRAY_SIZE(ctx->ubos));
+ ctx->ubos[ctx->num_ubos++] = var_id;
+
+ spirv_builder_emit_descriptor_set(&ctx->builder, var_id,
+ var->data.descriptor_set);
+ int binding = zink_binding(ctx->stage,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ var->data.binding);
+ spirv_builder_emit_binding(&ctx->builder, var_id, binding);
+}
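+
+/* The declaration built above is roughly equivalent to the following GLSL
+ * (names illustrative), for a UBO occupying four 16-byte slots:
+ *
+ *    layout(set = S, binding = B) uniform struct_foo {
+ *       uvec4 data[4];
+ *    } foo;
+ *
+ * The contents are exposed as raw uvec4s; loads bitcast them to whatever
+ * type the shader expects (see emit_load_ubo). */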
+
+static void
+emit_uniform(struct ntv_context *ctx, struct nir_variable *var)
+{
+ if (var->data.mode == nir_var_mem_ubo)
+ emit_ubo(ctx, var);
+ else {
+ assert(var->data.mode == nir_var_uniform);
+ if (glsl_type_is_sampler(glsl_without_array(var->type)))
+ emit_sampler(ctx, var);
+ }
+}
+
+static SpvId
+get_src_ssa(struct ntv_context *ctx, const nir_ssa_def *ssa)
+{
+ assert(ssa->index < ctx->num_defs);
+ assert(ctx->defs[ssa->index] != 0);
+ return ctx->defs[ssa->index];
+}
+
+static SpvId
+get_var_from_reg(struct ntv_context *ctx, nir_register *reg)
+{
+ assert(reg->index < ctx->num_regs);
+ assert(ctx->regs[reg->index] != 0);
+ return ctx->regs[reg->index];
+}
+
+static SpvId
+get_src_reg(struct ntv_context *ctx, const nir_reg_src *reg)
+{
+ assert(reg->reg);
+ assert(!reg->indirect);
+ assert(!reg->base_offset);
+
+ SpvId var = get_var_from_reg(ctx, reg->reg);
+ SpvId type = get_uvec_type(ctx, reg->reg->bit_size, reg->reg->num_components);
+ return spirv_builder_emit_load(&ctx->builder, type, var);
+}
+
+static SpvId
+get_src(struct ntv_context *ctx, nir_src *src)
+{
+ if (src->is_ssa)
+ return get_src_ssa(ctx, src->ssa);
+ else
+ return get_src_reg(ctx, &src->reg);
+}
+
+static SpvId
+get_alu_src_raw(struct ntv_context *ctx, nir_alu_instr *alu, unsigned src)
+{
+ assert(!alu->src[src].negate);
+ assert(!alu->src[src].abs);
+
+ SpvId def = get_src(ctx, &alu->src[src].src);
+
+ unsigned used_channels = 0;
+ bool need_swizzle = false;
+ for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
+ if (!nir_alu_instr_channel_used(alu, src, i))
+ continue;
+
+ used_channels++;
+
+ if (alu->src[src].swizzle[i] != i)
+ need_swizzle = true;
+ }
+ assert(used_channels != 0);
+
+ unsigned live_channels = nir_src_num_components(alu->src[src].src);
+ if (used_channels != live_channels)
+ need_swizzle = true;
+
+ if (!need_swizzle)
+ return def;
+
+ int bit_size = nir_src_bit_size(alu->src[src].src);
+ assert(bit_size == 1 || bit_size == 32);
+
+ SpvId raw_type = bit_size == 1 ? spirv_builder_type_bool(&ctx->builder) :
+ spirv_builder_type_uint(&ctx->builder, bit_size);
+
+ if (used_channels == 1) {
+ uint32_t indices[] = { alu->src[src].swizzle[0] };
+ return spirv_builder_emit_composite_extract(&ctx->builder, raw_type,
+ def, indices,
+ ARRAY_SIZE(indices));
+ } else if (live_channels == 1) {
+ SpvId raw_vec_type = spirv_builder_type_vector(&ctx->builder,
+ raw_type,
+ used_channels);
+
+ SpvId constituents[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < used_channels; ++i)
+ constituents[i] = def;
+
+ return spirv_builder_emit_composite_construct(&ctx->builder,
+ raw_vec_type,
+ constituents,
+ used_channels);
+ } else {
+ SpvId raw_vec_type = spirv_builder_type_vector(&ctx->builder,
+ raw_type,
+ used_channels);
+
+ uint32_t components[NIR_MAX_VEC_COMPONENTS];
+ size_t num_components = 0;
+ for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
+ if (!nir_alu_instr_channel_used(alu, src, i))
+ continue;
+
+ components[num_components++] = alu->src[src].swizzle[i];
+ }
+
+ return spirv_builder_emit_vector_shuffle(&ctx->builder, raw_vec_type,
+ def, def, components,
+ num_components);
+ }
+}
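+
+/* For example, a vec4 source read with swizzle .zx takes the final branch
+ * above and becomes a two-component shuffle, roughly:
+ *
+ *    %res = OpVectorShuffle %v2uint %def %def 2 0
+ *
+ * A single used channel (.z) is extracted with OpCompositeExtract instead,
+ * and a scalar splatted across N channels uses OpCompositeConstruct. */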
+
+static void
+store_ssa_def(struct ntv_context *ctx, nir_ssa_def *ssa, SpvId result)
+{
+ assert(result != 0);
+ assert(ssa->index < ctx->num_defs);
+ ctx->defs[ssa->index] = result;
+}
+
+static SpvId
+emit_select(struct ntv_context *ctx, SpvId type, SpvId cond,
+ SpvId if_true, SpvId if_false)
+{
+ return emit_triop(ctx, SpvOpSelect, type, cond, if_true, if_false);
+}
+
+static SpvId
+uvec_to_bvec(struct ntv_context *ctx, SpvId value, unsigned num_components)
+{
+ SpvId type = get_bvec_type(ctx, num_components);
+ SpvId zero = get_uvec_constant(ctx, 32, num_components, 0);
+ return emit_binop(ctx, SpvOpINotEqual, type, value, zero);
+}
+
+static SpvId
+emit_bitcast(struct ntv_context *ctx, SpvId type, SpvId value)
+{
+ return emit_unop(ctx, SpvOpBitcast, type, value);
+}
+
+static SpvId
+bitcast_to_uvec(struct ntv_context *ctx, SpvId value, unsigned bit_size,
+ unsigned num_components)
+{
+ SpvId type = get_uvec_type(ctx, bit_size, num_components);
+ return emit_bitcast(ctx, type, value);
+}
+
+static SpvId
+bitcast_to_ivec(struct ntv_context *ctx, SpvId value, unsigned bit_size,
+ unsigned num_components)
+{
+ SpvId type = get_ivec_type(ctx, bit_size, num_components);
+ return emit_bitcast(ctx, type, value);
+}
+
+static SpvId
+bitcast_to_fvec(struct ntv_context *ctx, SpvId value, unsigned bit_size,
+ unsigned num_components)
+{
+ SpvId type = get_fvec_type(ctx, bit_size, num_components);
+ return emit_bitcast(ctx, type, value);
+}
+
+static void
+store_reg_def(struct ntv_context *ctx, nir_reg_dest *reg, SpvId result)
+{
+ SpvId var = get_var_from_reg(ctx, reg->reg);
+ assert(var);
+ spirv_builder_emit_store(&ctx->builder, var, result);
+}
+
+static void
+store_dest_raw(struct ntv_context *ctx, nir_dest *dest, SpvId result)
+{
+ if (dest->is_ssa)
+ store_ssa_def(ctx, &dest->ssa, result);
+ else
+ store_reg_def(ctx, &dest->reg, result);
+}
+
+static void
+store_dest(struct ntv_context *ctx, nir_dest *dest, SpvId result, nir_alu_type type)
+{
+ unsigned num_components = nir_dest_num_components(*dest);
+ unsigned bit_size = nir_dest_bit_size(*dest);
+
+ if (bit_size != 1) {
+ switch (nir_alu_type_get_base_type(type)) {
+ case nir_type_bool:
+         unreachable("bool should have bit-size 1");
+
+ case nir_type_uint:
+ break; /* nothing to do! */
+
+ case nir_type_int:
+ case nir_type_float:
+ result = bitcast_to_uvec(ctx, result, bit_size, num_components);
+ break;
+
+ default:
+ unreachable("unsupported nir_alu_type");
+ }
+ }
+
+ store_dest_raw(ctx, dest, result);
+}
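+
+/* Note the convention established here: results are always stored back in
+ * canonical unsigned (or bool) form, and consumers bitcast to the signed
+ * or float type they need (see get_alu_src and get_src_float). */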
+
+static SpvId
+emit_unop(struct ntv_context *ctx, SpvOp op, SpvId type, SpvId src)
+{
+ return spirv_builder_emit_unop(&ctx->builder, op, type, src);
+}
+
+static SpvId
+emit_binop(struct ntv_context *ctx, SpvOp op, SpvId type,
+ SpvId src0, SpvId src1)
+{
+ return spirv_builder_emit_binop(&ctx->builder, op, type, src0, src1);
+}
+
+static SpvId
+emit_triop(struct ntv_context *ctx, SpvOp op, SpvId type,
+ SpvId src0, SpvId src1, SpvId src2)
+{
+ return spirv_builder_emit_triop(&ctx->builder, op, type, src0, src1, src2);
+}
+
+static SpvId
+emit_builtin_unop(struct ntv_context *ctx, enum GLSLstd450 op, SpvId type,
+ SpvId src)
+{
+ SpvId args[] = { src };
+ return spirv_builder_emit_ext_inst(&ctx->builder, type, ctx->GLSL_std_450,
+ op, args, ARRAY_SIZE(args));
+}
+
+static SpvId
+emit_builtin_binop(struct ntv_context *ctx, enum GLSLstd450 op, SpvId type,
+ SpvId src0, SpvId src1)
+{
+ SpvId args[] = { src0, src1 };
+ return spirv_builder_emit_ext_inst(&ctx->builder, type, ctx->GLSL_std_450,
+ op, args, ARRAY_SIZE(args));
+}
+
+static SpvId
+emit_builtin_triop(struct ntv_context *ctx, enum GLSLstd450 op, SpvId type,
+ SpvId src0, SpvId src1, SpvId src2)
+{
+ SpvId args[] = { src0, src1, src2 };
+ return spirv_builder_emit_ext_inst(&ctx->builder, type, ctx->GLSL_std_450,
+ op, args, ARRAY_SIZE(args));
+}
+
+static SpvId
+get_fvec_constant(struct ntv_context *ctx, unsigned bit_size,
+ unsigned num_components, float value)
+{
+ assert(bit_size == 32);
+
+ SpvId result = emit_float_const(ctx, bit_size, value);
+ if (num_components == 1)
+ return result;
+
+ assert(num_components > 1);
+ SpvId components[num_components];
+ for (int i = 0; i < num_components; i++)
+ components[i] = result;
+
+ SpvId type = get_fvec_type(ctx, bit_size, num_components);
+ return spirv_builder_const_composite(&ctx->builder, type, components,
+ num_components);
+}
+
+static SpvId
+get_uvec_constant(struct ntv_context *ctx, unsigned bit_size,
+ unsigned num_components, uint32_t value)
+{
+ assert(bit_size == 32);
+
+ SpvId result = emit_uint_const(ctx, bit_size, value);
+ if (num_components == 1)
+ return result;
+
+ assert(num_components > 1);
+ SpvId components[num_components];
+ for (int i = 0; i < num_components; i++)
+ components[i] = result;
+
+ SpvId type = get_uvec_type(ctx, bit_size, num_components);
+ return spirv_builder_const_composite(&ctx->builder, type, components,
+ num_components);
+}
+
+static SpvId
+get_ivec_constant(struct ntv_context *ctx, unsigned bit_size,
+ unsigned num_components, int32_t value)
+{
+ assert(bit_size == 32);
+
+ SpvId result = emit_int_const(ctx, bit_size, value);
+ if (num_components == 1)
+ return result;
+
+ assert(num_components > 1);
+ SpvId components[num_components];
+ for (int i = 0; i < num_components; i++)
+ components[i] = result;
+
+ SpvId type = get_ivec_type(ctx, bit_size, num_components);
+ return spirv_builder_const_composite(&ctx->builder, type, components,
+ num_components);
+}
+
+static inline unsigned
+alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
+{
+ if (nir_op_infos[instr->op].input_sizes[src] > 0)
+ return nir_op_infos[instr->op].input_sizes[src];
+
+ if (instr->dest.dest.is_ssa)
+ return instr->dest.dest.ssa.num_components;
+ else
+ return instr->dest.dest.reg.reg->num_components;
+}
+
+static SpvId
+get_alu_src(struct ntv_context *ctx, nir_alu_instr *alu, unsigned src)
+{
+ SpvId raw_value = get_alu_src_raw(ctx, alu, src);
+
+ unsigned num_components = alu_instr_src_components(alu, src);
+ unsigned bit_size = nir_src_bit_size(alu->src[src].src);
+ nir_alu_type type = nir_op_infos[alu->op].input_types[src];
+
+ if (bit_size == 1)
+ return raw_value;
+ else {
+ switch (nir_alu_type_get_base_type(type)) {
+ case nir_type_bool:
+ unreachable("bool should have bit-size 1");
+
+ case nir_type_int:
+ return bitcast_to_ivec(ctx, raw_value, bit_size, num_components);
+
+ case nir_type_uint:
+ return raw_value;
+
+ case nir_type_float:
+ return bitcast_to_fvec(ctx, raw_value, bit_size, num_components);
+
+ default:
+ unreachable("unknown nir_alu_type");
+ }
+ }
+}
+
+static void
+store_alu_result(struct ntv_context *ctx, nir_alu_instr *alu, SpvId result)
+{
+ assert(!alu->dest.saturate);
+ return store_dest(ctx, &alu->dest.dest, result,
+ nir_op_infos[alu->op].output_type);
+}
+
+static SpvId
+get_dest_type(struct ntv_context *ctx, nir_dest *dest, nir_alu_type type)
+{
+ unsigned num_components = nir_dest_num_components(*dest);
+ unsigned bit_size = nir_dest_bit_size(*dest);
+
+ if (bit_size == 1)
+ return get_bvec_type(ctx, num_components);
+
+ switch (nir_alu_type_get_base_type(type)) {
+ case nir_type_bool:
+ unreachable("bool should have bit-size 1");
+
+ case nir_type_int:
+ return get_ivec_type(ctx, bit_size, num_components);
+
+ case nir_type_uint:
+ return get_uvec_type(ctx, bit_size, num_components);
+
+ case nir_type_float:
+ return get_fvec_type(ctx, bit_size, num_components);
+
+ default:
+ unreachable("unsupported nir_alu_type");
+ }
+}
+
+static void
+emit_alu(struct ntv_context *ctx, nir_alu_instr *alu)
+{
+ SpvId src[nir_op_infos[alu->op].num_inputs];
+ for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
+ src[i] = get_alu_src(ctx, alu, i);
+
+ SpvId dest_type = get_dest_type(ctx, &alu->dest.dest,
+ nir_op_infos[alu->op].output_type);
+ unsigned bit_size = nir_dest_bit_size(alu->dest.dest);
+ unsigned num_components = nir_dest_num_components(alu->dest.dest);
+
+ SpvId result = 0;
+ switch (alu->op) {
+ case nir_op_mov:
+ assert(nir_op_infos[alu->op].num_inputs == 1);
+ result = src[0];
+ break;
+
+#define UNOP(nir_op, spirv_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 1); \
+ result = emit_unop(ctx, spirv_op, dest_type, src[0]); \
+ break;
+
+ UNOP(nir_op_ineg, SpvOpSNegate)
+ UNOP(nir_op_fneg, SpvOpFNegate)
+ UNOP(nir_op_fddx, SpvOpDPdx)
+ UNOP(nir_op_fddx_coarse, SpvOpDPdxCoarse)
+ UNOP(nir_op_fddx_fine, SpvOpDPdxFine)
+ UNOP(nir_op_fddy, SpvOpDPdy)
+ UNOP(nir_op_fddy_coarse, SpvOpDPdyCoarse)
+ UNOP(nir_op_fddy_fine, SpvOpDPdyFine)
+ UNOP(nir_op_f2i32, SpvOpConvertFToS)
+ UNOP(nir_op_f2u32, SpvOpConvertFToU)
+ UNOP(nir_op_i2f32, SpvOpConvertSToF)
+ UNOP(nir_op_u2f32, SpvOpConvertUToF)
+#undef UNOP
+
+ case nir_op_inot:
+ if (bit_size == 1)
+ result = emit_unop(ctx, SpvOpLogicalNot, dest_type, src[0]);
+ else
+ result = emit_unop(ctx, SpvOpNot, dest_type, src[0]);
+ break;
+
+ case nir_op_b2i32:
+ assert(nir_op_infos[alu->op].num_inputs == 1);
+ result = emit_select(ctx, dest_type, src[0],
+ get_ivec_constant(ctx, 32, num_components, 1),
+ get_ivec_constant(ctx, 32, num_components, 0));
+ break;
+
+ case nir_op_b2f32:
+ assert(nir_op_infos[alu->op].num_inputs == 1);
+ result = emit_select(ctx, dest_type, src[0],
+ get_fvec_constant(ctx, 32, num_components, 1),
+ get_fvec_constant(ctx, 32, num_components, 0));
+ break;
+
+#define BUILTIN_UNOP(nir_op, spirv_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 1); \
+ result = emit_builtin_unop(ctx, spirv_op, dest_type, src[0]); \
+ break;
+
+ BUILTIN_UNOP(nir_op_iabs, GLSLstd450SAbs)
+ BUILTIN_UNOP(nir_op_fabs, GLSLstd450FAbs)
+ BUILTIN_UNOP(nir_op_fsqrt, GLSLstd450Sqrt)
+ BUILTIN_UNOP(nir_op_frsq, GLSLstd450InverseSqrt)
+ BUILTIN_UNOP(nir_op_flog2, GLSLstd450Log2)
+ BUILTIN_UNOP(nir_op_fexp2, GLSLstd450Exp2)
+ BUILTIN_UNOP(nir_op_ffract, GLSLstd450Fract)
+ BUILTIN_UNOP(nir_op_ffloor, GLSLstd450Floor)
+ BUILTIN_UNOP(nir_op_fceil, GLSLstd450Ceil)
+ BUILTIN_UNOP(nir_op_ftrunc, GLSLstd450Trunc)
+ BUILTIN_UNOP(nir_op_fround_even, GLSLstd450RoundEven)
+ BUILTIN_UNOP(nir_op_fsign, GLSLstd450FSign)
+ BUILTIN_UNOP(nir_op_fsin, GLSLstd450Sin)
+ BUILTIN_UNOP(nir_op_fcos, GLSLstd450Cos)
+#undef BUILTIN_UNOP
+
+ case nir_op_frcp:
+ assert(nir_op_infos[alu->op].num_inputs == 1);
+ result = emit_binop(ctx, SpvOpFDiv, dest_type,
+ get_fvec_constant(ctx, bit_size, num_components, 1),
+ src[0]);
+ break;
+
+ case nir_op_f2b1:
+ assert(nir_op_infos[alu->op].num_inputs == 1);
+ result = emit_binop(ctx, SpvOpFOrdNotEqual, dest_type, src[0],
+ get_fvec_constant(ctx,
+ nir_src_bit_size(alu->src[0].src),
+ num_components, 0));
+ break;
+ case nir_op_i2b1:
+ assert(nir_op_infos[alu->op].num_inputs == 1);
+ result = emit_binop(ctx, SpvOpINotEqual, dest_type, src[0],
+ get_ivec_constant(ctx,
+ nir_src_bit_size(alu->src[0].src),
+ num_components, 0));
+ break;
+
+
+#define BINOP(nir_op, spirv_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 2); \
+ result = emit_binop(ctx, spirv_op, dest_type, src[0], src[1]); \
+ break;
+
+ BINOP(nir_op_iadd, SpvOpIAdd)
+ BINOP(nir_op_isub, SpvOpISub)
+ BINOP(nir_op_imul, SpvOpIMul)
+ BINOP(nir_op_idiv, SpvOpSDiv)
+ BINOP(nir_op_udiv, SpvOpUDiv)
+ BINOP(nir_op_umod, SpvOpUMod)
+ BINOP(nir_op_fadd, SpvOpFAdd)
+ BINOP(nir_op_fsub, SpvOpFSub)
+ BINOP(nir_op_fmul, SpvOpFMul)
+ BINOP(nir_op_fdiv, SpvOpFDiv)
+ BINOP(nir_op_fmod, SpvOpFMod)
+ BINOP(nir_op_ilt, SpvOpSLessThan)
+ BINOP(nir_op_ige, SpvOpSGreaterThanEqual)
+ BINOP(nir_op_uge, SpvOpUGreaterThanEqual)
+ BINOP(nir_op_flt, SpvOpFOrdLessThan)
+ BINOP(nir_op_fge, SpvOpFOrdGreaterThanEqual)
+ BINOP(nir_op_feq, SpvOpFOrdEqual)
+ BINOP(nir_op_fne, SpvOpFOrdNotEqual)
+ BINOP(nir_op_ishl, SpvOpShiftLeftLogical)
+ BINOP(nir_op_ishr, SpvOpShiftRightArithmetic)
+ BINOP(nir_op_ushr, SpvOpShiftRightLogical)
+#undef BINOP
+
+#define BINOP_LOG(nir_op, spv_op, spv_log_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 2); \
+ if (nir_src_bit_size(alu->src[0].src) == 1) \
+ result = emit_binop(ctx, spv_log_op, dest_type, src[0], src[1]); \
+ else \
+ result = emit_binop(ctx, spv_op, dest_type, src[0], src[1]); \
+ break;
+
+ BINOP_LOG(nir_op_iand, SpvOpBitwiseAnd, SpvOpLogicalAnd)
+ BINOP_LOG(nir_op_ior, SpvOpBitwiseOr, SpvOpLogicalOr)
+ BINOP_LOG(nir_op_ieq, SpvOpIEqual, SpvOpLogicalEqual)
+ BINOP_LOG(nir_op_ine, SpvOpINotEqual, SpvOpLogicalNotEqual)
+#undef BINOP_LOG
+
+#define BUILTIN_BINOP(nir_op, spirv_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 2); \
+ result = emit_builtin_binop(ctx, spirv_op, dest_type, src[0], src[1]); \
+ break;
+
+ BUILTIN_BINOP(nir_op_fmin, GLSLstd450FMin)
+ BUILTIN_BINOP(nir_op_fmax, GLSLstd450FMax)
+#undef BUILTIN_BINOP
+
+ case nir_op_fdot2:
+ case nir_op_fdot3:
+ case nir_op_fdot4:
+ assert(nir_op_infos[alu->op].num_inputs == 2);
+ result = emit_binop(ctx, SpvOpDot, dest_type, src[0], src[1]);
+ break;
+
+ case nir_op_fdph:
+ unreachable("should already be lowered away");
+
+ case nir_op_seq:
+ case nir_op_sne:
+ case nir_op_slt:
+ case nir_op_sge: {
+ assert(nir_op_infos[alu->op].num_inputs == 2);
+ int num_components = nir_dest_num_components(alu->dest.dest);
+ SpvId bool_type = get_bvec_type(ctx, num_components);
+
+ SpvId zero = emit_float_const(ctx, bit_size, 0.0f);
+ SpvId one = emit_float_const(ctx, bit_size, 1.0f);
+ if (num_components > 1) {
+ SpvId zero_comps[num_components], one_comps[num_components];
+ for (int i = 0; i < num_components; i++) {
+ zero_comps[i] = zero;
+ one_comps[i] = one;
+ }
+
+ zero = spirv_builder_const_composite(&ctx->builder, dest_type,
+ zero_comps, num_components);
+ one = spirv_builder_const_composite(&ctx->builder, dest_type,
+ one_comps, num_components);
+ }
+
+ SpvOp op;
+ switch (alu->op) {
+ case nir_op_seq: op = SpvOpFOrdEqual; break;
+ case nir_op_sne: op = SpvOpFOrdNotEqual; break;
+ case nir_op_slt: op = SpvOpFOrdLessThan; break;
+ case nir_op_sge: op = SpvOpFOrdGreaterThanEqual; break;
+ default: unreachable("unexpected op");
+ }
+
+ result = emit_binop(ctx, op, bool_type, src[0], src[1]);
+ result = emit_select(ctx, dest_type, result, one, zero);
+ }
+ break;
+
+ case nir_op_flrp:
+ assert(nir_op_infos[alu->op].num_inputs == 3);
+ result = emit_builtin_triop(ctx, GLSLstd450FMix, dest_type,
+ src[0], src[1], src[2]);
+ break;
+
+ case nir_op_fcsel:
+ result = emit_binop(ctx, SpvOpFOrdGreaterThan,
+ get_bvec_type(ctx, num_components),
+ src[0],
+ get_fvec_constant(ctx,
+ nir_src_bit_size(alu->src[0].src),
+ num_components, 0));
+ result = emit_select(ctx, dest_type, result, src[1], src[2]);
+ break;
+
+ case nir_op_bcsel:
+ assert(nir_op_infos[alu->op].num_inputs == 3);
+ result = emit_select(ctx, dest_type, src[0], src[1], src[2]);
+ break;
+
+ case nir_op_bany_fnequal2:
+ case nir_op_bany_fnequal3:
+ case nir_op_bany_fnequal4:
+ assert(nir_op_infos[alu->op].num_inputs == 2);
+ assert(alu_instr_src_components(alu, 0) ==
+ alu_instr_src_components(alu, 1));
+ result = emit_binop(ctx, SpvOpFOrdNotEqual,
+ get_bvec_type(ctx, alu_instr_src_components(alu, 0)),
+ src[0], src[1]);
+ result = emit_unop(ctx, SpvOpAny, dest_type, result);
+ break;
+
+ case nir_op_ball_fequal2:
+ case nir_op_ball_fequal3:
+ case nir_op_ball_fequal4:
+ assert(nir_op_infos[alu->op].num_inputs == 2);
+ assert(alu_instr_src_components(alu, 0) ==
+ alu_instr_src_components(alu, 1));
+ result = emit_binop(ctx, SpvOpFOrdEqual,
+ get_bvec_type(ctx, alu_instr_src_components(alu, 0)),
+ src[0], src[1]);
+ result = emit_unop(ctx, SpvOpAll, dest_type, result);
+ break;
+
+ case nir_op_bany_inequal2:
+ case nir_op_bany_inequal3:
+ case nir_op_bany_inequal4:
+ assert(nir_op_infos[alu->op].num_inputs == 2);
+ assert(alu_instr_src_components(alu, 0) ==
+ alu_instr_src_components(alu, 1));
+ result = emit_binop(ctx, SpvOpINotEqual,
+ get_bvec_type(ctx, alu_instr_src_components(alu, 0)),
+ src[0], src[1]);
+ result = emit_unop(ctx, SpvOpAny, dest_type, result);
+ break;
+
+ case nir_op_ball_iequal2:
+ case nir_op_ball_iequal3:
+ case nir_op_ball_iequal4:
+ assert(nir_op_infos[alu->op].num_inputs == 2);
+ assert(alu_instr_src_components(alu, 0) ==
+ alu_instr_src_components(alu, 1));
+ result = emit_binop(ctx, SpvOpIEqual,
+ get_bvec_type(ctx, alu_instr_src_components(alu, 0)),
+ src[0], src[1]);
+ result = emit_unop(ctx, SpvOpAll, dest_type, result);
+ break;
+
+ case nir_op_vec2:
+ case nir_op_vec3:
+ case nir_op_vec4: {
+ int num_inputs = nir_op_infos[alu->op].num_inputs;
+ assert(2 <= num_inputs && num_inputs <= 4);
+ result = spirv_builder_emit_composite_construct(&ctx->builder, dest_type,
+ src, num_inputs);
+ }
+ break;
+
+ default:
+ fprintf(stderr, "emit_alu: not implemented (%s)\n",
+ nir_op_infos[alu->op].name);
+
+ unreachable("unsupported opcode");
+ return;
+ }
+
+ store_alu_result(ctx, alu, result);
+}
+
+static void
+emit_load_const(struct ntv_context *ctx, nir_load_const_instr *load_const)
+{
+ unsigned bit_size = load_const->def.bit_size;
+ unsigned num_components = load_const->def.num_components;
+
+ SpvId constant;
+ if (num_components > 1) {
+ SpvId components[num_components];
+ SpvId type;
+ if (bit_size == 1) {
+ for (int i = 0; i < num_components; i++)
+ components[i] = spirv_builder_const_bool(&ctx->builder,
+ load_const->value[i].b);
+
+ type = get_bvec_type(ctx, num_components);
+ } else {
+ for (int i = 0; i < num_components; i++)
+ components[i] = emit_uint_const(ctx, bit_size,
+ load_const->value[i].u32);
+
+ type = get_uvec_type(ctx, bit_size, num_components);
+ }
+ constant = spirv_builder_const_composite(&ctx->builder, type,
+ components, num_components);
+ } else {
+ assert(num_components == 1);
+ if (bit_size == 1)
+ constant = spirv_builder_const_bool(&ctx->builder,
+ load_const->value[0].b);
+ else
+ constant = emit_uint_const(ctx, bit_size, load_const->value[0].u32);
+ }
+
+ store_ssa_def(ctx, &load_const->def, constant);
+}
+
+static void
+emit_load_ubo(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ nir_const_value *const_block_index = nir_src_as_const_value(intr->src[0]);
+ assert(const_block_index); // no dynamic indexing for now
+ assert(const_block_index->u32 == 0); // we only support the default UBO for now
+
+ nir_const_value *const_offset = nir_src_as_const_value(intr->src[1]);
+ if (const_offset) {
+ SpvId uvec4_type = get_uvec_type(ctx, 32, 4);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassUniform,
+ uvec4_type);
+
+ unsigned idx = const_offset->u32;
+ SpvId member = emit_uint_const(ctx, 32, 0);
+ SpvId offset = emit_uint_const(ctx, 32, idx);
+ SpvId offsets[] = { member, offset };
+ SpvId ptr = spirv_builder_emit_access_chain(&ctx->builder, pointer_type,
+ ctx->ubos[0], offsets,
+ ARRAY_SIZE(offsets));
+ SpvId result = spirv_builder_emit_load(&ctx->builder, uvec4_type, ptr);
+
+ SpvId type = get_dest_uvec_type(ctx, &intr->dest);
+ unsigned num_components = nir_dest_num_components(intr->dest);
+ if (num_components == 1) {
+ uint32_t components[] = { 0 };
+ result = spirv_builder_emit_composite_extract(&ctx->builder,
+ type,
+ result, components,
+ 1);
+ } else if (num_components < 4) {
+ SpvId constituents[num_components];
+ SpvId uint_type = spirv_builder_type_uint(&ctx->builder, 32);
+ for (uint32_t i = 0; i < num_components; ++i)
+ constituents[i] = spirv_builder_emit_composite_extract(&ctx->builder,
+ uint_type,
+ result, &i,
+ 1);
+
+ result = spirv_builder_emit_composite_construct(&ctx->builder,
+ type,
+ constituents,
+ num_components);
+ }
+
+ if (nir_dest_bit_size(intr->dest) == 1)
+ result = uvec_to_bvec(ctx, result, num_components);
+
+ store_dest(ctx, &intr->dest, result, nir_type_uint);
+ } else
+ unreachable("uniform-addressing not yet supported");
+}
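+
+/* Note that the constant offset source above is consumed directly as an
+ * index into the uvec4 array declared in emit_ubo, i.e. it counts 16-byte
+ * vec4 slots rather than bytes: slot 1 covers bytes 16..31 of the buffer. */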
+
+static void
+emit_discard(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ assert(ctx->block_started);
+ spirv_builder_emit_kill(&ctx->builder);
+   /* discard is weird in NIR, so let's just create an unreachable block after
+      it and hope that the Vulkan driver will DCE any instructions in it. */
+ spirv_builder_label(&ctx->builder, spirv_builder_new_id(&ctx->builder));
+}
+
+static void
+emit_load_deref(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+   SpvId ptr = get_src(ctx, &intr->src[0]);
+
+ nir_variable *var = nir_intrinsic_get_var(intr, 0);
+ SpvId result = spirv_builder_emit_load(&ctx->builder,
+ get_glsl_type(ctx, var->type),
+ ptr);
+ unsigned num_components = nir_dest_num_components(intr->dest);
+ unsigned bit_size = nir_dest_bit_size(intr->dest);
+ result = bitcast_to_uvec(ctx, result, bit_size, num_components);
+ store_dest(ctx, &intr->dest, result, nir_type_uint);
+}
+
+static void
+emit_store_deref(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ SpvId ptr = get_src(ctx, &intr->src[0]);
+ SpvId src = get_src(ctx, &intr->src[1]);
+
+ nir_variable *var = nir_intrinsic_get_var(intr, 0);
+ SpvId type = get_glsl_type(ctx, glsl_without_array(var->type));
+ SpvId result = emit_bitcast(ctx, type, src);
+ spirv_builder_emit_store(&ctx->builder, ptr, result);
+}
+
+static SpvId
+create_builtin_var(struct ntv_context *ctx, SpvId var_type,
+ SpvStorageClass storage_class,
+ const char *name, SpvBuiltIn builtin)
+{
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ storage_class,
+ var_type);
+ SpvId var = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ storage_class);
+ spirv_builder_emit_name(&ctx->builder, var, name);
+ spirv_builder_emit_builtin(&ctx->builder, var, builtin);
+
+ assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
+ ctx->entry_ifaces[ctx->num_entry_ifaces++] = var;
+ return var;
+}
+
+static void
+emit_load_front_face(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ SpvId var_type = spirv_builder_type_bool(&ctx->builder);
+ if (!ctx->front_face_var)
+ ctx->front_face_var = create_builtin_var(ctx, var_type,
+ SpvStorageClassInput,
+ "gl_FrontFacing",
+ SpvBuiltInFrontFacing);
+
+ SpvId result = spirv_builder_emit_load(&ctx->builder, var_type,
+ ctx->front_face_var);
+ assert(1 == nir_dest_num_components(intr->dest));
+ store_dest(ctx, &intr->dest, result, nir_type_bool);
+}
+
+static void
+emit_load_instance_id(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ SpvId var_type = spirv_builder_type_uint(&ctx->builder, 32);
+ if (!ctx->instance_id_var)
+ ctx->instance_id_var = create_builtin_var(ctx, var_type,
+ SpvStorageClassInput,
+ "gl_InstanceId",
+ SpvBuiltInInstanceIndex);
+
+ SpvId result = spirv_builder_emit_load(&ctx->builder, var_type,
+ ctx->instance_id_var);
+ assert(1 == nir_dest_num_components(intr->dest));
+ store_dest(ctx, &intr->dest, result, nir_type_uint);
+}
+
+static void
+emit_load_vertex_id(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ SpvId var_type = spirv_builder_type_uint(&ctx->builder, 32);
+ if (!ctx->vertex_id_var)
+ ctx->vertex_id_var = create_builtin_var(ctx, var_type,
+ SpvStorageClassInput,
+ "gl_VertexID",
+ SpvBuiltInVertexIndex);
+
+ SpvId result = spirv_builder_emit_load(&ctx->builder, var_type,
+ ctx->vertex_id_var);
+ assert(1 == nir_dest_num_components(intr->dest));
+ store_dest(ctx, &intr->dest, result, nir_type_uint);
+}
+
+static void
+emit_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ switch (intr->intrinsic) {
+ case nir_intrinsic_load_ubo:
+ emit_load_ubo(ctx, intr);
+ break;
+
+ case nir_intrinsic_discard:
+ emit_discard(ctx, intr);
+ break;
+
+ case nir_intrinsic_load_deref:
+ emit_load_deref(ctx, intr);
+ break;
+
+ case nir_intrinsic_store_deref:
+ emit_store_deref(ctx, intr);
+ break;
+
+ case nir_intrinsic_load_front_face:
+ emit_load_front_face(ctx, intr);
+ break;
+
+ case nir_intrinsic_load_instance_id:
+ emit_load_instance_id(ctx, intr);
+ break;
+
+ case nir_intrinsic_load_vertex_id:
+ emit_load_vertex_id(ctx, intr);
+ break;
+
+ default:
+ fprintf(stderr, "emit_intrinsic: not implemented (%s)\n",
+ nir_intrinsic_infos[intr->intrinsic].name);
+ unreachable("unsupported intrinsic");
+ }
+}
+
+static void
+emit_undef(struct ntv_context *ctx, nir_ssa_undef_instr *undef)
+{
+ SpvId type = get_uvec_type(ctx, undef->def.bit_size,
+ undef->def.num_components);
+
+ store_ssa_def(ctx, &undef->def,
+ spirv_builder_emit_undef(&ctx->builder, type));
+}
+
+static SpvId
+get_src_float(struct ntv_context *ctx, nir_src *src)
+{
+ SpvId def = get_src(ctx, src);
+ unsigned num_components = nir_src_num_components(*src);
+ unsigned bit_size = nir_src_bit_size(*src);
+ return bitcast_to_fvec(ctx, def, bit_size, num_components);
+}
+
+static SpvId
+get_src_int(struct ntv_context *ctx, nir_src *src)
+{
+ SpvId def = get_src(ctx, src);
+ unsigned num_components = nir_src_num_components(*src);
+ unsigned bit_size = nir_src_bit_size(*src);
+ return bitcast_to_ivec(ctx, def, bit_size, num_components);
+}
+
+static void
+emit_tex(struct ntv_context *ctx, nir_tex_instr *tex)
+{
+ assert(tex->op == nir_texop_tex ||
+ tex->op == nir_texop_txb ||
+ tex->op == nir_texop_txl ||
+ tex->op == nir_texop_txd ||
+ tex->op == nir_texop_txf ||
+ tex->op == nir_texop_txs);
+ assert(tex->texture_index == tex->sampler_index);
+
+ SpvId coord = 0, proj = 0, bias = 0, lod = 0, dref = 0, dx = 0, dy = 0,
+ offset = 0;
+ unsigned coord_components = 0;
+ for (unsigned i = 0; i < tex->num_srcs; i++) {
+ switch (tex->src[i].src_type) {
+ case nir_tex_src_coord:
+ if (tex->op == nir_texop_txf)
+ coord = get_src_int(ctx, &tex->src[i].src);
+ else
+ coord = get_src_float(ctx, &tex->src[i].src);
+ coord_components = nir_src_num_components(tex->src[i].src);
+ break;
+
+ case nir_tex_src_projector:
+ assert(nir_src_num_components(tex->src[i].src) == 1);
+ proj = get_src_float(ctx, &tex->src[i].src);
+ assert(proj != 0);
+ break;
+
+ case nir_tex_src_offset:
+ offset = get_src_int(ctx, &tex->src[i].src);
+ break;
+
+ case nir_tex_src_bias:
+ assert(tex->op == nir_texop_txb);
+ bias = get_src_float(ctx, &tex->src[i].src);
+ assert(bias != 0);
+ break;
+
+ case nir_tex_src_lod:
+ assert(nir_src_num_components(tex->src[i].src) == 1);
+ if (tex->op == nir_texop_txf ||
+ tex->op == nir_texop_txs)
+ lod = get_src_int(ctx, &tex->src[i].src);
+ else
+ lod = get_src_float(ctx, &tex->src[i].src);
+ assert(lod != 0);
+ break;
+
+ case nir_tex_src_comparator:
+ assert(nir_src_num_components(tex->src[i].src) == 1);
+ dref = get_src_float(ctx, &tex->src[i].src);
+ assert(dref != 0);
+ break;
+
+ case nir_tex_src_ddx:
+ dx = get_src_float(ctx, &tex->src[i].src);
+ assert(dx != 0);
+ break;
+
+ case nir_tex_src_ddy:
+ dy = get_src_float(ctx, &tex->src[i].src);
+ assert(dy != 0);
+ break;
+
+ default:
+ fprintf(stderr, "texture source: %d\n", tex->src[i].src_type);
+ unreachable("unknown texture source");
+ }
+ }
+
+ if (lod == 0 && ctx->stage != MESA_SHADER_FRAGMENT) {
+ lod = emit_float_const(ctx, 32, 0.0f);
+ assert(lod != 0);
+ }
+
+ SpvId image_type = ctx->image_types[tex->texture_index];
+ SpvId sampled_type = spirv_builder_type_sampled_image(&ctx->builder,
+ image_type);
+
+ assert(ctx->samplers_used & (1u << tex->texture_index));
+ SpvId load = spirv_builder_emit_load(&ctx->builder, sampled_type,
+ ctx->samplers[tex->texture_index]);
+
+ SpvId dest_type = get_dest_type(ctx, &tex->dest, tex->dest_type);
+
+ if (tex->op == nir_texop_txs) {
+ SpvId image = spirv_builder_emit_image(&ctx->builder, image_type, load);
+ SpvId result = spirv_builder_emit_image_query_size(&ctx->builder,
+ dest_type, image,
+ lod);
+ store_dest(ctx, &tex->dest, result, tex->dest_type);
+ return;
+ }
+
+ if (proj && coord_components > 0) {
+ SpvId constituents[coord_components + 1];
+ if (coord_components == 1)
+ constituents[0] = coord;
+ else {
+ assert(coord_components > 1);
+ SpvId float_type = spirv_builder_type_float(&ctx->builder, 32);
+ for (uint32_t i = 0; i < coord_components; ++i)
+ constituents[i] = spirv_builder_emit_composite_extract(&ctx->builder,
+ float_type,
+ coord,
+ &i, 1);
+ }
+
+ constituents[coord_components++] = proj;
+
+ SpvId vec_type = get_fvec_type(ctx, 32, coord_components);
+ coord = spirv_builder_emit_composite_construct(&ctx->builder,
+ vec_type,
+ constituents,
+ coord_components);
+ }
+
+ SpvId actual_dest_type = dest_type;
+ if (dref)
+ actual_dest_type = spirv_builder_type_float(&ctx->builder, 32);
+
+ SpvId result;
+ if (tex->op == nir_texop_txf) {
+ SpvId image = spirv_builder_emit_image(&ctx->builder, image_type, load);
+ result = spirv_builder_emit_image_fetch(&ctx->builder, dest_type,
+ image, coord, lod);
+ } else {
+ result = spirv_builder_emit_image_sample(&ctx->builder,
+ actual_dest_type, load,
+ coord,
+ proj != 0,
+ lod, bias, dref, dx, dy,
+ offset);
+ }
+
+ spirv_builder_emit_decoration(&ctx->builder, result,
+ SpvDecorationRelaxedPrecision);
+
+ if (dref && nir_dest_num_components(tex->dest) > 1) {
+ SpvId components[4] = { result, result, result, result };
+ result = spirv_builder_emit_composite_construct(&ctx->builder,
+ dest_type,
+ components,
+ 4);
+ }
+
+ store_dest(ctx, &tex->dest, result, tex->dest_type);
+}
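+
+/* When a projector is present it is appended as an extra coordinate
+ * component above, so e.g. a 2D projective lookup passes (s, t, q); the
+ * SPIR-V Proj image-sample variants then divide by that last component. */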
+
+static void
+start_block(struct ntv_context *ctx, SpvId label)
+{
+ /* terminate previous block if needed */
+ if (ctx->block_started)
+ spirv_builder_emit_branch(&ctx->builder, label);
+
+ /* start new block */
+ spirv_builder_label(&ctx->builder, label);
+ ctx->block_started = true;
+}
+
+static void
+branch(struct ntv_context *ctx, SpvId label)
+{
+ assert(ctx->block_started);
+ spirv_builder_emit_branch(&ctx->builder, label);
+ ctx->block_started = false;
+}
+
+static void
+branch_conditional(struct ntv_context *ctx, SpvId condition, SpvId then_id,
+ SpvId else_id)
+{
+ assert(ctx->block_started);
+ spirv_builder_emit_branch_conditional(&ctx->builder, condition,
+ then_id, else_id);
+ ctx->block_started = false;
+}
+
+static void
+emit_jump(struct ntv_context *ctx, nir_jump_instr *jump)
+{
+ switch (jump->type) {
+ case nir_jump_break:
+ assert(ctx->loop_break);
+ branch(ctx, ctx->loop_break);
+ break;
+
+ case nir_jump_continue:
+ assert(ctx->loop_cont);
+ branch(ctx, ctx->loop_cont);
+ break;
+
+ default:
+ unreachable("Unsupported jump type\n");
+ }
+}
+
+static void
+emit_deref_var(struct ntv_context *ctx, nir_deref_instr *deref)
+{
+ assert(deref->deref_type == nir_deref_type_var);
+
+ struct hash_entry *he = _mesa_hash_table_search(ctx->vars, deref->var);
+ assert(he);
+ SpvId result = (SpvId)(intptr_t)he->data;
+ store_dest_raw(ctx, &deref->dest, result);
+}
+
+static void
+emit_deref_array(struct ntv_context *ctx, nir_deref_instr *deref)
+{
+ assert(deref->deref_type == nir_deref_type_array);
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+
+ SpvStorageClass storage_class;
+ switch (var->data.mode) {
+ case nir_var_shader_in:
+ storage_class = SpvStorageClassInput;
+ break;
+
+ case nir_var_shader_out:
+ storage_class = SpvStorageClassOutput;
+ break;
+
+ default:
+ unreachable("Unsupported nir_variable_mode\n");
+ }
+
+ SpvId index = get_src(ctx, &deref->arr.index);
+
+ SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder,
+ storage_class,
+ get_glsl_type(ctx, deref->type));
+
+ SpvId result = spirv_builder_emit_access_chain(&ctx->builder,
+ ptr_type,
+ get_src(ctx, &deref->parent),
+ &index, 1);
+ /* uint is a bit of a lie here, it's really just an opaque type */
+ store_dest(ctx, &deref->dest, result, nir_type_uint);
+}
+
+static void
+emit_deref(struct ntv_context *ctx, nir_deref_instr *deref)
+{
+ switch (deref->deref_type) {
+ case nir_deref_type_var:
+ emit_deref_var(ctx, deref);
+ break;
+
+ case nir_deref_type_array:
+ emit_deref_array(ctx, deref);
+ break;
+
+ default:
+ unreachable("unexpected deref_type");
+ }
+}
+
+static void
+emit_block(struct ntv_context *ctx, struct nir_block *block)
+{
+ start_block(ctx, block_label(ctx, block));
+ nir_foreach_instr(instr, block) {
+ switch (instr->type) {
+ case nir_instr_type_alu:
+ emit_alu(ctx, nir_instr_as_alu(instr));
+ break;
+ case nir_instr_type_intrinsic:
+ emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
+ break;
+ case nir_instr_type_load_const:
+ emit_load_const(ctx, nir_instr_as_load_const(instr));
+ break;
+ case nir_instr_type_ssa_undef:
+ emit_undef(ctx, nir_instr_as_ssa_undef(instr));
+ break;
+ case nir_instr_type_tex:
+ emit_tex(ctx, nir_instr_as_tex(instr));
+ break;
+ case nir_instr_type_phi:
+ unreachable("nir_instr_type_phi not supported");
+ break;
+ case nir_instr_type_jump:
+ emit_jump(ctx, nir_instr_as_jump(instr));
+ break;
+ case nir_instr_type_call:
+ unreachable("nir_instr_type_call not supported");
+ break;
+ case nir_instr_type_parallel_copy:
+ unreachable("nir_instr_type_parallel_copy not supported");
+ break;
+ case nir_instr_type_deref:
+ emit_deref(ctx, nir_instr_as_deref(instr));
+ break;
+ }
+ }
+}
+
+static void
+emit_cf_list(struct ntv_context *ctx, struct exec_list *list);
+
+static SpvId
+get_src_bool(struct ntv_context *ctx, nir_src *src)
+{
+ assert(nir_src_bit_size(*src) == 1);
+ return get_src(ctx, src);
+}
+
+static void
+emit_if(struct ntv_context *ctx, nir_if *if_stmt)
+{
+ SpvId condition = get_src_bool(ctx, &if_stmt->condition);
+
+ SpvId header_id = spirv_builder_new_id(&ctx->builder);
+ SpvId then_id = block_label(ctx, nir_if_first_then_block(if_stmt));
+ SpvId endif_id = spirv_builder_new_id(&ctx->builder);
+ SpvId else_id = endif_id;
+
+ bool has_else = !exec_list_is_empty(&if_stmt->else_list);
+ if (has_else) {
+ assert(nir_if_first_else_block(if_stmt)->index < ctx->num_blocks);
+ else_id = block_label(ctx, nir_if_first_else_block(if_stmt));
+ }
+
+   /* create a header-block; SPIR-V's structured control-flow requires
+    * OpSelectionMerge here, immediately before the conditional branch,
+    * naming the block where the two paths reconverge */
+ start_block(ctx, header_id);
+ spirv_builder_emit_selection_merge(&ctx->builder, endif_id,
+ SpvSelectionControlMaskNone);
+ branch_conditional(ctx, condition, then_id, else_id);
+
+ emit_cf_list(ctx, &if_stmt->then_list);
+
+ if (has_else) {
+ if (ctx->block_started)
+ branch(ctx, endif_id);
+
+ emit_cf_list(ctx, &if_stmt->else_list);
+ }
+
+ start_block(ctx, endif_id);
+}
+
+static void
+emit_loop(struct ntv_context *ctx, nir_loop *loop)
+{
+ SpvId header_id = spirv_builder_new_id(&ctx->builder);
+ SpvId begin_id = block_label(ctx, nir_loop_first_block(loop));
+ SpvId break_id = spirv_builder_new_id(&ctx->builder);
+ SpvId cont_id = spirv_builder_new_id(&ctx->builder);
+
+   /* create a header-block; OpLoopMerge must be its second-to-last
+    * instruction, declaring the merge (break) and continue blocks of the
+    * loop up front */
+ start_block(ctx, header_id);
+ spirv_builder_loop_merge(&ctx->builder, break_id, cont_id, SpvLoopControlMaskNone);
+ branch(ctx, begin_id);
+
+ SpvId save_break = ctx->loop_break;
+ SpvId save_cont = ctx->loop_cont;
+ ctx->loop_break = break_id;
+ ctx->loop_cont = cont_id;
+
+ emit_cf_list(ctx, &loop->body);
+
+ ctx->loop_break = save_break;
+ ctx->loop_cont = save_cont;
+
+ branch(ctx, cont_id);
+ start_block(ctx, cont_id);
+ branch(ctx, header_id);
+
+ start_block(ctx, break_id);
+}
+
+static void
+emit_cf_list(struct ntv_context *ctx, struct exec_list *list)
+{
+ foreach_list_typed(nir_cf_node, node, node, list) {
+ switch (node->type) {
+ case nir_cf_node_block:
+ emit_block(ctx, nir_cf_node_as_block(node));
+ break;
+
+ case nir_cf_node_if:
+ emit_if(ctx, nir_cf_node_as_if(node));
+ break;
+
+ case nir_cf_node_loop:
+ emit_loop(ctx, nir_cf_node_as_loop(node));
+ break;
+
+ case nir_cf_node_function:
+ unreachable("nir_cf_node_function not supported");
+ break;
+ }
+ }
+}
+
+struct spirv_shader *
+nir_to_spirv(struct nir_shader *s)
+{
+ struct spirv_shader *ret = NULL;
+
+ struct ntv_context ctx = {};
+
+ switch (s->info.stage) {
+ case MESA_SHADER_VERTEX:
+ case MESA_SHADER_FRAGMENT:
+ case MESA_SHADER_COMPUTE:
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilityShader);
+ break;
+
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilityTessellation);
+ break;
+
+ case MESA_SHADER_GEOMETRY:
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilityGeometry);
+ break;
+
+ default:
+ unreachable("invalid stage");
+ }
+
+ // TODO: only enable when needed
+ if (s->info.stage == MESA_SHADER_FRAGMENT) {
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilitySampled1D);
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilityImageQuery);
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilityDerivativeControl);
+ }
+
+ ctx.stage = s->info.stage;
+ ctx.GLSL_std_450 = spirv_builder_import(&ctx.builder, "GLSL.std.450");
+ spirv_builder_emit_source(&ctx.builder, SpvSourceLanguageGLSL, 450);
+
+ spirv_builder_emit_mem_model(&ctx.builder, SpvAddressingModelLogical,
+ SpvMemoryModelGLSL450);
+
+ SpvExecutionModel exec_model;
+ switch (s->info.stage) {
+ case MESA_SHADER_VERTEX:
+ exec_model = SpvExecutionModelVertex;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ exec_model = SpvExecutionModelTessellationControl;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ exec_model = SpvExecutionModelTessellationEvaluation;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ exec_model = SpvExecutionModelGeometry;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ exec_model = SpvExecutionModelFragment;
+ break;
+ case MESA_SHADER_COMPUTE:
+ exec_model = SpvExecutionModelGLCompute;
+ break;
+ default:
+ unreachable("invalid stage");
+ }
+
+ SpvId type_void = spirv_builder_type_void(&ctx.builder);
+ SpvId type_main = spirv_builder_type_function(&ctx.builder, type_void,
+ NULL, 0);
+ SpvId entry_point = spirv_builder_new_id(&ctx.builder);
+ spirv_builder_emit_name(&ctx.builder, entry_point, "main");
+
+ ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ nir_foreach_variable(var, &s->inputs)
+ emit_input(&ctx, var);
+
+ nir_foreach_variable(var, &s->outputs)
+ emit_output(&ctx, var);
+
+ nir_foreach_variable(var, &s->uniforms)
+ emit_uniform(&ctx, var);
+
+ if (s->info.stage == MESA_SHADER_FRAGMENT) {
+ spirv_builder_emit_exec_mode(&ctx.builder, entry_point,
+ SpvExecutionModeOriginUpperLeft);
+ if (s->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
+ spirv_builder_emit_exec_mode(&ctx.builder, entry_point,
+ SpvExecutionModeDepthReplacing);
+ }
+
+ spirv_builder_function(&ctx.builder, entry_point, type_void,
+ SpvFunctionControlMaskNone,
+ type_main);
+
+ nir_function_impl *entry = nir_shader_get_entrypoint(s);
+ nir_metadata_require(entry, nir_metadata_block_index);
+
+ ctx.defs = (SpvId *)malloc(sizeof(SpvId) * entry->ssa_alloc);
+ if (!ctx.defs)
+ goto fail;
+ ctx.num_defs = entry->ssa_alloc;
+
+ nir_index_local_regs(entry);
+ ctx.regs = malloc(sizeof(SpvId) * entry->reg_alloc);
+ if (!ctx.regs)
+ goto fail;
+ ctx.num_regs = entry->reg_alloc;
+
+ SpvId *block_ids = (SpvId *)malloc(sizeof(SpvId) * entry->num_blocks);
+ if (!block_ids)
+ goto fail;
+
+ for (int i = 0; i < entry->num_blocks; ++i)
+ block_ids[i] = spirv_builder_new_id(&ctx.builder);
+
+ ctx.block_ids = block_ids;
+ ctx.num_blocks = entry->num_blocks;
+
+   /* emit a block just for the variable declarations; SPIR-V requires all
+    * Function-storage variables to be declared at the top of the first
+    * block of the function */
+ start_block(&ctx, spirv_builder_new_id(&ctx.builder));
+ foreach_list_typed(nir_register, reg, node, &entry->registers) {
+ SpvId type = get_uvec_type(&ctx, reg->bit_size, reg->num_components);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx.builder,
+ SpvStorageClassFunction,
+ type);
+ SpvId var = spirv_builder_emit_var(&ctx.builder, pointer_type,
+ SpvStorageClassFunction);
+
+ ctx.regs[reg->index] = var;
+ }
+
+ emit_cf_list(&ctx, &entry->body);
+
+   free(ctx.defs);
+   ctx.defs = NULL;
+   free(ctx.regs);
+   ctx.regs = NULL;
+   free(block_ids);
+   ctx.block_ids = NULL;
+
+   spirv_builder_return(&ctx.builder); // XXX: belongs with proper return handling; good enough for a single entry point
+ spirv_builder_function_end(&ctx.builder);
+
+ spirv_builder_emit_entry_point(&ctx.builder, exec_model, entry_point,
+ "main", ctx.entry_ifaces,
+ ctx.num_entry_ifaces);
+
+ size_t num_words = spirv_builder_get_num_words(&ctx.builder);
+
+ ret = CALLOC_STRUCT(spirv_shader);
+ if (!ret)
+ goto fail;
+
+ ret->words = MALLOC(sizeof(uint32_t) * num_words);
+ if (!ret->words)
+ goto fail;
+
+ ret->num_words = spirv_builder_get_words(&ctx.builder, ret->words, num_words);
+ assert(ret->num_words == num_words);
+
+   _mesa_hash_table_destroy(ctx.vars, NULL);
+
+   return ret;
+
+fail:
+   free(ctx.defs);
+   free(ctx.regs);
+   free((void *)ctx.block_ids);
+
+ if (ret)
+ spirv_shader_delete(ret);
+
+ if (ctx.vars)
+ _mesa_hash_table_destroy(ctx.vars, NULL);
+
+ return NULL;
+}
+
+void
+spirv_shader_delete(struct spirv_shader *s)
+{
+ FREE(s->words);
+ FREE(s);
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.h b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.h
new file mode 100644
index 000000000..de767018c
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NIR_TO_SPIRV_H
+#define NIR_TO_SPIRV_H
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <vulkan/vulkan.h>
+
+#include "compiler/shader_enums.h"
+
+struct spirv_shader {
+ uint32_t *words;
+ size_t num_words;
+};
+
+struct nir_shader;
+
+struct spirv_shader *
+nir_to_spirv(struct nir_shader *s);
+
+void
+spirv_shader_delete(struct spirv_shader *s);
+
+uint32_t
+zink_binding(gl_shader_stage stage, VkDescriptorType type, int index);
+
+bool
+zink_nir_lower_b2b(struct nir_shader *shader);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/spirv_builder.c b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/spirv_builder.c
new file mode 100644
index 000000000..9fb447611
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/spirv_builder.c
@@ -0,0 +1,1074 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "spirv_builder.h"
+
+#include "util/macros.h"
+#include "util/u_bitcast.h"
+#include "util/u_memory.h"
+#include "util/hash_table.h"
+
+#include <stdbool.h>
+#include <inttypes.h>
+#include <string.h>
+
+static bool
+spirv_buffer_grow(struct spirv_buffer *b, size_t needed)
+{
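+   /* grow geometrically (1.5x, with a 64-word floor) so repeated appends
+    * amortize to constant time */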
+ size_t new_room = MAX3(64, (b->room * 3) / 2, needed);
+
+ uint32_t *new_words = realloc(b->words, new_room * sizeof(uint32_t));
+ if (!new_words)
+ return false;
+
+ b->words = new_words;
+ b->room = new_room;
+ return true;
+}
+
+static inline bool
+spirv_buffer_prepare(struct spirv_buffer *b, size_t needed)
+{
+ needed += b->num_words;
+   if (b->room >= needed)
+ return true;
+
+ return spirv_buffer_grow(b, needed);
+}
+
+static inline void
+spirv_buffer_emit_word(struct spirv_buffer *b, uint32_t word)
+{
+ assert(b->num_words < b->room);
+ b->words[b->num_words++] = word;
+}
+
+static int
+spirv_buffer_emit_string(struct spirv_buffer *b, const char *str)
+{
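+   /* pack 4 chars per word, little-endian within each word; e.g. "main"
+    * becomes 0x6e69616d followed by an all-zero terminator word, and the
+    * emitted length (returned below) is 2 words */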
+ int pos = 0;
+ uint32_t word = 0;
+ while (str[pos] != '\0') {
+ word |= str[pos] << (8 * (pos % 4));
+ if (++pos % 4 == 0) {
+ spirv_buffer_prepare(b, 1);
+ spirv_buffer_emit_word(b, word);
+ word = 0;
+ }
+ }
+
+ spirv_buffer_prepare(b, 1);
+ spirv_buffer_emit_word(b, word);
+
+ return 1 + pos / 4;
+}
+
+void
+spirv_builder_emit_cap(struct spirv_builder *b, SpvCapability cap)
+{
+ spirv_buffer_prepare(&b->capabilities, 2);
+ spirv_buffer_emit_word(&b->capabilities, SpvOpCapability | (2 << 16));
+ spirv_buffer_emit_word(&b->capabilities, cap);
+}
+
+void
+spirv_builder_emit_source(struct spirv_builder *b, SpvSourceLanguage lang,
+ uint32_t version)
+{
+ spirv_buffer_prepare(&b->debug_names, 3);
+ spirv_buffer_emit_word(&b->debug_names, SpvOpSource | (3 << 16));
+ spirv_buffer_emit_word(&b->debug_names, lang);
+ spirv_buffer_emit_word(&b->debug_names, version);
+}
+
+void
+spirv_builder_emit_mem_model(struct spirv_builder *b,
+ SpvAddressingModel addr_model,
+ SpvMemoryModel mem_model)
+{
+ spirv_buffer_prepare(&b->memory_model, 3);
+ spirv_buffer_emit_word(&b->memory_model, SpvOpMemoryModel | (3 << 16));
+ spirv_buffer_emit_word(&b->memory_model, addr_model);
+ spirv_buffer_emit_word(&b->memory_model, mem_model);
+}
+
+void
+spirv_builder_emit_entry_point(struct spirv_builder *b,
+ SpvExecutionModel exec_model, SpvId entry_point,
+ const char *name, const SpvId interfaces[],
+ size_t num_interfaces)
+{
+ size_t pos = b->entry_points.num_words;
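+   /* the total word count lives in the upper 16 bits of an instruction's
+    * first word, but the name's length isn't known yet; remember where this
+    * instruction starts and patch the count in below */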
+ spirv_buffer_prepare(&b->entry_points, 3);
+ spirv_buffer_emit_word(&b->entry_points, SpvOpEntryPoint);
+ spirv_buffer_emit_word(&b->entry_points, exec_model);
+ spirv_buffer_emit_word(&b->entry_points, entry_point);
+ int len = spirv_buffer_emit_string(&b->entry_points, name);
+ b->entry_points.words[pos] |= (3 + len + num_interfaces) << 16;
+ spirv_buffer_prepare(&b->entry_points, num_interfaces);
+ for (int i = 0; i < num_interfaces; ++i)
+ spirv_buffer_emit_word(&b->entry_points, interfaces[i]);
+}
+
+void
+spirv_builder_emit_exec_mode(struct spirv_builder *b, SpvId entry_point,
+ SpvExecutionMode exec_mode)
+{
+ spirv_buffer_prepare(&b->exec_modes, 3);
+ spirv_buffer_emit_word(&b->exec_modes, SpvOpExecutionMode | (3 << 16));
+ spirv_buffer_emit_word(&b->exec_modes, entry_point);
+ spirv_buffer_emit_word(&b->exec_modes, exec_mode);
+}
+
+void
+spirv_builder_emit_name(struct spirv_builder *b, SpvId target,
+ const char *name)
+{
+ size_t pos = b->debug_names.num_words;
+ spirv_buffer_prepare(&b->debug_names, 2);
+ spirv_buffer_emit_word(&b->debug_names, SpvOpName);
+ spirv_buffer_emit_word(&b->debug_names, target);
+ int len = spirv_buffer_emit_string(&b->debug_names, name);
+ b->debug_names.words[pos] |= (2 + len) << 16;
+}
+
+static void
+emit_decoration(struct spirv_builder *b, SpvId target,
+ SpvDecoration decoration, const uint32_t extra_operands[],
+ size_t num_extra_operands)
+{
+ int words = 3 + num_extra_operands;
+ spirv_buffer_prepare(&b->decorations, words);
+ spirv_buffer_emit_word(&b->decorations, SpvOpDecorate | (words << 16));
+ spirv_buffer_emit_word(&b->decorations, target);
+ spirv_buffer_emit_word(&b->decorations, decoration);
+ for (int i = 0; i < num_extra_operands; ++i)
+ spirv_buffer_emit_word(&b->decorations, extra_operands[i]);
+}
+
+void
+spirv_builder_emit_decoration(struct spirv_builder *b, SpvId target,
+ SpvDecoration decoration)
+{
+ emit_decoration(b, target, decoration, NULL, 0);
+}
+
+void
+spirv_builder_emit_location(struct spirv_builder *b, SpvId target,
+ uint32_t location)
+{
+ uint32_t args[] = { location };
+ emit_decoration(b, target, SpvDecorationLocation, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_component(struct spirv_builder *b, SpvId target,
+ uint32_t component)
+{
+ uint32_t args[] = { component };
+ emit_decoration(b, target, SpvDecorationComponent, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_builtin(struct spirv_builder *b, SpvId target,
+ SpvBuiltIn builtin)
+{
+ uint32_t args[] = { builtin };
+ emit_decoration(b, target, SpvDecorationBuiltIn, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_descriptor_set(struct spirv_builder *b, SpvId target,
+ uint32_t descriptor_set)
+{
+ uint32_t args[] = { descriptor_set };
+ emit_decoration(b, target, SpvDecorationDescriptorSet, args,
+ ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_binding(struct spirv_builder *b, SpvId target,
+ uint32_t binding)
+{
+ uint32_t args[] = { binding };
+ emit_decoration(b, target, SpvDecorationBinding, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_array_stride(struct spirv_builder *b, SpvId target,
+ uint32_t stride)
+{
+ uint32_t args[] = { stride };
+ emit_decoration(b, target, SpvDecorationArrayStride, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_index(struct spirv_builder *b, SpvId target, int index)
+{
+ uint32_t args[] = { index };
+ emit_decoration(b, target, SpvDecorationIndex, args, ARRAY_SIZE(args));
+}
+
+static void
+emit_member_decoration(struct spirv_builder *b, SpvId target, uint32_t member,
+ SpvDecoration decoration, const uint32_t extra_operands[],
+ size_t num_extra_operands)
+{
+ int words = 4 + num_extra_operands;
+ spirv_buffer_prepare(&b->decorations, words);
+ spirv_buffer_emit_word(&b->decorations,
+ SpvOpMemberDecorate | (words << 16));
+ spirv_buffer_emit_word(&b->decorations, target);
+ spirv_buffer_emit_word(&b->decorations, member);
+ spirv_buffer_emit_word(&b->decorations, decoration);
+ for (int i = 0; i < num_extra_operands; ++i)
+ spirv_buffer_emit_word(&b->decorations, extra_operands[i]);
+}
+
+void
+spirv_builder_emit_member_offset(struct spirv_builder *b, SpvId target,
+ uint32_t member, uint32_t offset)
+{
+ uint32_t args[] = { offset };
+ emit_member_decoration(b, target, member, SpvDecorationOffset,
+ args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_emit_undef(struct spirv_builder *b, SpvId result_type)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 3);
+ spirv_buffer_emit_word(&b->instructions, SpvOpUndef | (3 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ return result;
+}
+
+void
+spirv_builder_function(struct spirv_builder *b, SpvId result,
+ SpvId return_type,
+ SpvFunctionControlMask function_control,
+ SpvId function_type)
+{
+ spirv_buffer_prepare(&b->instructions, 5);
+ spirv_buffer_emit_word(&b->instructions, SpvOpFunction | (5 << 16));
+ spirv_buffer_emit_word(&b->instructions, return_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, function_control);
+ spirv_buffer_emit_word(&b->instructions, function_type);
+}
+
+void
+spirv_builder_function_end(struct spirv_builder *b)
+{
+ spirv_buffer_prepare(&b->instructions, 1);
+ spirv_buffer_emit_word(&b->instructions, SpvOpFunctionEnd | (1 << 16));
+}
+
+void
+spirv_builder_label(struct spirv_builder *b, SpvId label)
+{
+ spirv_buffer_prepare(&b->instructions, 2);
+ spirv_buffer_emit_word(&b->instructions, SpvOpLabel | (2 << 16));
+ spirv_buffer_emit_word(&b->instructions, label);
+}
+
+void
+spirv_builder_return(struct spirv_builder *b)
+{
+ spirv_buffer_prepare(&b->instructions, 1);
+ spirv_buffer_emit_word(&b->instructions, SpvOpReturn | (1 << 16));
+}
+
+SpvId
+spirv_builder_emit_load(struct spirv_builder *b, SpvId result_type,
+ SpvId pointer)
+{
+ return spirv_builder_emit_unop(b, SpvOpLoad, result_type, pointer);
+}
+
+void
+spirv_builder_emit_store(struct spirv_builder *b, SpvId pointer, SpvId object)
+{
+ spirv_buffer_prepare(&b->instructions, 3);
+ spirv_buffer_emit_word(&b->instructions, SpvOpStore | (3 << 16));
+ spirv_buffer_emit_word(&b->instructions, pointer);
+ spirv_buffer_emit_word(&b->instructions, object);
+}
+
+SpvId
+spirv_builder_emit_access_chain(struct spirv_builder *b, SpvId result_type,
+ SpvId base, const SpvId indexes[],
+ size_t num_indexes)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ int words = 4 + num_indexes;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions, SpvOpAccessChain | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, base);
+ for (int i = 0; i < num_indexes; ++i)
+ spirv_buffer_emit_word(&b->instructions, indexes[i]);
+ return result;
+}
+
+
+SpvId
+spirv_builder_emit_unop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 4);
+ spirv_buffer_emit_word(&b->instructions, op | (4 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, operand);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_binop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand0, SpvId operand1)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 5);
+ spirv_buffer_emit_word(&b->instructions, op | (5 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, operand0);
+ spirv_buffer_emit_word(&b->instructions, operand1);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_triop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand0, SpvId operand1, SpvId operand2)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 6);
+ spirv_buffer_emit_word(&b->instructions, op | (6 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, operand0);
+ spirv_buffer_emit_word(&b->instructions, operand1);
+ spirv_buffer_emit_word(&b->instructions, operand2);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_composite_extract(struct spirv_builder *b, SpvId result_type,
+ SpvId composite, const uint32_t indexes[],
+ size_t num_indexes)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ assert(num_indexes > 0);
+ int words = 4 + num_indexes;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions,
+ SpvOpCompositeExtract | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, composite);
+ for (int i = 0; i < num_indexes; ++i)
+ spirv_buffer_emit_word(&b->instructions, indexes[i]);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_composite_construct(struct spirv_builder *b,
+ SpvId result_type,
+ const SpvId constituents[],
+ size_t num_constituents)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ assert(num_constituents > 0);
+ int words = 3 + num_constituents;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions,
+ SpvOpCompositeConstruct | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ for (int i = 0; i < num_constituents; ++i)
+ spirv_buffer_emit_word(&b->instructions, constituents[i]);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_vector_shuffle(struct spirv_builder *b, SpvId result_type,
+ SpvId vector_1, SpvId vector_2,
+ const uint32_t components[],
+ size_t num_components)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ assert(num_components > 0);
+ int words = 5 + num_components;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions, SpvOpVectorShuffle | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, vector_1);
+ spirv_buffer_emit_word(&b->instructions, vector_2);
+ for (int i = 0; i < num_components; ++i)
+ spirv_buffer_emit_word(&b->instructions, components[i]);
+ return result;
+}
+
+void
+spirv_builder_emit_branch(struct spirv_builder *b, SpvId label)
+{
+ spirv_buffer_prepare(&b->instructions, 2);
+ spirv_buffer_emit_word(&b->instructions, SpvOpBranch | (2 << 16));
+ spirv_buffer_emit_word(&b->instructions, label);
+}
+
+void
+spirv_builder_emit_selection_merge(struct spirv_builder *b, SpvId merge_block,
+ SpvSelectionControlMask selection_control)
+{
+ spirv_buffer_prepare(&b->instructions, 3);
+ spirv_buffer_emit_word(&b->instructions, SpvOpSelectionMerge | (3 << 16));
+ spirv_buffer_emit_word(&b->instructions, merge_block);
+ spirv_buffer_emit_word(&b->instructions, selection_control);
+}
+
+void
+spirv_builder_loop_merge(struct spirv_builder *b, SpvId merge_block,
+ SpvId cont_target, SpvLoopControlMask loop_control)
+{
+ spirv_buffer_prepare(&b->instructions, 4);
+ spirv_buffer_emit_word(&b->instructions, SpvOpLoopMerge | (4 << 16));
+ spirv_buffer_emit_word(&b->instructions, merge_block);
+ spirv_buffer_emit_word(&b->instructions, cont_target);
+ spirv_buffer_emit_word(&b->instructions, loop_control);
+}
+
+void
+spirv_builder_emit_branch_conditional(struct spirv_builder *b, SpvId condition,
+ SpvId true_label, SpvId false_label)
+{
+ spirv_buffer_prepare(&b->instructions, 4);
+ spirv_buffer_emit_word(&b->instructions, SpvOpBranchConditional | (4 << 16));
+ spirv_buffer_emit_word(&b->instructions, condition);
+ spirv_buffer_emit_word(&b->instructions, true_label);
+ spirv_buffer_emit_word(&b->instructions, false_label);
+}
+
+SpvId
+spirv_builder_emit_phi(struct spirv_builder *b, SpvId result_type,
+ size_t num_vars, size_t *position)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ assert(num_vars > 0);
+ int words = 3 + 2 * num_vars;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions, SpvOpPhi | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ *position = b->instructions.num_words;
+ for (int i = 0; i < 2 * num_vars; ++i)
+ spirv_buffer_emit_word(&b->instructions, 0);
+ return result;
+}
+
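+/* Usage sketch (illustrative ids): phi operands aren't known until the
+ * predecessor blocks have been emitted, so reserve space now and patch
+ * the operands in afterwards:
+ *
+ *    size_t pos;
+ *    SpvId phi = spirv_builder_emit_phi(b, type, 2, &pos);
+ *    // ...emit the two predecessor blocks...
+ *    spirv_builder_set_phi_operand(b, pos, 0, val_a, label_a);
+ *    spirv_builder_set_phi_operand(b, pos, 1, val_b, label_b);
+ */
+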
+void
+spirv_builder_set_phi_operand(struct spirv_builder *b, size_t position,
+ size_t index, SpvId variable, SpvId parent)
+{
+ b->instructions.words[position + index * 2 + 0] = variable;
+ b->instructions.words[position + index * 2 + 1] = parent;
+}
+
+void
+spirv_builder_emit_kill(struct spirv_builder *b)
+{
+ spirv_buffer_prepare(&b->instructions, 1);
+ spirv_buffer_emit_word(&b->instructions, SpvOpKill | (1 << 16));
+}
+
+SpvId
+spirv_builder_emit_image_sample(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId sampled_image,
+ SpvId coordinate,
+ bool proj,
+ SpvId lod,
+ SpvId bias,
+ SpvId dref,
+ SpvId dx,
+ SpvId dy,
+ SpvId offset)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ int opcode = SpvOpImageSampleImplicitLod;
+ int operands = 5;
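+   /* the Proj-, Dref- and ExplicitLod-variants sit at fixed offsets from
+    * SpvOpImageSampleImplicitLod in the opcode enumeration, so the right
+    * variant can be selected with plain opcode arithmetic */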
+ if (proj)
+ opcode += SpvOpImageSampleProjImplicitLod - SpvOpImageSampleImplicitLod;
+ if (lod || (dx && dy))
+ opcode += SpvOpImageSampleExplicitLod - SpvOpImageSampleImplicitLod;
+ if (dref) {
+ opcode += SpvOpImageSampleDrefImplicitLod - SpvOpImageSampleImplicitLod;
+ operands++;
+ }
+
+ SpvImageOperandsMask operand_mask = SpvImageOperandsMaskNone;
+ SpvId extra_operands[5];
+ int num_extra_operands = 0;
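+   /* slot 0 is reserved for the image-operands mask; the pre-increments
+    * below fill the operand words starting at index 1 */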
+ if (bias) {
+ extra_operands[++num_extra_operands] = bias;
+ operand_mask |= SpvImageOperandsBiasMask;
+ }
+ if (lod) {
+ extra_operands[++num_extra_operands] = lod;
+ operand_mask |= SpvImageOperandsLodMask;
+ } else if (dx && dy) {
+ extra_operands[++num_extra_operands] = dx;
+ extra_operands[++num_extra_operands] = dy;
+ operand_mask |= SpvImageOperandsGradMask;
+ }
+ if (offset) {
+ extra_operands[++num_extra_operands] = offset;
+ operand_mask |= SpvImageOperandsOffsetMask;
+ }
+
+ /* finalize num_extra_operands / extra_operands */
+ if (num_extra_operands > 0) {
+ extra_operands[0] = operand_mask;
+ num_extra_operands++;
+ }
+
+ spirv_buffer_prepare(&b->instructions, operands + num_extra_operands);
+ spirv_buffer_emit_word(&b->instructions, opcode | ((operands + num_extra_operands) << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, sampled_image);
+ spirv_buffer_emit_word(&b->instructions, coordinate);
+ if (dref)
+ spirv_buffer_emit_word(&b->instructions, dref);
+ for (int i = 0; i < num_extra_operands; ++i)
+ spirv_buffer_emit_word(&b->instructions, extra_operands[i]);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_image(struct spirv_builder *b, SpvId result_type,
+ SpvId sampled_image)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 4);
+ spirv_buffer_emit_word(&b->instructions, SpvOpImage | (4 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, sampled_image);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_image_fetch(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId image,
+ SpvId coordinate,
+ SpvId lod)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ SpvId extra_operands[2];
+ int num_extra_operands = 0;
+ if (lod) {
+ extra_operands[0] = SpvImageOperandsLodMask;
+ extra_operands[1] = lod;
+ num_extra_operands = 2;
+ }
+
+ spirv_buffer_prepare(&b->instructions, 5 + num_extra_operands);
+ spirv_buffer_emit_word(&b->instructions, SpvOpImageFetch |
+ ((5 + num_extra_operands) << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, image);
+ spirv_buffer_emit_word(&b->instructions, coordinate);
+ for (int i = 0; i < num_extra_operands; ++i)
+ spirv_buffer_emit_word(&b->instructions, extra_operands[i]);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_image_query_size(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId image,
+ SpvId lod)
+{
+ int opcode = SpvOpImageQuerySize;
+ int words = 4;
+ if (lod) {
+ words++;
+ opcode = SpvOpImageQuerySizeLod;
+ }
+
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions, opcode | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, image);
+
+ if (lod)
+ spirv_buffer_emit_word(&b->instructions, lod);
+
+ return result;
+}
+
+SpvId
+spirv_builder_emit_ext_inst(struct spirv_builder *b, SpvId result_type,
+ SpvId set, uint32_t instruction,
+ const SpvId *args, size_t num_args)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ int words = 5 + num_args;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions, SpvOpExtInst | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, set);
+ spirv_buffer_emit_word(&b->instructions, instruction);
+ for (int i = 0; i < num_args; ++i)
+ spirv_buffer_emit_word(&b->instructions, args[i]);
+ return result;
+}
+
+struct spirv_type {
+ SpvOp op;
+ uint32_t args[8];
+ size_t num_args;
+
+ SpvId type;
+};
+
+static uint32_t
+non_aggregate_type_hash(const void *arg)
+{
+ const struct spirv_type *type = arg;
+
+ uint32_t hash = _mesa_fnv32_1a_offset_bias;
+ hash = _mesa_fnv32_1a_accumulate(hash, type->op);
+ hash = _mesa_fnv32_1a_accumulate_block(hash, type->args, sizeof(uint32_t) *
+ type->num_args);
+ return hash;
+}
+
+static bool
+non_aggregate_type_equals(const void *a, const void *b)
+{
+ const struct spirv_type *ta = a, *tb = b;
+
+ if (ta->op != tb->op)
+ return false;
+
+ assert(ta->num_args == tb->num_args);
+ return memcmp(ta->args, tb->args, sizeof(uint32_t) * ta->num_args) == 0;
+}
+
+static SpvId
+get_type_def(struct spirv_builder *b, SpvOp op, const uint32_t args[],
+ size_t num_args)
+{
+ /* According to the SPIR-V specification:
+ *
+ * "Two different type <id>s form, by definition, two different types. It
+ * is valid to declare multiple aggregate type <id>s having the same
+ * opcode and operands. This is to allow multiple instances of aggregate
+ * types with the same structure to be decorated differently. (Different
+ * decorations are not required; two different aggregate type <id>s are
+ * allowed to have identical declarations and decorations, and will still
+ * be two different types.) Non-aggregate types are different: It is
+ * invalid to declare multiple type <id>s for the same scalar, vector, or
+ * matrix type. That is, non-aggregate type declarations must all have
+ * different opcodes or operands. (Note that non-aggregate types cannot
+ * be decorated in ways that affect their type.)"
+ *
+    * ..so, we need to prevent the same non-aggregate type from being
+    * re-defined with a new <id>. We do this by keeping the definitions in a
+    * hash table, so we can easily look them up and reuse them.
+ */
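+   /* e.g. two requests for the same scalar type must share one <id>:
+    *
+    *    SpvId a = spirv_builder_type_float(b, 32);
+    *    SpvId c = spirv_builder_type_float(b, 32);
+    *    assert(a == c);
+    */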
+
+ struct spirv_type key;
+ assert(num_args <= ARRAY_SIZE(key.args));
+ key.op = op;
+ memcpy(&key.args, args, sizeof(uint32_t) * num_args);
+ key.num_args = num_args;
+
+ struct hash_entry *entry;
+ if (b->types) {
+ entry = _mesa_hash_table_search(b->types, &key);
+ if (entry)
+ return ((struct spirv_type *)entry->data)->type;
+ } else {
+ b->types = _mesa_hash_table_create(NULL, non_aggregate_type_hash,
+ non_aggregate_type_equals);
+ assert(b->types);
+ }
+
+ struct spirv_type *type = CALLOC_STRUCT(spirv_type);
+ if (!type)
+ return 0;
+
+ type->op = op;
+ memcpy(&type->args, args, sizeof(uint32_t) * num_args);
+ type->num_args = num_args;
+
+ type->type = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, 2 + num_args);
+ spirv_buffer_emit_word(&b->types_const_defs, op | ((2 + num_args) << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type->type);
+ for (int i = 0; i < num_args; ++i)
+ spirv_buffer_emit_word(&b->types_const_defs, args[i]);
+
+ entry = _mesa_hash_table_insert(b->types, type, type);
+ assert(entry);
+
+ return ((struct spirv_type *)entry->data)->type;
+}
+
+SpvId
+spirv_builder_type_void(struct spirv_builder *b)
+{
+ return get_type_def(b, SpvOpTypeVoid, NULL, 0);
+}
+
+SpvId
+spirv_builder_type_bool(struct spirv_builder *b)
+{
+ return get_type_def(b, SpvOpTypeBool, NULL, 0);
+}
+
+SpvId
+spirv_builder_type_int(struct spirv_builder *b, unsigned width)
+{
+ uint32_t args[] = { width, 1 };
+ return get_type_def(b, SpvOpTypeInt, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_uint(struct spirv_builder *b, unsigned width)
+{
+ uint32_t args[] = { width, 0 };
+ return get_type_def(b, SpvOpTypeInt, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_float(struct spirv_builder *b, unsigned width)
+{
+ uint32_t args[] = { width };
+ return get_type_def(b, SpvOpTypeFloat, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_image(struct spirv_builder *b, SpvId sampled_type,
+ SpvDim dim, bool depth, bool arrayed, bool ms,
+ unsigned sampled, SpvImageFormat image_format)
+{
+ assert(sampled < 3);
+ uint32_t args[] = {
+ sampled_type, dim, depth ? 1 : 0, arrayed ? 1 : 0, ms ? 1 : 0, sampled,
+ image_format
+ };
+ return get_type_def(b, SpvOpTypeImage, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_sampled_image(struct spirv_builder *b, SpvId image_type)
+{
+ uint32_t args[] = { image_type };
+ return get_type_def(b, SpvOpTypeSampledImage, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_pointer(struct spirv_builder *b,
+ SpvStorageClass storage_class, SpvId type)
+{
+ uint32_t args[] = { storage_class, type };
+ return get_type_def(b, SpvOpTypePointer, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_vector(struct spirv_builder *b, SpvId component_type,
+ unsigned component_count)
+{
+ assert(component_count > 1);
+ uint32_t args[] = { component_type, component_count };
+ return get_type_def(b, SpvOpTypeVector, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_array(struct spirv_builder *b, SpvId component_type,
+ SpvId length)
+{
+ SpvId type = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, 4);
+ spirv_buffer_emit_word(&b->types_const_defs, SpvOpTypeArray | (4 << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type);
+ spirv_buffer_emit_word(&b->types_const_defs, component_type);
+ spirv_buffer_emit_word(&b->types_const_defs, length);
+ return type;
+}
+
+SpvId
+spirv_builder_type_struct(struct spirv_builder *b, const SpvId member_types[],
+ size_t num_member_types)
+{
+ int words = 2 + num_member_types;
+ SpvId type = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, words);
+ spirv_buffer_emit_word(&b->types_const_defs, SpvOpTypeStruct | (words << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type);
+ for (int i = 0; i < num_member_types; ++i)
+ spirv_buffer_emit_word(&b->types_const_defs, member_types[i]);
+ return type;
+}
+
+SpvId
+spirv_builder_type_function(struct spirv_builder *b, SpvId return_type,
+ const SpvId parameter_types[],
+ size_t num_parameter_types)
+{
+ int words = 3 + num_parameter_types;
+ SpvId type = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, words);
+ spirv_buffer_emit_word(&b->types_const_defs, SpvOpTypeFunction | (words << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type);
+ spirv_buffer_emit_word(&b->types_const_defs, return_type);
+ for (int i = 0; i < num_parameter_types; ++i)
+ spirv_buffer_emit_word(&b->types_const_defs, parameter_types[i]);
+ return type;
+}
+
+struct spirv_const {
+ SpvOp op, type;
+ uint32_t args[8];
+ size_t num_args;
+
+ SpvId result;
+};
+
+static uint32_t
+const_hash(const void *arg)
+{
+ const struct spirv_const *key = arg;
+
+ uint32_t hash = _mesa_fnv32_1a_offset_bias;
+ hash = _mesa_fnv32_1a_accumulate(hash, key->op);
+ hash = _mesa_fnv32_1a_accumulate(hash, key->type);
+ hash = _mesa_fnv32_1a_accumulate_block(hash, key->args, sizeof(uint32_t) *
+ key->num_args);
+ return hash;
+}
+
+static bool
+const_equals(const void *a, const void *b)
+{
+ const struct spirv_const *ca = a, *cb = b;
+
+ if (ca->op != cb->op ||
+ ca->type != cb->type)
+ return false;
+
+ assert(ca->num_args == cb->num_args);
+ return memcmp(ca->args, cb->args, sizeof(uint32_t) * ca->num_args) == 0;
+}
+
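+/* constants are deduplicated the same way as non-aggregate types above;
+ * e.g. two requests for the 32-bit float constant 1.0f share one <id> */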
+static SpvId
+get_const_def(struct spirv_builder *b, SpvOp op, SpvId type,
+ const uint32_t args[], size_t num_args)
+{
+ struct spirv_const key;
+ assert(num_args <= ARRAY_SIZE(key.args));
+ key.op = op;
+ key.type = type;
+ memcpy(&key.args, args, sizeof(uint32_t) * num_args);
+ key.num_args = num_args;
+
+ struct hash_entry *entry;
+ if (b->consts) {
+ entry = _mesa_hash_table_search(b->consts, &key);
+ if (entry)
+ return ((struct spirv_const *)entry->data)->result;
+ } else {
+ b->consts = _mesa_hash_table_create(NULL, const_hash, const_equals);
+ assert(b->consts);
+ }
+
+ struct spirv_const *cnst = CALLOC_STRUCT(spirv_const);
+ if (!cnst)
+ return 0;
+
+ cnst->op = op;
+ cnst->type = type;
+ memcpy(&cnst->args, args, sizeof(uint32_t) * num_args);
+ cnst->num_args = num_args;
+
+ cnst->result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, 3 + num_args);
+ spirv_buffer_emit_word(&b->types_const_defs, op | ((3 + num_args) << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type);
+ spirv_buffer_emit_word(&b->types_const_defs, cnst->result);
+ for (int i = 0; i < num_args; ++i)
+ spirv_buffer_emit_word(&b->types_const_defs, args[i]);
+
+ entry = _mesa_hash_table_insert(b->consts, cnst, cnst);
+ assert(entry);
+
+ return ((struct spirv_const *)entry->data)->result;
+}
+
+SpvId
+spirv_builder_const_bool(struct spirv_builder *b, bool val)
+{
+ return get_const_def(b, val ? SpvOpConstantTrue : SpvOpConstantFalse,
+ spirv_builder_type_bool(b), NULL, 0);
+}
+
+SpvId
+spirv_builder_const_int(struct spirv_builder *b, int width, int32_t val)
+{
+ assert(width <= 32);
+ uint32_t args[] = { val };
+ return get_const_def(b, SpvOpConstant, spirv_builder_type_int(b, width),
+ args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_const_uint(struct spirv_builder *b, int width, uint32_t val)
+{
+ assert(width <= 32);
+ uint32_t args[] = { val };
+ return get_const_def(b, SpvOpConstant, spirv_builder_type_uint(b, width),
+ args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_const_float(struct spirv_builder *b, int width, float val)
+{
+ assert(width <= 32);
+ uint32_t args[] = { u_bitcast_f2u(val) };
+ return get_const_def(b, SpvOpConstant, spirv_builder_type_float(b, width),
+ args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_const_composite(struct spirv_builder *b, SpvId result_type,
+ const SpvId constituents[],
+ size_t num_constituents)
+{
+ return get_const_def(b, SpvOpConstantComposite, result_type,
+ (const uint32_t *)constituents,
+ num_constituents);
+}
+
+SpvId
+spirv_builder_emit_var(struct spirv_builder *b, SpvId type,
+ SpvStorageClass storage_class)
+{
+ assert(storage_class != SpvStorageClassGeneric);
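+   /* Function-storage variables must be declared inside the function body;
+    * everything else is a module-level declaration and goes into the
+    * types/constants section */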
+ struct spirv_buffer *buf = storage_class != SpvStorageClassFunction ?
+ &b->types_const_defs : &b->instructions;
+
+ SpvId ret = spirv_builder_new_id(b);
+ spirv_buffer_prepare(buf, 4);
+ spirv_buffer_emit_word(buf, SpvOpVariable | (4 << 16));
+ spirv_buffer_emit_word(buf, type);
+ spirv_buffer_emit_word(buf, ret);
+ spirv_buffer_emit_word(buf, storage_class);
+ return ret;
+}
+
+SpvId
+spirv_builder_import(struct spirv_builder *b, const char *name)
+{
+ SpvId result = spirv_builder_new_id(b);
+ size_t pos = b->imports.num_words;
+ spirv_buffer_prepare(&b->imports, 2);
+ spirv_buffer_emit_word(&b->imports, SpvOpExtInstImport);
+ spirv_buffer_emit_word(&b->imports, result);
+ int len = spirv_buffer_emit_string(&b->imports, name);
+ b->imports.words[pos] |= (2 + len) << 16;
+ return result;
+}
+
+size_t
+spirv_builder_get_num_words(struct spirv_builder *b)
+{
+ const size_t header_size = 5;
+ return header_size +
+ b->capabilities.num_words +
+ b->imports.num_words +
+ b->memory_model.num_words +
+ b->entry_points.num_words +
+ b->exec_modes.num_words +
+ b->debug_names.num_words +
+ b->decorations.num_words +
+ b->types_const_defs.num_words +
+ b->instructions.num_words;
+}
+
+size_t
+spirv_builder_get_words(struct spirv_builder *b, uint32_t *words,
+ size_t num_words)
+{
+ assert(num_words >= spirv_builder_get_num_words(b));
+
+ size_t written = 0;
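+   /* module header: magic number, version (1.0), generator id (0),
+    * id bound (largest id + 1), reserved schema word */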
+ words[written++] = SpvMagicNumber;
+ words[written++] = 0x00010000;
+ words[written++] = 0;
+ words[written++] = b->prev_id + 1;
+ words[written++] = 0;
+
+ const struct spirv_buffer *buffers[] = {
+ &b->capabilities,
+ &b->imports,
+ &b->memory_model,
+ &b->entry_points,
+ &b->exec_modes,
+ &b->debug_names,
+ &b->decorations,
+ &b->types_const_defs,
+ &b->instructions
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(buffers); ++i) {
+ const struct spirv_buffer *buffer = buffers[i];
+ for (int j = 0; j < buffer->num_words; ++j)
+ words[written++] = buffer->words[j];
+ }
+
+ assert(written == spirv_builder_get_num_words(b));
+ return written;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/spirv_builder.h b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/spirv_builder.h
new file mode 100644
index 000000000..d0843b8ff
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/spirv_builder.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SPIRV_BUILDER_H
+#define SPIRV_BUILDER_H
+
+#include "compiler/spirv/spirv.h"
+#include "compiler/spirv/GLSL.std.450.h"
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+struct hash_table;
+
+struct spirv_buffer {
+ uint32_t *words;
+ size_t num_words, room;
+};
+
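+/* a SPIR-V module consists of sections in a fixed order; each section is
+ * accumulated in its own buffer while building and concatenated into the
+ * final word-stream by spirv_builder_get_words() */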
+struct spirv_builder {
+ struct spirv_buffer capabilities;
+ struct spirv_buffer imports;
+ struct spirv_buffer memory_model;
+ struct spirv_buffer entry_points;
+ struct spirv_buffer exec_modes;
+ struct spirv_buffer debug_names;
+ struct spirv_buffer decorations;
+
+ struct spirv_buffer types_const_defs;
+ struct hash_table *types;
+ struct hash_table *consts;
+
+ struct spirv_buffer instructions;
+ SpvId prev_id;
+};
+
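+/* ids are 1-based; id zero is reserved as invalid in SPIR-V, hence the
+ * pre-increment */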
+static inline SpvId
+spirv_builder_new_id(struct spirv_builder *b)
+{
+ return ++b->prev_id;
+}
+
+void
+spirv_builder_emit_cap(struct spirv_builder *b, SpvCapability cap);
+
+void
+spirv_builder_emit_source(struct spirv_builder *b, SpvSourceLanguage lang,
+ uint32_t version);
+
+void
+spirv_builder_emit_mem_model(struct spirv_builder *b,
+ SpvAddressingModel addr_model,
+ SpvMemoryModel mem_model);
+
+void
+spirv_builder_emit_name(struct spirv_builder *b, SpvId target,
+ const char *name);
+
+void
+spirv_builder_emit_decoration(struct spirv_builder *b, SpvId target,
+ SpvDecoration decoration);
+
+void
+spirv_builder_emit_location(struct spirv_builder *b, SpvId target,
+ uint32_t location);
+
+void
+spirv_builder_emit_component(struct spirv_builder *b, SpvId target,
+ uint32_t component);
+
+void
+spirv_builder_emit_builtin(struct spirv_builder *b, SpvId target,
+ SpvBuiltIn builtin);
+
+void
+spirv_builder_emit_index(struct spirv_builder *b, SpvId target, int index);
+
+void
+spirv_builder_emit_descriptor_set(struct spirv_builder *b, SpvId target,
+ uint32_t descriptor_set);
+
+void
+spirv_builder_emit_binding(struct spirv_builder *b, SpvId target,
+ uint32_t binding);
+
+void
+spirv_builder_emit_array_stride(struct spirv_builder *b, SpvId target,
+ uint32_t stride);
+
+void
+spirv_builder_emit_member_offset(struct spirv_builder *b, SpvId target,
+ uint32_t member, uint32_t offset);
+
+void
+spirv_builder_emit_entry_point(struct spirv_builder *b,
+ SpvExecutionModel exec_model, SpvId entry_point,
+ const char *name, const SpvId interfaces[],
+ size_t num_interfaces);
+
+void
+spirv_builder_emit_exec_mode(struct spirv_builder *b, SpvId entry_point,
+ SpvExecutionMode exec_mode);
+
+void
+spirv_builder_function(struct spirv_builder *b, SpvId result,
+ SpvId return_type,
+ SpvFunctionControlMask function_control,
+ SpvId function_type);
+
+void
+spirv_builder_function_end(struct spirv_builder *b);
+
+void
+spirv_builder_label(struct spirv_builder *b, SpvId label);
+
+void
+spirv_builder_return(struct spirv_builder *b);
+
+SpvId
+spirv_builder_emit_undef(struct spirv_builder *b, SpvId result_type);
+
+SpvId
+spirv_builder_emit_load(struct spirv_builder *b, SpvId result_type,
+ SpvId pointer);
+
+void
+spirv_builder_emit_store(struct spirv_builder *b, SpvId pointer, SpvId object);
+
+SpvId
+spirv_builder_emit_access_chain(struct spirv_builder *b, SpvId result_type,
+ SpvId base, const SpvId indexes[],
+ size_t num_indexes);
+
+SpvId
+spirv_builder_emit_unop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand);
+
+SpvId
+spirv_builder_emit_binop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand0, SpvId operand1);
+
+SpvId
+spirv_builder_emit_triop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand0, SpvId operand1, SpvId operand2);
+
+SpvId
+spirv_builder_emit_composite_extract(struct spirv_builder *b, SpvId result_type,
+ SpvId composite, const uint32_t indexes[],
+ size_t num_indexes);
+
+SpvId
+spirv_builder_emit_composite_construct(struct spirv_builder *b,
+ SpvId result_type,
+ const SpvId constituents[],
+ size_t num_constituents);
+
+SpvId
+spirv_builder_emit_vector_shuffle(struct spirv_builder *b, SpvId result_type,
+ SpvId vector_1, SpvId vector_2,
+ const uint32_t components[],
+ size_t num_components);
+
+void
+spirv_builder_emit_branch(struct spirv_builder *b, SpvId label);
+
+void
+spirv_builder_emit_selection_merge(struct spirv_builder *b, SpvId merge_block,
+ SpvSelectionControlMask selection_control);
+
+void
+spirv_builder_loop_merge(struct spirv_builder *b, SpvId merge_block,
+ SpvId cont_target, SpvLoopControlMask loop_control);
+
+void
+spirv_builder_emit_branch_conditional(struct spirv_builder *b, SpvId condition,
+ SpvId true_label, SpvId false_label);
+
+SpvId
+spirv_builder_emit_phi(struct spirv_builder *b, SpvId result_type,
+ size_t num_vars, size_t *position);
+
+void
+spirv_builder_set_phi_operand(struct spirv_builder *b, size_t position,
+ size_t index, SpvId variable, SpvId parent);
+
+void
+spirv_builder_emit_kill(struct spirv_builder *b);
+
+
+SpvId
+spirv_builder_emit_image_sample(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId sampled_image,
+ SpvId coordinate,
+ bool proj,
+ SpvId lod,
+ SpvId bias,
+ SpvId dref,
+ SpvId dx,
+ SpvId dy,
+ SpvId offset);
+
+SpvId
+spirv_builder_emit_image(struct spirv_builder *b, SpvId result_type,
+ SpvId sampled_image);
+
+SpvId
+spirv_builder_emit_image_fetch(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId image,
+ SpvId coordinate,
+ SpvId lod);
+
+SpvId
+spirv_builder_emit_image_query_size(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId image,
+ SpvId lod);
+
+SpvId
+spirv_builder_emit_ext_inst(struct spirv_builder *b, SpvId result_type,
+ SpvId set, uint32_t instruction,
+ const SpvId args[], size_t num_args);
+
+SpvId
+spirv_builder_type_void(struct spirv_builder *b);
+
+SpvId
+spirv_builder_type_bool(struct spirv_builder *b);
+
+SpvId
+spirv_builder_type_int(struct spirv_builder *b, unsigned width);
+
+SpvId
+spirv_builder_type_uint(struct spirv_builder *b, unsigned width);
+
+SpvId
+spirv_builder_type_float(struct spirv_builder *b, unsigned width);
+
+SpvId
+spirv_builder_type_image(struct spirv_builder *b, SpvId sampled_type,
+ SpvDim dim, bool depth, bool arrayed, bool ms,
+ unsigned sampled, SpvImageFormat image_format);
+
+SpvId
+spirv_builder_type_sampled_image(struct spirv_builder *b, SpvId image_type);
+
+SpvId
+spirv_builder_type_pointer(struct spirv_builder *b,
+ SpvStorageClass storage_class, SpvId type);
+
+SpvId
+spirv_builder_type_vector(struct spirv_builder *b, SpvId component_type,
+ unsigned component_count);
+
+SpvId
+spirv_builder_type_array(struct spirv_builder *b, SpvId component_type,
+ SpvId length);
+
+SpvId
+spirv_builder_type_struct(struct spirv_builder *b, const SpvId member_types[],
+ size_t num_member_types);
+
+SpvId
+spirv_builder_type_function(struct spirv_builder *b, SpvId return_type,
+ const SpvId parameter_types[],
+ size_t num_parameter_types);
+
+SpvId
+spirv_builder_const_bool(struct spirv_builder *b, bool val);
+
+SpvId
+spirv_builder_const_int(struct spirv_builder *b, int width, int32_t val);
+
+SpvId
+spirv_builder_const_uint(struct spirv_builder *b, int width, uint32_t val);
+
+SpvId
+spirv_builder_const_float(struct spirv_builder *b, int width, float val);
+
+SpvId
+spirv_builder_const_composite(struct spirv_builder *b, SpvId result_type,
+ const SpvId constituents[],
+ size_t num_constituents);
+
+SpvId
+spirv_builder_emit_var(struct spirv_builder *b, SpvId type,
+ SpvStorageClass storage_class);
+
+SpvId
+spirv_builder_import(struct spirv_builder *b, const char *name);
+
+size_t
+spirv_builder_get_num_words(struct spirv_builder *b);
+
+size_t
+spirv_builder_get_words(struct spirv_builder *b, uint32_t *words,
+ size_t num_words);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/zink_nir_algebraic.py b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/zink_nir_algebraic.py
new file mode 100644
index 000000000..af2419cf9
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/nir_to_spirv/zink_nir_algebraic.py
@@ -0,0 +1,48 @@
+#
+# Copyright (C) 2020 Collabora Ltd.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import argparse
+import sys
+
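+# NIR's b2b1/b2b32 "bool-width" conversions have no direct SPIR-V
+# equivalent, so rewrite them as integer conversions the backend already
+# knows how to emit: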
+lower_b2b = [
+ (('b2b32', 'a'), ('b2i32', 'a')),
+ (('b2b1', 'a'), ('i2b1', 'a')),
+]
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-p', '--import-path', required=True)
+ args = parser.parse_args()
+ sys.path.insert(0, args.import_path)
+ run()
+
+
+def run():
+ import nir_algebraic # pylint: disable=import-error
+
+ print('#include "nir_to_spirv/nir_to_spirv.h"')
+
+ print(nir_algebraic.AlgebraicPass("zink_nir_lower_b2b",
+ lower_b2b).render())
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_batch.c b/lib/mesa/src/gallium/drivers/zink/zink_batch.c
new file mode 100644
index 000000000..a73128d49
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_batch.c
@@ -0,0 +1,119 @@
+#include "zink_batch.h"
+
+#include "zink_context.h"
+#include "zink_fence.h"
+#include "zink_framebuffer.h"
+#include "zink_query.h"
+#include "zink_render_pass.h"
+#include "zink_resource.h"
+#include "zink_screen.h"
+
+#include "util/u_debug.h"
+#include "util/set.h"
+
+static void
+reset_batch(struct zink_screen *screen, struct zink_batch *batch)
+{
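+   /* make the batch reusable: wait for the previous submission to finish,
+    * then drop every reference it took while recording */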
+ batch->descs_left = ZINK_BATCH_DESC_SIZE;
+
+   // no fence means the cmdbuf was never submitted, so there's nothing to clean up
+ if (!batch->fence)
+ return;
+
+ zink_fence_finish(screen, batch->fence, PIPE_TIMEOUT_INFINITE);
+ zink_fence_reference(screen, &batch->fence, NULL);
+
+ zink_render_pass_reference(screen, &batch->rp, NULL);
+ zink_framebuffer_reference(screen, &batch->fb, NULL);
+
+ /* unref all used resources */
+ set_foreach(batch->resources, entry) {
+ struct pipe_resource *pres = (struct pipe_resource *)entry->key;
+ pipe_resource_reference(&pres, NULL);
+ }
+ _mesa_set_clear(batch->resources, NULL);
+
+ /* unref all used sampler-views */
+ set_foreach(batch->sampler_views, entry) {
+ struct pipe_sampler_view *pres = (struct pipe_sampler_view *)entry->key;
+ pipe_sampler_view_reference(&pres, NULL);
+ }
+ _mesa_set_clear(batch->sampler_views, NULL);
+
+ util_dynarray_foreach(&batch->zombie_samplers, VkSampler, samp) {
+ vkDestroySampler(screen->dev, *samp, NULL);
+ }
+ util_dynarray_clear(&batch->zombie_samplers);
+
+ if (vkResetDescriptorPool(screen->dev, batch->descpool, 0) != VK_SUCCESS)
+ fprintf(stderr, "vkResetDescriptorPool failed\n");
+}
+
+void
+zink_start_batch(struct zink_context *ctx, struct zink_batch *batch)
+{
+ reset_batch(zink_screen(ctx->base.screen), batch);
+
+ VkCommandBufferBeginInfo cbbi = {};
+ cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ if (vkBeginCommandBuffer(batch->cmdbuf, &cbbi) != VK_SUCCESS)
+ debug_printf("vkBeginCommandBuffer failed\n");
+
+ if (!ctx->queries_disabled)
+ zink_resume_queries(ctx, batch);
+}
+
+void
+zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
+{
+ if (!ctx->queries_disabled)
+ zink_suspend_queries(ctx, batch);
+
+ if (vkEndCommandBuffer(batch->cmdbuf) != VK_SUCCESS) {
+ debug_printf("vkEndCommandBuffer failed\n");
+ return;
+ }
+
+ assert(batch->fence == NULL);
+ batch->fence = zink_create_fence(ctx->base.screen);
+ if (!batch->fence)
+ return;
+
+ VkSubmitInfo si = {};
+ si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ si.waitSemaphoreCount = 0;
+ si.pWaitSemaphores = NULL;
+ si.signalSemaphoreCount = 0;
+ si.pSignalSemaphores = NULL;
+ si.pWaitDstStageMask = NULL;
+ si.commandBufferCount = 1;
+ si.pCommandBuffers = &batch->cmdbuf;
+
+ if (vkQueueSubmit(ctx->queue, 1, &si, batch->fence->fence) != VK_SUCCESS) {
+ debug_printf("vkQueueSubmit failed\n");
+ abort();
+ }
+}
+
+void
+zink_batch_reference_resoure(struct zink_batch *batch,
+ struct zink_resource *res)
+{
+ struct set_entry *entry = _mesa_set_search(batch->resources, res);
+ if (!entry) {
+ entry = _mesa_set_add(batch->resources, res);
+ pipe_reference(NULL, &res->base.reference);
+ }
+}
+
+void
+zink_batch_reference_sampler_view(struct zink_batch *batch,
+ struct zink_sampler_view *sv)
+{
+ struct set_entry *entry = _mesa_set_search(batch->sampler_views, sv);
+ if (!entry) {
+ entry = _mesa_set_add(batch->sampler_views, sv);
+ pipe_reference(NULL, &sv->base.reference);
+ }
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_batch.h b/lib/mesa/src/gallium/drivers/zink/zink_batch.h
new file mode 100644
index 000000000..602040ad4
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_batch.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_BATCH_H
+#define ZINK_BATCH_H
+
+#include <vulkan/vulkan.h>
+
+#include "util/u_dynarray.h"
+
+struct zink_context;
+struct zink_fence;
+struct zink_framebuffer;
+struct zink_render_pass;
+struct zink_resource;
+struct zink_sampler_view;
+
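+/* per-batch descriptor budget; sizes each batch's descriptor pool */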
+#define ZINK_BATCH_DESC_SIZE 1000
+
+struct zink_batch {
+ VkCommandBuffer cmdbuf;
+ VkDescriptorPool descpool;
+ int descs_left;
+ struct zink_fence *fence;
+
+ struct zink_render_pass *rp;
+ struct zink_framebuffer *fb;
+
+ struct set *resources;
+ struct set *sampler_views;
+
+ struct util_dynarray zombie_samplers;
+};
+
+void
+zink_start_batch(struct zink_context *ctx, struct zink_batch *batch);
+
+void
+zink_end_batch(struct zink_context *ctx, struct zink_batch *batch);
+
+void
+zink_batch_reference_resoure(struct zink_batch *batch,
+ struct zink_resource *res);
+
+void
+zink_batch_reference_sampler_view(struct zink_batch *batch,
+ struct zink_sampler_view *sv);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_blit.c b/lib/mesa/src/gallium/drivers/zink/zink_blit.c
new file mode 100644
index 000000000..efce46d52
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_blit.c
@@ -0,0 +1,211 @@
+#include "zink_context.h"
+#include "zink_helpers.h"
+#include "zink_resource.h"
+#include "zink_screen.h"
+
+#include "util/u_blitter.h"
+#include "util/format/u_format.h"
+
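+/* attempt a multisample resolve using vkCmdResolveImage; this only handles
+ * straight 1:1 resolves (matching masks and formats, no scissor and no
+ * blending), so return false to request a fallback for anything else */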
+static bool
+blit_resolve(struct zink_context *ctx, const struct pipe_blit_info *info)
+{
+ if (util_format_get_mask(info->dst.format) != info->mask ||
+ util_format_get_mask(info->src.format) != info->mask ||
+ info->scissor_enable ||
+ info->alpha_blend)
+ return false;
+
+ struct zink_resource *src = zink_resource(info->src.resource);
+ struct zink_resource *dst = zink_resource(info->dst.resource);
+
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ if (src->format != zink_get_format(screen, info->src.format) ||
+ dst->format != zink_get_format(screen, info->dst.format))
+ return false;
+
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+
+ zink_batch_reference_resoure(batch, src);
+ zink_batch_reference_resoure(batch, dst);
+
+ if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, src, src->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+
+ if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+ VkImageResolve region = {};
+
+ region.srcSubresource.aspectMask = src->aspect;
+ region.srcSubresource.mipLevel = info->src.level;
+ region.srcSubresource.baseArrayLayer = 0; // no clue
+ region.srcSubresource.layerCount = 1; // no clue
+ region.srcOffset.x = info->src.box.x;
+ region.srcOffset.y = info->src.box.y;
+ region.srcOffset.z = info->src.box.z;
+
+ region.dstSubresource.aspectMask = dst->aspect;
+ region.dstSubresource.mipLevel = info->dst.level;
+ region.dstSubresource.baseArrayLayer = 0; // no clue
+ region.dstSubresource.layerCount = 1; // no clue
+ region.dstOffset.x = info->dst.box.x;
+ region.dstOffset.y = info->dst.box.y;
+ region.dstOffset.z = info->dst.box.z;
+
+ region.extent.width = info->dst.box.width;
+ region.extent.height = info->dst.box.height;
+ region.extent.depth = info->dst.box.depth;
+ vkCmdResolveImage(batch->cmdbuf, src->image, src->layout,
+ dst->image, dst->layout,
+ 1, &region);
+
+ return true;
+}
+
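+/* attempt the blit with vkCmdBlitImage; like blit_resolve, this returns
+ * false for anything the native command can't express */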
+static bool
+blit_native(struct zink_context *ctx, const struct pipe_blit_info *info)
+{
+ if (util_format_get_mask(info->dst.format) != info->mask ||
+ util_format_get_mask(info->src.format) != info->mask ||
+ info->scissor_enable ||
+ info->alpha_blend)
+ return false;
+
+ if (util_format_is_depth_or_stencil(info->dst.format) &&
+ info->dst.format != info->src.format)
+ return false;
+
+ struct zink_resource *src = zink_resource(info->src.resource);
+ struct zink_resource *dst = zink_resource(info->dst.resource);
+
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ if (src->format != zink_get_format(screen, info->src.format) ||
+ dst->format != zink_get_format(screen, info->dst.format))
+ return false;
+
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+ zink_batch_reference_resoure(batch, src);
+ zink_batch_reference_resoure(batch, dst);
+
+ if (src == dst) {
+ /* The Vulkan 1.1 specification says the following about valid usage
+ * of vkCmdBlitImage:
+ *
+ * "srcImageLayout must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR,
+ * VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL"
+ *
+ * and:
+ *
+ * "dstImageLayout must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR,
+ * VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL"
+ *
+       * Since we can't have the same image in two layouts at the same time,
+ * we're effectively left with VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
+ * VK_IMAGE_LAYOUT_GENERAL. And since this isn't a present-related
+ * operation, VK_IMAGE_LAYOUT_GENERAL seems most appropriate.
+ */
+ if (src->layout != VK_IMAGE_LAYOUT_GENERAL)
+ zink_resource_barrier(batch->cmdbuf, src, src->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ } else {
+ if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, src, src->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+
+ if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ }
+
+ VkImageBlit region = {};
+ region.srcSubresource.aspectMask = src->aspect;
+ region.srcSubresource.mipLevel = info->src.level;
+ region.srcOffsets[0].x = info->src.box.x;
+ region.srcOffsets[0].y = info->src.box.y;
+ region.srcOffsets[1].x = info->src.box.x + info->src.box.width;
+ region.srcOffsets[1].y = info->src.box.y + info->src.box.height;
+
+ if (src->base.array_size > 1) {
+ region.srcOffsets[0].z = 0;
+ region.srcOffsets[1].z = 1;
+ region.srcSubresource.baseArrayLayer = info->src.box.z;
+ region.srcSubresource.layerCount = info->src.box.depth;
+ } else {
+ region.srcOffsets[0].z = info->src.box.z;
+ region.srcOffsets[1].z = info->src.box.z + info->src.box.depth;
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ }
+
+ region.dstSubresource.aspectMask = dst->aspect;
+ region.dstSubresource.mipLevel = info->dst.level;
+ region.dstOffsets[0].x = info->dst.box.x;
+ region.dstOffsets[0].y = info->dst.box.y;
+ region.dstOffsets[1].x = info->dst.box.x + info->dst.box.width;
+ region.dstOffsets[1].y = info->dst.box.y + info->dst.box.height;
+
+ if (dst->base.array_size > 1) {
+ region.dstOffsets[0].z = 0;
+ region.dstOffsets[1].z = 1;
+ region.dstSubresource.baseArrayLayer = info->dst.box.z;
+ region.dstSubresource.layerCount = info->dst.box.depth;
+ } else {
+ region.dstOffsets[0].z = info->dst.box.z;
+ region.dstOffsets[1].z = info->dst.box.z + info->dst.box.depth;
+ region.dstSubresource.baseArrayLayer = 0;
+ region.dstSubresource.layerCount = 1;
+ }
+
+ vkCmdBlitImage(batch->cmdbuf, src->image, src->layout,
+ dst->image, dst->layout,
+ 1, &region,
+ zink_filter(info->filter));
+
+ return true;
+}
+
+void
+zink_blit(struct pipe_context *pctx,
+ const struct pipe_blit_info *info)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ if (info->src.resource->nr_samples > 1 &&
+ info->dst.resource->nr_samples <= 1) {
+ if (blit_resolve(ctx, info))
+ return;
+ } else {
+ if (blit_native(ctx, info))
+ return;
+ }
+
+ if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
+ debug_printf("blit unsupported %s -> %s\n",
+ util_format_short_name(info->src.resource->format),
+ util_format_short_name(info->dst.resource->format));
+ return;
+ }
+
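+   /* the u_blitter fallback draws using its own internal state, so save
+    * every piece of context state it is about to clobber */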
+ util_blitter_save_blend(ctx->blitter, ctx->gfx_pipeline_state.blend_state);
+ util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->gfx_pipeline_state.depth_stencil_alpha_state);
+ util_blitter_save_vertex_elements(ctx->blitter, ctx->element_state);
+ util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
+ util_blitter_save_rasterizer(ctx->blitter, ctx->rast_state);
+ util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
+ util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
+ util_blitter_save_framebuffer(ctx->blitter, &ctx->fb_state);
+ util_blitter_save_viewport(ctx->blitter, ctx->viewport_states);
+ util_blitter_save_scissor(ctx->blitter, ctx->scissor_states);
+ util_blitter_save_fragment_sampler_states(ctx->blitter,
+ ctx->num_samplers[PIPE_SHADER_FRAGMENT],
+ ctx->sampler_states[PIPE_SHADER_FRAGMENT]);
+ util_blitter_save_fragment_sampler_views(ctx->blitter,
+ ctx->num_image_views[PIPE_SHADER_FRAGMENT],
+ ctx->image_views[PIPE_SHADER_FRAGMENT]);
+ util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
+ util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->buffers);
+ util_blitter_save_sample_mask(ctx->blitter, ctx->gfx_pipeline_state.sample_mask);
+
+ util_blitter_blit(ctx->blitter, info);
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_compiler.c b/lib/mesa/src/gallium/drivers/zink/zink_compiler.c
new file mode 100644
index 000000000..73ede848b
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_compiler.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_compiler.h"
+#include "zink_screen.h"
+#include "nir_to_spirv/nir_to_spirv.h"
+
+#include "pipe/p_state.h"
+
+#include "nir.h"
+#include "compiler/nir/nir_builder.h"
+
+#include "nir/tgsi_to_nir.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_from_mesa.h"
+
+#include "util/u_memory.h"
+
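+/* zink exposes plain uniforms as UBO 0, so shift every explicit UBO index up
+ * by one and rewrite load_uniform into a load_ubo from buffer 0 */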
+static bool
+lower_instr(nir_intrinsic_instr *instr, nir_builder *b)
+{
+ b->cursor = nir_before_instr(&instr->instr);
+
+ if (instr->intrinsic == nir_intrinsic_load_ubo) {
+ nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[0], 1);
+ nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, 1));
+ nir_instr_rewrite_src(&instr->instr, &instr->src[0],
+ nir_src_for_ssa(new_idx));
+ return true;
+ }
+
+ if (instr->intrinsic == nir_intrinsic_load_uniform) {
+ nir_ssa_def *ubo_idx = nir_imm_int(b, 0);
+ nir_ssa_def *ubo_offset =
+ nir_iadd(b, nir_imm_int(b, nir_intrinsic_base(instr)),
+ nir_ssa_for_src(b, instr->src[0], 1));
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
+ load->num_components = instr->num_components;
+ load->src[0] = nir_src_for_ssa(ubo_idx);
+ load->src[1] = nir_src_for_ssa(ubo_offset);
+ assert(instr->dest.ssa.bit_size >= 8);
+ nir_intrinsic_set_align(load, instr->dest.ssa.bit_size / 8, 0);
+ nir_ssa_dest_init(&load->instr, &load->dest,
+ load->num_components, instr->dest.ssa.bit_size,
+ instr->dest.ssa.name);
+ nir_builder_instr_insert(b, &load->instr);
+ nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
+
+ nir_instr_remove(&instr->instr);
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+lower_uniforms_to_ubo(nir_shader *shader)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl) {
+ nir_builder builder;
+ nir_builder_init(&builder, function->impl);
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type == nir_instr_type_intrinsic)
+ progress |= lower_instr(nir_instr_as_intrinsic(instr),
+ &builder);
+ }
+ }
+
+ nir_metadata_preserve(function->impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ }
+ }
+
+ if (progress) {
+ assert(shader->num_uniforms > 0);
+ const struct glsl_type *type = glsl_array_type(glsl_vec4_type(),
+ shader->num_uniforms, 0);
+ nir_variable *ubo = nir_variable_create(shader, nir_var_mem_ubo, type,
+ "uniform_0");
+ ubo->data.binding = 0;
+
+ struct glsl_struct_field field = {
+ .type = type,
+ .name = "data",
+ .location = -1,
+ };
+ ubo->interface_type =
+ glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
+ false, "__ubo0_interface");
+ }
+
+ return progress;
+}
+
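+/* SPIR-V's OpKill terminates the block it appears in, so a conditional
+ * discard can't live in the middle of a block; turn discard_if into a real
+ * if-branch around an unconditional discard */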
+static bool
+lower_discard_if_instr(nir_intrinsic_instr *instr, nir_builder *b)
+{
+ if (instr->intrinsic == nir_intrinsic_discard_if) {
+ b->cursor = nir_before_instr(&instr->instr);
+
+ nir_if *if_stmt = nir_push_if(b, nir_ssa_for_src(b, instr->src[0], 1));
+ nir_intrinsic_instr *discard =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
+ nir_builder_instr_insert(b, &discard->instr);
+ nir_pop_if(b, if_stmt);
+ nir_instr_remove(&instr->instr);
+ return true;
+ }
+ assert(instr->intrinsic != nir_intrinsic_discard ||
+ nir_block_last_instr(instr->instr.block) == &instr->instr);
+
+ return false;
+}
+
+static bool
+lower_discard_if(nir_shader *shader)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl) {
+ nir_builder builder;
+ nir_builder_init(&builder, function->impl);
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type == nir_instr_type_intrinsic)
+ progress |= lower_discard_if_instr(
+ nir_instr_as_intrinsic(instr),
+ &builder);
+ }
+ }
+
+ nir_metadata_preserve(function->impl, nir_metadata_dominance);
+ }
+ }
+
+ return progress;
+}
+
+static const struct nir_shader_compiler_options nir_options = {
+ .lower_all_io_to_temps = true,
+ .lower_ffma = true,
+ .lower_fdph = true,
+ .lower_flrp32 = true,
+ .lower_fpow = true,
+ .lower_fsat = true,
+};
+
+const void *
+zink_get_compiler_options(struct pipe_screen *screen,
+ enum pipe_shader_ir ir,
+ enum pipe_shader_type shader)
+{
+ assert(ir == PIPE_SHADER_IR_NIR);
+ return &nir_options;
+}
+
+struct nir_shader *
+zink_tgsi_to_nir(struct pipe_screen *screen, const struct tgsi_token *tokens)
+{
+ if (zink_debug & ZINK_DEBUG_TGSI) {
+ fprintf(stderr, "TGSI shader:\n---8<---\n");
+ tgsi_dump_to_file(tokens, 0, stderr);
+ fprintf(stderr, "---8<---\n\n");
+ }
+
+ return tgsi_to_nir(tokens, screen);
+}
+
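+/* run the usual NIR cleanup and optimization passes until none of them makes
+ * further progress */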
+static void
+optimize_nir(struct nir_shader *s)
+{
+ bool progress;
+ do {
+ progress = false;
+ NIR_PASS_V(s, nir_lower_vars_to_ssa);
+ NIR_PASS(progress, s, nir_copy_prop);
+ NIR_PASS(progress, s, nir_opt_remove_phis);
+ NIR_PASS(progress, s, nir_opt_dce);
+ NIR_PASS(progress, s, nir_opt_dead_cf);
+ NIR_PASS(progress, s, nir_opt_cse);
+ NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
+ NIR_PASS(progress, s, nir_opt_algebraic);
+ NIR_PASS(progress, s, nir_opt_constant_folding);
+ NIR_PASS(progress, s, nir_opt_undef);
+ NIR_PASS(progress, s, zink_nir_lower_b2b);
+ } while (progress);
+}
+
+struct zink_shader *
+zink_compile_nir(struct zink_screen *screen, struct nir_shader *nir)
+{
+ struct zink_shader *ret = CALLOC_STRUCT(zink_shader);
+
+ NIR_PASS_V(nir, lower_uniforms_to_ubo);
+ NIR_PASS_V(nir, nir_lower_clip_halfz);
+ NIR_PASS_V(nir, nir_lower_regs_to_ssa);
+ optimize_nir(nir);
+ NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp);
+ NIR_PASS_V(nir, lower_discard_if);
+ NIR_PASS_V(nir, nir_convert_from_ssa, true);
+
+ if (zink_debug & ZINK_DEBUG_NIR) {
+ fprintf(stderr, "NIR shader:\n---8<---\n");
+ nir_print_shader(nir, stderr);
+ fprintf(stderr, "---8<---\n");
+ }
+
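+   /* collect one Vulkan descriptor binding per UBO and per (array of)
+    * combined image-sampler, remapped to a stage-specific slot via
+    * zink_binding() */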
+ ret->num_bindings = 0;
+ nir_foreach_variable(var, &nir->uniforms) {
+ if (var->data.mode == nir_var_mem_ubo) {
+ int binding = zink_binding(nir->info.stage,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ var->data.binding);
+ ret->bindings[ret->num_bindings].index = var->data.binding;
+ ret->bindings[ret->num_bindings].binding = binding;
+ ret->bindings[ret->num_bindings].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ ret->num_bindings++;
+ } else {
+ assert(var->data.mode == nir_var_uniform);
+ if (glsl_type_is_array(var->type) &&
+ glsl_type_is_sampler(glsl_get_array_element(var->type))) {
+ for (int i = 0; i < glsl_get_length(var->type); ++i) {
+ int binding = zink_binding(nir->info.stage,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ var->data.binding + i);
+ ret->bindings[ret->num_bindings].index = var->data.binding + i;
+ ret->bindings[ret->num_bindings].binding = binding;
+ ret->bindings[ret->num_bindings].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ ret->num_bindings++;
+ }
+ } else if (glsl_type_is_sampler(var->type)) {
+ int binding = zink_binding(nir->info.stage,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ var->data.binding);
+ ret->bindings[ret->num_bindings].index = var->data.binding;
+ ret->bindings[ret->num_bindings].binding = binding;
+ ret->bindings[ret->num_bindings].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ ret->num_bindings++;
+ }
+ }
+ }
+
+ ret->info = nir->info;
+
+ struct spirv_shader *spirv = nir_to_spirv(nir);
+ assert(spirv);
+
+ if (zink_debug & ZINK_DEBUG_SPIRV) {
+ char buf[256];
+ static int i;
+ snprintf(buf, sizeof(buf), "dump%02d.spv", i++);
+ FILE *fp = fopen(buf, "wb");
+ if (fp) {
+ fwrite(spirv->words, sizeof(uint32_t), spirv->num_words, fp);
+ fclose(fp);
+ fprintf(stderr, "wrote '%s'...\n", buf);
+ }
+ }
+
+ VkShaderModuleCreateInfo smci = {};
+ smci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ smci.codeSize = spirv->num_words * sizeof(uint32_t);
+ smci.pCode = spirv->words;
+
+ if (vkCreateShaderModule(screen->dev, &smci, NULL, &ret->shader_module) != VK_SUCCESS)
+ return NULL;
+
+ return ret;
+}
+
+void
+zink_shader_free(struct zink_screen *screen, struct zink_shader *shader)
+{
+ vkDestroyShaderModule(screen->dev, shader->shader_module, NULL);
+ FREE(shader);
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_compiler.h b/lib/mesa/src/gallium/drivers/zink/zink_compiler.h
new file mode 100644
index 000000000..47e5b4b7c
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_compiler.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_COMPILER_H
+#define ZINK_COMPILER_H
+
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "compiler/shader_info.h"
+
+#include <vulkan/vulkan.h>
+
+struct pipe_screen;
+struct zink_screen;
+
+struct nir_shader_compiler_options;
+struct nir_shader;
+
+struct tgsi_token;
+
+const void *
+zink_get_compiler_options(struct pipe_screen *screen,
+ enum pipe_shader_ir ir,
+ enum pipe_shader_type shader);
+
+struct nir_shader *
+zink_tgsi_to_nir(struct pipe_screen *screen, const struct tgsi_token *tokens);
+
+struct zink_shader {
+ VkShaderModule shader_module;
+
+ shader_info info;
+
+ struct {
+ int index;
+ int binding;
+ VkDescriptorType type;
+ } bindings[PIPE_MAX_CONSTANT_BUFFERS + PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ size_t num_bindings;
+};
+
+struct zink_shader *
+zink_compile_nir(struct zink_screen *screen, struct nir_shader *nir);
+
+void
+zink_shader_free(struct zink_screen *screen, struct zink_shader *shader);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_context.c b/lib/mesa/src/gallium/drivers/zink/zink_context.c
new file mode 100644
index 000000000..323f87e27
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_context.c
@@ -0,0 +1,1172 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_context.h"
+
+#include "zink_batch.h"
+#include "zink_compiler.h"
+#include "zink_fence.h"
+#include "zink_framebuffer.h"
+#include "zink_helpers.h"
+#include "zink_pipeline.h"
+#include "zink_render_pass.h"
+#include "zink_resource.h"
+#include "zink_screen.h"
+#include "zink_state.h"
+#include "zink_surface.h"
+
+#include "indices/u_primconvert.h"
+#include "util/u_blitter.h"
+#include "util/u_debug.h"
+#include "util/format/u_format.h"
+#include "util/u_framebuffer.h"
+#include "util/u_helpers.h"
+#include "util/u_inlines.h"
+
+#include "nir.h"
+
+#include "util/u_memory.h"
+#include "util/u_upload_mgr.h"
+
+static void
+zink_context_destroy(struct pipe_context *pctx)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+
+ if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
+ debug_printf("vkQueueWaitIdle failed\n");
+
+ for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
+ vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
+ vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
+
+ util_primconvert_destroy(ctx->primconvert);
+ u_upload_destroy(pctx->stream_uploader);
+ slab_destroy_child(&ctx->transfer_pool);
+ util_blitter_destroy(ctx->blitter);
+ FREE(ctx);
+}
+
+static VkSamplerMipmapMode
+sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
+{
+ switch (filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ case PIPE_TEX_MIPFILTER_NONE:
+ unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
+ }
+ unreachable("unexpected filter");
+}
+
+static VkSamplerAddressMode
+sampler_address_mode(enum pipe_tex_wrap filter)
+{
+ switch (filter) {
+ case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
+ }
+ unreachable("unexpected wrap");
+}
+
+static VkCompareOp
+compare_op(enum pipe_compare_func op)
+{
+ switch (op) {
+ case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
+ case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
+ case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
+ case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
+ case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
+ case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
+ case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
+ case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
+ }
+ unreachable("unexpected compare");
+}
+
+static void *
+zink_create_sampler_state(struct pipe_context *pctx,
+ const struct pipe_sampler_state *state)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+
+ VkSamplerCreateInfo sci = {};
+ sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ sci.magFilter = zink_filter(state->mag_img_filter);
+ sci.minFilter = zink_filter(state->min_img_filter);
+
+ if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
+ sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
+ sci.minLod = state->min_lod;
+ sci.maxLod = state->max_lod;
+ } else {
+ sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ sci.minLod = 0;
+ sci.maxLod = 0;
+ }
+
+ sci.addressModeU = sampler_address_mode(state->wrap_s);
+ sci.addressModeV = sampler_address_mode(state->wrap_t);
+ sci.addressModeW = sampler_address_mode(state->wrap_r);
+ sci.mipLodBias = state->lod_bias;
+
+ if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
+ sci.compareOp = VK_COMPARE_OP_NEVER;
+ else {
+ sci.compareOp = compare_op(state->compare_func);
+ sci.compareEnable = VK_TRUE;
+ }
+
+ sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
+ sci.unnormalizedCoordinates = !state->normalized_coords;
+
+ if (state->max_anisotropy > 1) {
+ sci.maxAnisotropy = state->max_anisotropy;
+ sci.anisotropyEnable = VK_TRUE;
+ }
+
+ VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
+ if (!sampler)
+ return NULL;
+
+ if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
+ FREE(sampler);
+ return NULL;
+ }
+
+ return sampler;
+}
+
+static void
+zink_bind_sampler_states(struct pipe_context *pctx,
+ enum pipe_shader_type shader,
+ unsigned start_slot,
+ unsigned num_samplers,
+ void **samplers)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ for (unsigned i = 0; i < num_samplers; ++i) {
+ VkSampler *sampler = samplers[i];
+ ctx->sampler_states[shader][start_slot + i] = sampler;
+ ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
+ }
+ ctx->num_samplers[shader] = start_slot + num_samplers;
+}
+
+static void
+zink_delete_sampler_state(struct pipe_context *pctx,
+ void *sampler_state)
+{
+ struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
+ util_dynarray_append(&batch->zombie_samplers, VkSampler,
+ *(VkSampler *)sampler_state);
+ FREE(sampler_state);
+}
+
+static VkImageViewType
+image_view_type(enum pipe_texture_target target)
+{
+ switch (target) {
+ case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
+ case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
+ case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
+ case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
+ case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+ case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
+ case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D;
+ default:
+ unreachable("unexpected target");
+ }
+}
+
+static VkComponentSwizzle
+component_mapping(enum pipe_swizzle swizzle)
+{
+ switch (swizzle) {
+ case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
+ case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
+ case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
+ case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
+ case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
+ case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
+ case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
+ default:
+ unreachable("unexpected swizzle");
+ }
+}
+
+static VkImageAspectFlags
+sampler_aspect_from_format(enum pipe_format fmt)
+{
+ if (util_format_is_depth_or_stencil(fmt)) {
+ const struct util_format_description *desc = util_format_description(fmt);
+ if (util_format_has_depth(desc))
+ return VK_IMAGE_ASPECT_DEPTH_BIT;
+ assert(util_format_has_stencil(desc));
+ return VK_IMAGE_ASPECT_STENCIL_BIT;
+ } else
+ return VK_IMAGE_ASPECT_COLOR_BIT;
+}
+
+static struct pipe_sampler_view *
+zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
+ const struct pipe_sampler_view *state)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_resource *res = zink_resource(pres);
+ struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
+
+ sampler_view->base = *state;
+ sampler_view->base.texture = NULL;
+ pipe_resource_reference(&sampler_view->base.texture, pres);
+ sampler_view->base.reference.count = 1;
+ sampler_view->base.context = pctx;
+
+ VkImageViewCreateInfo ivci = {};
+ ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ ivci.image = res->image;
+ ivci.viewType = image_view_type(state->target);
+ ivci.format = zink_get_format(screen, state->format);
+ ivci.components.r = component_mapping(state->swizzle_r);
+ ivci.components.g = component_mapping(state->swizzle_g);
+ ivci.components.b = component_mapping(state->swizzle_b);
+ ivci.components.a = component_mapping(state->swizzle_a);
+
+ ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
+ ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
+ ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
+ ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
+ ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;
+
+ VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
+ if (err != VK_SUCCESS) {
+ FREE(sampler_view);
+ return NULL;
+ }
+
+ return &sampler_view->base;
+}
+
+static void
+zink_sampler_view_destroy(struct pipe_context *pctx,
+ struct pipe_sampler_view *pview)
+{
+ struct zink_sampler_view *view = zink_sampler_view(pview);
+ vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
+ FREE(view);
+}
+
+static void *
+zink_create_vs_state(struct pipe_context *pctx,
+ const struct pipe_shader_state *shader)
+{
+ struct nir_shader *nir;
+ if (shader->type != PIPE_SHADER_IR_NIR)
+ nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
+ else
+ nir = (struct nir_shader *)shader->ir.nir;
+
+ return zink_compile_nir(zink_screen(pctx->screen), nir);
+}
+
+static void
+bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
+ struct zink_shader *shader)
+{
+ assert(stage < PIPE_SHADER_COMPUTE);
+ ctx->gfx_stages[stage] = shader;
+ ctx->dirty_program = true;
+}
+
+static void
+zink_bind_vs_state(struct pipe_context *pctx,
+ void *cso)
+{
+ bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
+}
+
+static void
+zink_delete_vs_state(struct pipe_context *pctx,
+ void *cso)
+{
+ zink_shader_free(zink_screen(pctx->screen), cso);
+}
+
+static void *
+zink_create_fs_state(struct pipe_context *pctx,
+ const struct pipe_shader_state *shader)
+{
+ struct nir_shader *nir;
+ if (shader->type != PIPE_SHADER_IR_NIR)
+ nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
+ else
+ nir = (struct nir_shader *)shader->ir.nir;
+
+ return zink_compile_nir(zink_screen(pctx->screen), nir);
+}
+
+static void
+zink_bind_fs_state(struct pipe_context *pctx,
+ void *cso)
+{
+ bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
+}
+
+static void
+zink_delete_fs_state(struct pipe_context *pctx,
+ void *cso)
+{
+ zink_shader_free(zink_screen(pctx->screen), cso);
+}
+
+static void
+zink_set_polygon_stipple(struct pipe_context *pctx,
+ const struct pipe_poly_stipple *ps)
+{
+}
+
+static void
+zink_set_vertex_buffers(struct pipe_context *pctx,
+ unsigned start_slot,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ if (buffers) {
+ for (int i = 0; i < num_buffers; ++i) {
+ const struct pipe_vertex_buffer *vb = buffers + i;
+ ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
+ }
+ }
+
+ util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
+ buffers, start_slot, num_buffers);
+}
+
+static void
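+/* gallium provides viewports as scale/translate pairs; convert them into the
+ * Vulkan x/y/width/height plus depth-range form */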
+zink_set_viewport_states(struct pipe_context *pctx,
+ unsigned start_slot,
+ unsigned num_viewports,
+ const struct pipe_viewport_state *state)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ for (unsigned i = 0; i < num_viewports; ++i) {
+ VkViewport viewport = {
+ state[i].translate[0] - state[i].scale[0],
+ state[i].translate[1] - state[i].scale[1],
+ state[i].scale[0] * 2,
+ state[i].scale[1] * 2,
+ state[i].translate[2] - state[i].scale[2],
+ state[i].translate[2] + state[i].scale[2]
+ };
+ ctx->viewport_states[start_slot + i] = state[i];
+ ctx->viewports[start_slot + i] = viewport;
+ }
+ ctx->num_viewports = start_slot + num_viewports;
+}
+
+static void
+zink_set_scissor_states(struct pipe_context *pctx,
+ unsigned start_slot, unsigned num_scissors,
+ const struct pipe_scissor_state *states)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ for (unsigned i = 0; i < num_scissors; i++) {
+ VkRect2D scissor;
+
+ scissor.offset.x = states[i].minx;
+ scissor.offset.y = states[i].miny;
+ scissor.extent.width = states[i].maxx - states[i].minx;
+ scissor.extent.height = states[i].maxy - states[i].miny;
+ ctx->scissor_states[start_slot + i] = states[i];
+ ctx->scissors[start_slot + i] = scissor;
+ }
+}
+
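+/* user-pointer constant buffers get copied through the uploader first, so
+ * the rest of the driver only ever sees real buffer resources */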
+static void
+zink_set_constant_buffer(struct pipe_context *pctx,
+ enum pipe_shader_type shader, uint index,
+ const struct pipe_constant_buffer *cb)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ if (cb) {
+ struct pipe_resource *buffer = cb->buffer;
+ unsigned offset = cb->buffer_offset;
+ if (cb->user_buffer) {
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
+ screen->props.limits.minUniformBufferOffsetAlignment,
+ cb->user_buffer, &offset, &buffer);
+ }
+
+ pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
+ ctx->ubos[shader][index].buffer_offset = offset;
+ ctx->ubos[shader][index].buffer_size = cb->buffer_size;
+ ctx->ubos[shader][index].user_buffer = NULL;
+
+ if (cb->user_buffer)
+ pipe_resource_reference(&buffer, NULL);
+ } else {
+ pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
+ ctx->ubos[shader][index].buffer_offset = 0;
+ ctx->ubos[shader][index].buffer_size = 0;
+ ctx->ubos[shader][index].user_buffer = NULL;
+ }
+}
+
+static void
+zink_set_sampler_views(struct pipe_context *pctx,
+ enum pipe_shader_type shader_type,
+ unsigned start_slot,
+ unsigned num_views,
+ struct pipe_sampler_view **views)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ assert(views);
+ for (unsigned i = 0; i < num_views; ++i) {
+ pipe_sampler_view_reference(
+ &ctx->image_views[shader_type][start_slot + i],
+ views[i]);
+ }
+ ctx->num_image_views[shader_type] = start_slot + num_views;
+}
+
+static void
+zink_set_stencil_ref(struct pipe_context *pctx,
+ const struct pipe_stencil_ref *ref)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ ctx->stencil_ref = *ref;
+}
+
+static void
+zink_set_clip_state(struct pipe_context *pctx,
+ const struct pipe_clip_state *pcs)
+{
+}
+
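+/* render passes are cached on their attachment state (formats and sample
+ * counts); look one up and create/insert it on a miss */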
+static struct zink_render_pass *
+get_render_pass(struct zink_context *ctx)
+{
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ const struct pipe_framebuffer_state *fb = &ctx->fb_state;
+ struct zink_render_pass_state state = { 0 };
+
+ for (int i = 0; i < fb->nr_cbufs; i++) {
+ struct pipe_resource *res = fb->cbufs[i]->texture;
+ state.rts[i].format = zink_get_format(screen, fb->cbufs[i]->format);
+ state.rts[i].samples = res->nr_samples > 0 ? res->nr_samples :
+ VK_SAMPLE_COUNT_1_BIT;
+ }
+ state.num_cbufs = fb->nr_cbufs;
+
+ if (fb->zsbuf) {
+ struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
+ state.rts[fb->nr_cbufs].format = zsbuf->format;
+ state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
+ }
+ state.have_zsbuf = fb->zsbuf != NULL;
+
+ struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
+ &state);
+ if (!entry) {
+ struct zink_render_pass *rp;
+ rp = zink_create_render_pass(screen, &state);
+ entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
+ if (!entry)
+ return NULL;
+ }
+
+ return entry->data;
+}
+
+static struct zink_framebuffer *
+create_framebuffer(struct zink_context *ctx)
+{
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+
+ struct zink_framebuffer_state state = {};
+ state.rp = get_render_pass(ctx);
+ for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
+ struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
+ state.attachments[i] = zink_surface(psurf);
+ }
+
+ state.num_attachments = ctx->fb_state.nr_cbufs;
+ if (ctx->fb_state.zsbuf) {
+ struct pipe_surface *psurf = ctx->fb_state.zsbuf;
+ state.attachments[state.num_attachments++] = zink_surface(psurf);
+ }
+
+ state.width = ctx->fb_state.width;
+ state.height = ctx->fb_state.height;
+ state.layers = MAX2(ctx->fb_state.layers, 1);
+
+ return zink_create_framebuffer(screen, &state);
+}
+
+void
+zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
+{
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ assert(batch == zink_curr_batch(ctx));
+ assert(ctx->gfx_pipeline_state.render_pass);
+
+ struct pipe_framebuffer_state *fb_state = &ctx->fb_state;
+
+ VkRenderPassBeginInfo rpbi = {};
+ rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
+ rpbi.renderArea.offset.x = 0;
+ rpbi.renderArea.offset.y = 0;
+ rpbi.renderArea.extent.width = fb_state->width;
+ rpbi.renderArea.extent.height = fb_state->height;
+ rpbi.clearValueCount = 0;
+ rpbi.pClearValues = NULL;
+ rpbi.framebuffer = ctx->framebuffer->fb;
+
+ assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
+ assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
+ assert(!batch->fb || batch->fb == ctx->framebuffer);
+
+ for (int i = 0; i < fb_state->nr_cbufs; i++) {
+ struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ }
+
+ if (fb_state->zsbuf) {
+ struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ }
+
+ zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
+ zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);
+
+ vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
+}
+
+static void
+flush_batch(struct zink_context *ctx)
+{
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ if (batch->rp)
+ vkCmdEndRenderPass(batch->cmdbuf);
+
+ zink_end_batch(ctx, batch);
+
+ ctx->curr_batch++;
+ if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
+ ctx->curr_batch = 0;
+
+ zink_start_batch(ctx, zink_curr_batch(ctx));
+}
+
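+/* returns the current batch with a render-pass active, beginning one if
+ * needed */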
+struct zink_batch *
+zink_batch_rp(struct zink_context *ctx)
+{
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ if (!batch->rp) {
+ zink_begin_render_pass(ctx, batch);
+ assert(batch->rp);
+ }
+ return batch;
+}
+
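+/* returns a batch that is outside of any render-pass, flushing the current
+ * batch first if one is active */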
+struct zink_batch *
+zink_batch_no_rp(struct zink_context *ctx)
+{
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ if (batch->rp) {
+ /* flush batch and get a new one */
+ flush_batch(ctx);
+ batch = zink_curr_batch(ctx);
+ assert(!batch->rp);
+ }
+ return batch;
+}
+
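+/* (re)create the framebuffer and render-pass for the new state, and
+ * transition all attachments into their attachment layouts up front */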
+static void
+zink_set_framebuffer_state(struct pipe_context *pctx,
+ const struct pipe_framebuffer_state *state)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+
+ VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
+ for (int i = 0; i < state->nr_cbufs; i++)
+ rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
+ if (state->zsbuf && state->zsbuf->texture->nr_samples)
+ rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);
+
+ util_copy_framebuffer_state(&ctx->fb_state, state);
+
+ struct zink_framebuffer *fb = ctx->framebuffer;
+ /* explicitly unref previous fb to ensure it gets destroyed */
+ if (fb)
+ zink_framebuffer_reference(screen, &fb, NULL);
+ fb = create_framebuffer(ctx);
+ zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
+ zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);
+
+ ctx->gfx_pipeline_state.rast_samples = rast_samples;
+ ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
+
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+
+ for (int i = 0; i < state->nr_cbufs; i++) {
+ struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ }
+
+ if (state->zsbuf) {
+ struct zink_resource *res = zink_resource(state->zsbuf->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ }
+}
+
+static void
+zink_set_blend_color(struct pipe_context *pctx,
+ const struct pipe_blend_color *color)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
+}
+
+static void
+zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ ctx->gfx_pipeline_state.sample_mask = sample_mask;
+}
+
+static VkAccessFlags
+access_src_flags(VkImageLayout layout)
+{
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ case VK_IMAGE_LAYOUT_GENERAL:
+ return 0;
+
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ return VK_ACCESS_SHADER_READ_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return VK_ACCESS_TRANSFER_READ_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ case VK_IMAGE_LAYOUT_PREINITIALIZED:
+ return VK_ACCESS_HOST_WRITE_BIT;
+
+ default:
+ unreachable("unexpected layout");
+ }
+}
+
+static VkAccessFlags
+access_dst_flags(VkImageLayout layout)
+{
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ case VK_IMAGE_LAYOUT_GENERAL:
+ return 0;
+
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return VK_ACCESS_TRANSFER_READ_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ default:
+ unreachable("unexpected layout");
+ }
+}
+
+static VkPipelineStageFlags
+pipeline_dst_stage(VkImageLayout layout)
+{
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ default:
+ return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+ }
+}
+
+static VkPipelineStageFlags
+pipeline_src_stage(VkImageLayout layout)
+{
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ default:
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ }
+}
+
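+/* record a layout transition covering every mip-level and array-layer of the
+ * image, and update the layout tracked for the resource */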
+void
+zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
+ VkImageAspectFlags aspect, VkImageLayout new_layout)
+{
+ VkImageSubresourceRange isr = {
+ aspect,
+ 0, VK_REMAINING_MIP_LEVELS,
+ 0, VK_REMAINING_ARRAY_LAYERS
+ };
+
+ VkImageMemoryBarrier imb = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ NULL,
+ access_src_flags(res->layout),
+ access_dst_flags(new_layout),
+ res->layout,
+ new_layout,
+ VK_QUEUE_FAMILY_IGNORED,
+ VK_QUEUE_FAMILY_IGNORED,
+ res->image,
+ isr
+ };
+ vkCmdPipelineBarrier(
+ cmdbuf,
+ pipeline_src_stage(res->layout),
+ pipeline_dst_stage(new_layout),
+ 0,
+ 0, NULL,
+ 0, NULL,
+ 1, &imb
+ );
+
+ res->layout = new_layout;
+}
+
+static void
+zink_clear(struct pipe_context *pctx,
+ unsigned buffers,
+ const struct pipe_scissor_state *scissor_state,
+ const union pipe_color_union *pcolor,
+ double depth, unsigned stencil)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct pipe_framebuffer_state *fb = &ctx->fb_state;
+
+   /* FIXME: this is very inefficient; if no render-pass has been started
+    * yet, we should record the clear when it covers the whole framebuffer,
+    * and apply it as we start the render-pass. Otherwise we could do a
+    * partial clear outside of the render-pass.
+    */
+ struct zink_batch *batch = zink_batch_rp(ctx);
+
+ VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
+ int num_attachments = 0;
+
+ if (buffers & PIPE_CLEAR_COLOR) {
+ VkClearColorValue color;
+ color.float32[0] = pcolor->f[0];
+ color.float32[1] = pcolor->f[1];
+ color.float32[2] = pcolor->f[2];
+ color.float32[3] = pcolor->f[3];
+
+ for (unsigned i = 0; i < fb->nr_cbufs; i++) {
+ if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
+ continue;
+
+ attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ attachments[num_attachments].colorAttachment = i;
+ attachments[num_attachments].clearValue.color = color;
+ ++num_attachments;
+ }
+ }
+
+ if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
+ VkImageAspectFlags aspect = 0;
+ if (buffers & PIPE_CLEAR_DEPTH)
+ aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ if (buffers & PIPE_CLEAR_STENCIL)
+ aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ attachments[num_attachments].aspectMask = aspect;
+ attachments[num_attachments].clearValue.depthStencil.depth = depth;
+ attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
+ ++num_attachments;
+ }
+
+ VkClearRect cr;
+ cr.rect.offset.x = 0;
+ cr.rect.offset.y = 0;
+ cr.rect.extent.width = fb->width;
+ cr.rect.extent.height = fb->height;
+ cr.baseArrayLayer = 0;
+ cr.layerCount = util_framebuffer_get_num_layers(fb);
+ vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
+}
+
+VkShaderStageFlagBits
+zink_shader_stage(enum pipe_shader_type type)
+{
+ VkShaderStageFlagBits stages[] = {
+ [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
+ [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
+ [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
+ [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
+ };
+ return stages[type];
+}
+
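+/* gfx programs are keyed on the array of per-stage shaders (every graphics
+ * stage, i.e. everything but compute), hashed and compared as raw pointer
+ * bytes */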
+static uint32_t
+hash_gfx_program(const void *key)
+{
+ return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
+}
+
+static bool
+equals_gfx_program(const void *a, const void *b)
+{
+ return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
+}
+
+static uint32_t
+hash_render_pass_state(const void *key)
+{
+ return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
+}
+
+static bool
+equals_render_pass_state(const void *a, const void *b)
+{
+ return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
+}
+
+static void
+zink_flush(struct pipe_context *pctx,
+ struct pipe_fence_handle **pfence,
+ enum pipe_flush_flags flags)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ flush_batch(ctx);
+
+ if (pfence)
+ zink_fence_reference(zink_screen(pctx->screen),
+ (struct zink_fence **)pfence,
+ batch->fence);
+
+ /* HACK:
+ * For some strange reason, we need to finish before presenting, or else
+ * we start rendering on top of the back-buffer for the next frame. This
+ * seems like a bug in the DRI-driver to me, because we really should
+ * be properly protected by fences here, and the back-buffer should
+ * either be swapped with the front-buffer, or blitted from. But for
+ * some strange reason, neither of these things happen.
+ */
+ if (flags & PIPE_FLUSH_END_OF_FRAME)
+ pctx->screen->fence_finish(pctx->screen, pctx,
+ (struct pipe_fence_handle *)batch->fence,
+ PIPE_TIMEOUT_INFINITE);
+}
+
+static void
+zink_flush_resource(struct pipe_context *pipe,
+ struct pipe_resource *resource)
+{
+}
+
+static void
+zink_resource_copy_region(struct pipe_context *pctx,
+ struct pipe_resource *pdst,
+ unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *psrc,
+ unsigned src_level, const struct pipe_box *src_box)
+{
+ struct zink_resource *dst = zink_resource(pdst);
+ struct zink_resource *src = zink_resource(psrc);
+ struct zink_context *ctx = zink_context(pctx);
+ if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
+ VkImageCopy region = {};
+
+ region.srcSubresource.aspectMask = src->aspect;
+ region.srcSubresource.mipLevel = src_level;
+ region.srcSubresource.layerCount = 1;
+ if (src->base.array_size > 1) {
+ region.srcSubresource.baseArrayLayer = src_box->z;
+ region.srcSubresource.layerCount = src_box->depth;
+ region.extent.depth = 1;
+ } else {
+ region.srcOffset.z = src_box->z;
+ region.srcSubresource.layerCount = 1;
+ region.extent.depth = src_box->depth;
+ }
+
+ region.srcOffset.x = src_box->x;
+ region.srcOffset.y = src_box->y;
+
+ region.dstSubresource.aspectMask = dst->aspect;
+ region.dstSubresource.mipLevel = dst_level;
+ if (dst->base.array_size > 1) {
+ region.dstSubresource.baseArrayLayer = dstz;
+ region.dstSubresource.layerCount = src_box->depth;
+ } else {
+ region.dstOffset.z = dstz;
+ region.dstSubresource.layerCount = 1;
+ }
+
+ region.dstOffset.x = dstx;
+ region.dstOffset.y = dsty;
+ region.extent.width = src_box->width;
+ region.extent.height = src_box->height;
+
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+ zink_batch_reference_resoure(batch, src);
+ zink_batch_reference_resoure(batch, dst);
+
+ if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
+ zink_resource_barrier(batch->cmdbuf, src, src->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+ }
+
+ if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
+ zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ }
+
+ vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
+ dst->image, dst->layout,
+ 1, &region);
+ } else if (dst->base.target == PIPE_BUFFER &&
+ src->base.target == PIPE_BUFFER) {
+ VkBufferCopy region;
+ region.srcOffset = src_box->x;
+ region.dstOffset = dstx;
+ region.size = src_box->width;
+
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+ zink_batch_reference_resoure(batch, src);
+ zink_batch_reference_resoure(batch, dst);
+
+ vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
+ } else
+ debug_printf("zink: TODO resource copy\n");
+}
+
+struct pipe_context *
+zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ struct zink_context *ctx = CALLOC_STRUCT(zink_context);
+ if (!ctx)
+ goto fail;
+
+ ctx->base.screen = pscreen;
+ ctx->base.priv = priv;
+
+ ctx->base.destroy = zink_context_destroy;
+
+ zink_context_state_init(&ctx->base);
+
+ ctx->base.create_sampler_state = zink_create_sampler_state;
+ ctx->base.bind_sampler_states = zink_bind_sampler_states;
+ ctx->base.delete_sampler_state = zink_delete_sampler_state;
+
+ ctx->base.create_sampler_view = zink_create_sampler_view;
+ ctx->base.set_sampler_views = zink_set_sampler_views;
+ ctx->base.sampler_view_destroy = zink_sampler_view_destroy;
+
+ ctx->base.create_vs_state = zink_create_vs_state;
+ ctx->base.bind_vs_state = zink_bind_vs_state;
+ ctx->base.delete_vs_state = zink_delete_vs_state;
+
+ ctx->base.create_fs_state = zink_create_fs_state;
+ ctx->base.bind_fs_state = zink_bind_fs_state;
+ ctx->base.delete_fs_state = zink_delete_fs_state;
+
+ ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
+ ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
+ ctx->base.set_viewport_states = zink_set_viewport_states;
+ ctx->base.set_scissor_states = zink_set_scissor_states;
+ ctx->base.set_constant_buffer = zink_set_constant_buffer;
+ ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
+ ctx->base.set_stencil_ref = zink_set_stencil_ref;
+ ctx->base.set_clip_state = zink_set_clip_state;
+ ctx->base.set_blend_color = zink_set_blend_color;
+
+ ctx->base.set_sample_mask = zink_set_sample_mask;
+
+ ctx->base.clear = zink_clear;
+ ctx->base.draw_vbo = zink_draw_vbo;
+ ctx->base.flush = zink_flush;
+
+ ctx->base.resource_copy_region = zink_resource_copy_region;
+ ctx->base.blit = zink_blit;
+
+ ctx->base.flush_resource = zink_flush_resource;
+ zink_context_surface_init(&ctx->base);
+ zink_context_resource_init(&ctx->base);
+ zink_context_query_init(&ctx->base);
+
+ slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
+
+ ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
+ ctx->base.const_uploader = ctx->base.stream_uploader;
+
+ int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
+ 1 << PIPE_PRIM_LINES |
+ 1 << PIPE_PRIM_LINE_STRIP |
+ 1 << PIPE_PRIM_TRIANGLES |
+ 1 << PIPE_PRIM_TRIANGLE_STRIP |
+ 1 << PIPE_PRIM_TRIANGLE_FAN;
+
+ ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
+ if (!ctx->primconvert)
+ goto fail;
+
+ ctx->blitter = util_blitter_create(&ctx->base);
+ if (!ctx->blitter)
+ goto fail;
+
+ VkCommandPoolCreateInfo cpci = {};
+ cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ cpci.queueFamilyIndex = screen->gfx_queue;
+ cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
+ goto fail;
+
+ VkCommandBufferAllocateInfo cbai = {};
+ cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ cbai.commandPool = ctx->cmdpool;
+ cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ cbai.commandBufferCount = 1;
+
+ VkDescriptorPoolSize sizes[] = {
+ {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
+ {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE}
+ };
+ VkDescriptorPoolCreateInfo dpci = {};
+ dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ dpci.pPoolSizes = sizes;
+ dpci.poolSizeCount = ARRAY_SIZE(sizes);
+ dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
+ dpci.maxSets = ZINK_BATCH_DESC_SIZE;
+
+ for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
+ if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
+ goto fail;
+
+ ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ ctx->batches[i].sampler_views = _mesa_set_create(NULL,
+ _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
+ goto fail;
+
+ util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);
+
+ if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
+ &ctx->batches[i].descpool) != VK_SUCCESS)
+ goto fail;
+ }
+
+ vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);
+
+ ctx->program_cache = _mesa_hash_table_create(NULL,
+ hash_gfx_program,
+ equals_gfx_program);
+ ctx->render_pass_cache = _mesa_hash_table_create(NULL,
+ hash_render_pass_state,
+ equals_render_pass_state);
+ if (!ctx->program_cache || !ctx->render_pass_cache)
+ goto fail;
+
+ const uint8_t data[] = { 0 };
+ ctx->dummy_buffer = pipe_buffer_create_with_data(&ctx->base,
+ PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(data), data);
+ if (!ctx->dummy_buffer)
+ goto fail;
+
+ ctx->dirty_program = true;
+
+ /* start the first batch */
+ zink_start_batch(ctx, zink_curr_batch(ctx));
+
+ return &ctx->base;
+
+fail:
+ if (ctx) {
+ vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
+ FREE(ctx);
+ }
+ return NULL;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_context.h b/lib/mesa/src/gallium/drivers/zink/zink_context.h
new file mode 100644
index 000000000..76fa780b9
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_context.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_CONTEXT_H
+#define ZINK_CONTEXT_H
+
+#include "zink_pipeline.h"
+#include "zink_batch.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+
+#include "util/slab.h"
+#include "util/list.h"
+
+#include <vulkan/vulkan.h>
+
+struct blitter_context;
+struct primconvert_context;
+struct list_head;
+
+struct zink_blend_state;
+struct zink_depth_stencil_alpha_state;
+struct zink_gfx_program;
+struct zink_rasterizer_state;
+struct zink_resource;
+struct zink_vertex_elements_state;
+
+struct zink_sampler_view {
+ struct pipe_sampler_view base;
+ VkImageView image_view;
+};
+
+static inline struct zink_sampler_view *
+zink_sampler_view(struct pipe_sampler_view *pview)
+{
+ return (struct zink_sampler_view *)pview;
+}
+
+struct zink_context {
+ struct pipe_context base;
+ struct slab_child_pool transfer_pool;
+ struct blitter_context *blitter;
+
+ VkCommandPool cmdpool;
+ struct zink_batch batches[4];
+ unsigned curr_batch;
+
+ VkQueue queue;
+
+ struct pipe_constant_buffer ubos[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
+ struct pipe_framebuffer_state fb_state;
+
+ struct zink_vertex_elements_state *element_state;
+ struct zink_rasterizer_state *rast_state;
+
+ struct zink_shader *gfx_stages[PIPE_SHADER_TYPES - 1];
+ struct zink_gfx_pipeline_state gfx_pipeline_state;
+ struct hash_table *program_cache;
+ struct zink_gfx_program *curr_program;
+
+ unsigned dirty_program : 1;
+
+ struct hash_table *render_pass_cache;
+
+ struct primconvert_context *primconvert;
+
+ struct zink_framebuffer *framebuffer;
+
+ struct pipe_viewport_state viewport_states[PIPE_MAX_VIEWPORTS];
+ struct pipe_scissor_state scissor_states[PIPE_MAX_VIEWPORTS];
+ VkViewport viewports[PIPE_MAX_VIEWPORTS];
+ VkRect2D scissors[PIPE_MAX_VIEWPORTS];
+ unsigned num_viewports;
+
+ struct pipe_vertex_buffer buffers[PIPE_MAX_ATTRIBS];
+ uint32_t buffers_enabled_mask;
+
+ void *sampler_states[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+ VkSampler samplers[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+ unsigned num_samplers[PIPE_SHADER_TYPES];
+ struct pipe_sampler_view *image_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ unsigned num_image_views[PIPE_SHADER_TYPES];
+
+ float line_width;
+ float blend_constants[4];
+
+ struct pipe_stencil_ref stencil_ref;
+
+ struct list_head active_queries;
+ bool queries_disabled;
+
+ struct pipe_resource *dummy_buffer;
+};
+
+static inline struct zink_context *
+zink_context(struct pipe_context *context)
+{
+ return (struct zink_context *)context;
+}
+
+static inline struct zink_batch *
+zink_curr_batch(struct zink_context *ctx)
+{
+ assert(ctx->curr_batch < ARRAY_SIZE(ctx->batches));
+ return ctx->batches + ctx->curr_batch;
+}
+
+struct zink_batch *
+zink_batch_rp(struct zink_context *ctx);
+
+struct zink_batch *
+zink_batch_no_rp(struct zink_context *ctx);
+
+void
+zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
+ VkImageAspectFlags aspect, VkImageLayout new_layout);
+
+void
+zink_begin_render_pass(struct zink_context *ctx,
+ struct zink_batch *batch);
+
+VkShaderStageFlagBits
+zink_shader_stage(enum pipe_shader_type type);
+
+struct pipe_context *
+zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags);
+
+void
+zink_context_query_init(struct pipe_context *ctx);
+
+void
+zink_blit(struct pipe_context *pctx,
+ const struct pipe_blit_info *info);
+
+void
+zink_draw_vbo(struct pipe_context *pctx,
+ const struct pipe_draw_info *dinfo);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_draw.c b/lib/mesa/src/gallium/drivers/zink/zink_draw.c
new file mode 100644
index 000000000..553579acf
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_draw.c
@@ -0,0 +1,312 @@
+#include "zink_compiler.h"
+#include "zink_context.h"
+#include "zink_program.h"
+#include "zink_resource.h"
+#include "zink_screen.h"
+#include "zink_state.h"
+
+#include "indices/u_primconvert.h"
+#include "util/hash_table.h"
+#include "util/u_debug.h"
+#include "util/u_helpers.h"
+#include "util/u_inlines.h"
+#include "util/u_prim.h"
+
+static VkDescriptorSet
+allocate_descriptor_set(struct zink_screen *screen,
+ struct zink_batch *batch,
+ struct zink_gfx_program *prog)
+{
+ assert(batch->descs_left >= prog->num_descriptors);
+ VkDescriptorSetAllocateInfo dsai = {};
+ dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ dsai.descriptorPool = batch->descpool;
+ dsai.descriptorSetCount = 1;
+ dsai.pSetLayouts = &prog->dsl;
+
+ VkDescriptorSet desc_set;
+ if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
+ debug_printf("ZINK: failed to allocate descriptor set :/");
+ return VK_NULL_HANDLE;
+ }
+
+ batch->descs_left -= prog->num_descriptors;
+ return desc_set;
+}
+
+static void
+zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
+{
+ VkBuffer buffers[PIPE_MAX_ATTRIBS];
+ VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
+ const struct zink_vertex_elements_state *elems = ctx->element_state;
+ for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
+ struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
+ assert(vb);
+ if (vb->buffer.resource) {
+ struct zink_resource *res = zink_resource(vb->buffer.resource);
+ buffers[i] = res->buffer;
+ buffer_offsets[i] = vb->buffer_offset;
+ zink_batch_reference_resoure(batch, res);
+ } else {
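+ /* nothing is bound to this slot: bind the one-byte dummy vertex buffer
+ * created in zink_context_create() so Vulkan always sees a valid
+ * binding */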
+ buffers[i] = zink_resource(ctx->dummy_buffer)->buffer;
+ buffer_offsets[i] = 0;
+ }
+ }
+
+ if (elems->hw_state.num_bindings > 0)
+ vkCmdBindVertexBuffers(batch->cmdbuf, 0,
+ elems->hw_state.num_bindings,
+ buffers, buffer_offsets);
+}
+
+static struct zink_gfx_program *
+get_gfx_program(struct zink_context *ctx)
+{
+ if (ctx->dirty_program) {
+ struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
+ ctx->gfx_stages);
+ if (!entry) {
+ struct zink_gfx_program *prog;
+ prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
+ ctx->gfx_stages);
+ entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
+ if (!entry)
+ return NULL;
+ }
+ ctx->curr_program = entry->data;
+ ctx->dirty_program = false;
+ }
+
+ assert(ctx->curr_program);
+ return ctx->curr_program;
+}
+
+static bool
+line_width_needed(enum pipe_prim_type reduced_prim,
+ VkPolygonMode polygon_mode)
+{
+ switch (reduced_prim) {
+ case PIPE_PRIM_POINTS:
+ return false;
+
+ case PIPE_PRIM_LINES:
+ return true;
+
+ case PIPE_PRIM_TRIANGLES:
+ return polygon_mode == VK_POLYGON_MODE_LINE;
+
+ default:
+ unreachable("unexpected reduced prim");
+ }
+}
+
+void
+zink_draw_vbo(struct pipe_context *pctx,
+ const struct pipe_draw_info *dinfo)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_rasterizer_state *rast_state = ctx->rast_state;
+
+ if (dinfo->mode >= PIPE_PRIM_QUADS ||
+ dinfo->mode == PIPE_PRIM_LINE_LOOP ||
+ dinfo->index_size == 1) {
+ if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
+ return;
+
+ util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
+ util_primconvert_draw_vbo(ctx->primconvert, dinfo);
+ return;
+ }
+
+ struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
+ if (!gfx_program)
+ return;
+
+ VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
+ &ctx->gfx_pipeline_state,
+ dinfo->mode);
+
+ enum pipe_prim_type reduced_prim = u_reduced_prim(dinfo->mode);
+
+ bool depth_bias = false;
+ switch (reduced_prim) {
+ case PIPE_PRIM_POINTS:
+ depth_bias = rast_state->offset_point;
+ break;
+
+ case PIPE_PRIM_LINES:
+ depth_bias = rast_state->offset_line;
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ depth_bias = rast_state->offset_tri;
+ break;
+
+ default:
+ unreachable("unexpected reduced prim");
+ }
+
+ unsigned index_offset = 0;
+ struct pipe_resource *index_buffer = NULL;
+ if (dinfo->index_size > 0) {
+ if (dinfo->has_user_indices) {
+ if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset, 4)) {
+ debug_printf("util_upload_index_buffer() failed\n");
+ return;
+ }
+ } else
+ index_buffer = dinfo->index.resource;
+ }
+
+ VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
+ VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ int num_wds = 0, num_buffer_info = 0, num_image_info = 0;
+
+ struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ int num_transitions = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
+ struct zink_shader *shader = ctx->gfx_stages[i];
+ if (!shader)
+ continue;
+
+ for (int j = 0; j < shader->num_bindings; j++) {
+ int index = shader->bindings[j].index;
+ if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+ assert(ctx->ubos[i][index].buffer_size > 0);
+ assert(ctx->ubos[i][index].buffer_size <= screen->props.limits.maxUniformBufferRange);
+ assert(ctx->ubos[i][index].buffer);
+ struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
+ buffer_infos[num_buffer_info].buffer = res->buffer;
+ buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
+ buffer_infos[num_buffer_info].range = ctx->ubos[i][index].buffer_size;
+ wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
+ ++num_buffer_info;
+ } else {
+ struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
+ assert(psampler_view);
+ struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);
+
+ struct zink_resource *res = zink_resource(psampler_view->texture);
+ VkImageLayout layout = res->layout;
+ if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
+ layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
+ layout != VK_IMAGE_LAYOUT_GENERAL) {
+ transitions[num_transitions++] = res;
+ layout = VK_IMAGE_LAYOUT_GENERAL;
+ }
+ image_infos[num_image_info].imageLayout = layout;
+ image_infos[num_image_info].imageView = sampler_view->image_view;
+ image_infos[num_image_info].sampler = ctx->samplers[i][index];
+ wds[num_wds].pImageInfo = image_infos + num_image_info;
+ ++num_image_info;
+ }
+
+ wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ wds[num_wds].pNext = NULL;
+ wds[num_wds].dstBinding = shader->bindings[j].binding;
+ wds[num_wds].dstArrayElement = 0;
+ wds[num_wds].descriptorCount = 1;
+ wds[num_wds].descriptorType = shader->bindings[j].type;
+ ++num_wds;
+ }
+ }
+
+ struct zink_batch *batch;
+ if (num_transitions > 0) {
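+ /* image-layout barriers can't be recorded inside a render pass, so emit
+ * the needed sampler-view transitions on the no-render-pass batch first */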
+ batch = zink_batch_no_rp(ctx);
+
+ for (int i = 0; i < num_transitions; ++i)
+ zink_resource_barrier(batch->cmdbuf, transitions[i],
+ transitions[i]->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ }
+
+ batch = zink_batch_rp(ctx);
+
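+ /* each batch owns a fixed-size descriptor pool; if this draw wouldn't fit,
+ * submit the batch and start a fresh one */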
+ if (batch->descs_left < gfx_program->num_descriptors) {
+ ctx->base.flush(&ctx->base, NULL, 0);
+ batch = zink_batch_rp(ctx);
+ assert(batch->descs_left >= gfx_program->num_descriptors);
+ }
+
+ VkDescriptorSet desc_set = allocate_descriptor_set(screen, batch,
+ gfx_program);
+ assert(desc_set != VK_NULL_HANDLE);
+
+ for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
+ struct zink_shader *shader = ctx->gfx_stages[i];
+ if (!shader)
+ continue;
+
+ for (int j = 0; j < shader->num_bindings; j++) {
+ int index = shader->bindings[j].index;
+ if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+ struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
+ zink_batch_reference_resoure(batch, res);
+ } else {
+ struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
+ zink_batch_reference_sampler_view(batch, sampler_view);
+ }
+ }
+ }
+
+ vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
+ if (ctx->rast_state->base.scissor)
+ vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_viewports, ctx->scissors);
+ else if (ctx->fb_state.width && ctx->fb_state.height) {
+ VkRect2D fb_scissor = {};
+ fb_scissor.extent.width = ctx->fb_state.width;
+ fb_scissor.extent.height = ctx->fb_state.height;
+ vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
+ }
+
+ if (line_width_needed(reduced_prim, rast_state->hw_state.polygon_mode)) {
+ if (screen->feats.wideLines || ctx->line_width == 1.0f)
+ vkCmdSetLineWidth(batch->cmdbuf, ctx->line_width);
+ else
+ debug_printf("BUG: wide lines not supported, needs fallback!");
+ }
+
+ vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref.ref_value[0]);
+ vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref.ref_value[1]);
+
+ if (depth_bias)
+ vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
+ else
+ vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);
+
+ if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
+ vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);
+
+ if (num_wds > 0) {
+ for (int i = 0; i < num_wds; ++i)
+ wds[i].dstSet = desc_set;
+ vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
+ }
+
+ vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ gfx_program->layout, 0, 1, &desc_set, 0, NULL);
+ zink_bind_vertex_buffers(batch, ctx);
+
+ if (dinfo->index_size > 0) {
+ assert(dinfo->index_size != 1);
+ VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
+ struct zink_resource *res = zink_resource(index_buffer);
+ vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
+ zink_batch_reference_resoure(batch, res);
+ vkCmdDrawIndexed(batch->cmdbuf,
+ dinfo->count, dinfo->instance_count,
+ dinfo->start, dinfo->index_bias, dinfo->start_instance);
+ } else
+ vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);
+
+ if (dinfo->index_size > 0 && dinfo->has_user_indices)
+ pipe_resource_reference(&index_buffer, NULL);
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_fence.c b/lib/mesa/src/gallium/drivers/zink/zink_fence.c
new file mode 100644
index 000000000..72f1b6c57
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_fence.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_fence.h"
+
+#include "zink_screen.h"
+
+#include "util/u_memory.h"
+
+static void
+destroy_fence(struct zink_screen *screen, struct zink_fence *fence)
+{
+ if (fence->fence)
+ vkDestroyFence(screen->dev, fence->fence, NULL);
+ FREE(fence);
+}
+
+struct zink_fence *
+zink_create_fence(struct pipe_screen *pscreen)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ VkFenceCreateInfo fci = {};
+ fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
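+ /* the fence is created unsignaled; it only becomes waitable after being
+ * handed to vkQueueSubmit() */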
+
+ struct zink_fence *ret = CALLOC_STRUCT(zink_fence);
+ if (!ret) {
+ debug_printf("CALLOC_STRUCT failed\n");
+ return NULL;
+ }
+
+ if (vkCreateFence(screen->dev, &fci, NULL, &ret->fence) != VK_SUCCESS) {
+ debug_printf("vkCreateFence failed\n");
+ goto fail;
+ }
+
+ pipe_reference_init(&ret->reference, 1);
+ return ret;
+
+fail:
+ destroy_fence(screen, ret);
+ return NULL;
+}
+
+void
+zink_fence_reference(struct zink_screen *screen,
+ struct zink_fence **ptr,
+ struct zink_fence *fence)
+{
+ if (pipe_reference(&(*ptr)->reference, &fence->reference))
+ destroy_fence(screen, *ptr);
+
+ *ptr = fence;
+}
+
+static void
+fence_reference(struct pipe_screen *pscreen,
+ struct pipe_fence_handle **pptr,
+ struct pipe_fence_handle *pfence)
+{
+ zink_fence_reference(zink_screen(pscreen), (struct zink_fence **)pptr,
+ zink_fence(pfence));
+}
+
+bool
+zink_fence_finish(struct zink_screen *screen, struct zink_fence *fence,
+ uint64_t timeout_ns)
+{
+ return vkWaitForFences(screen->dev, 1, &fence->fence, VK_TRUE,
+ timeout_ns) == VK_SUCCESS;
+}
+
+static bool
+fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
+ struct pipe_fence_handle *pfence, uint64_t timeout_ns)
+{
+ return zink_fence_finish(zink_screen(pscreen), zink_fence(pfence),
+ timeout_ns);
+}
+
+void
+zink_screen_fence_init(struct pipe_screen *pscreen)
+{
+ pscreen->fence_reference = fence_reference;
+ pscreen->fence_finish = fence_finish;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_fence.h b/lib/mesa/src/gallium/drivers/zink/zink_fence.h
new file mode 100644
index 000000000..ca8fecce1
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_fence.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_FENCE_H
+#define ZINK_FENCE_H
+
+#include "util/u_inlines.h"
+
+#include <vulkan/vulkan.h>
+
+struct pipe_screen;
+struct zink_screen;
+
+struct zink_fence {
+ struct pipe_reference reference;
+ VkFence fence;
+};
+
+static inline struct zink_fence *
+zink_fence(struct pipe_fence_handle *pfence)
+{
+ return (struct zink_fence *)pfence;
+}
+
+struct zink_fence *
+zink_create_fence(struct pipe_screen *pscreen);
+
+void
+zink_fence_reference(struct zink_screen *screen,
+ struct zink_fence **ptr,
+ struct zink_fence *fence);
+
+bool
+zink_fence_finish(struct zink_screen *screen, struct zink_fence *fence,
+ uint64_t timeout_ns);
+
+void
+zink_screen_fence_init(struct pipe_screen *pscreen);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_format.c b/lib/mesa/src/gallium/drivers/zink/zink_format.c
new file mode 100644
index 000000000..bd2dd92ce
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_format.c
@@ -0,0 +1,153 @@
+#include "zink_screen.h"
+
+static const VkFormat formats[PIPE_FORMAT_COUNT] = {
+#define MAP_FORMAT_NORM(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _UNORM] = VK_FORMAT_ ## FMT ## _UNORM, \
+ [PIPE_FORMAT_ ## FMT ## _SNORM] = VK_FORMAT_ ## FMT ## _SNORM,
+
+#define MAP_FORMAT_SCALED(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _USCALED] = VK_FORMAT_ ## FMT ## _USCALED, \
+ [PIPE_FORMAT_ ## FMT ## _SSCALED] = VK_FORMAT_ ## FMT ## _SSCALED,
+
+#define MAP_FORMAT_INT(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _UINT] = VK_FORMAT_ ## FMT ## _UINT, \
+ [PIPE_FORMAT_ ## FMT ## _SINT] = VK_FORMAT_ ## FMT ## _SINT,
+
+#define MAP_FORMAT_SRGB(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _SRGB] = VK_FORMAT_ ## FMT ## _SRGB,
+
+#define MAP_FORMAT_FLOAT(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _FLOAT] = VK_FORMAT_ ## FMT ## _SFLOAT,
+
+ // one component
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8)
+ MAP_FORMAT_SCALED(R8)
+ MAP_FORMAT_INT(R8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16)
+ MAP_FORMAT_SCALED(R16)
+ MAP_FORMAT_INT(R16)
+ MAP_FORMAT_FLOAT(R16)
+ // 32-bits
+ MAP_FORMAT_INT(R32)
+ MAP_FORMAT_FLOAT(R32)
+
+ // two components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8)
+ MAP_FORMAT_SCALED(R8G8)
+ MAP_FORMAT_INT(R8G8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16)
+ MAP_FORMAT_SCALED(R16G16)
+ MAP_FORMAT_INT(R16G16)
+ MAP_FORMAT_FLOAT(R16G16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32)
+ MAP_FORMAT_FLOAT(R32G32)
+
+ // three components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8B8)
+ MAP_FORMAT_SCALED(R8G8B8)
+ MAP_FORMAT_INT(R8G8B8)
+ MAP_FORMAT_SRGB(R8G8B8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16B16)
+ MAP_FORMAT_SCALED(R16G16B16)
+ MAP_FORMAT_INT(R16G16B16)
+ MAP_FORMAT_FLOAT(R16G16B16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32B32)
+ MAP_FORMAT_FLOAT(R32G32B32)
+
+ // four components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8B8A8)
+ MAP_FORMAT_SCALED(R8G8B8A8)
+ MAP_FORMAT_INT(R8G8B8A8)
+ MAP_FORMAT_SRGB(R8G8B8A8)
+ [PIPE_FORMAT_B8G8R8A8_UNORM] = VK_FORMAT_B8G8R8A8_UNORM,
+ [PIPE_FORMAT_B8G8R8X8_UNORM] = VK_FORMAT_B8G8R8A8_UNORM,
+ MAP_FORMAT_SRGB(B8G8R8A8)
+ [PIPE_FORMAT_A8B8G8R8_SRGB] = VK_FORMAT_A8B8G8R8_SRGB_PACK32,
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16B16A16)
+ MAP_FORMAT_SCALED(R16G16B16A16)
+ MAP_FORMAT_INT(R16G16B16A16)
+ MAP_FORMAT_FLOAT(R16G16B16A16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32B32A32)
+ MAP_FORMAT_FLOAT(R32G32B32A32)
+
+ // other color formats
+ [PIPE_FORMAT_B5G6R5_UNORM] = VK_FORMAT_R5G6B5_UNORM_PACK16,
+ [PIPE_FORMAT_B5G5R5A1_UNORM] = VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+ [PIPE_FORMAT_R11G11B10_FLOAT] = VK_FORMAT_B10G11R11_UFLOAT_PACK32,
+ [PIPE_FORMAT_R9G9B9E5_FLOAT] = VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
+ [PIPE_FORMAT_R10G10B10A2_UNORM] = VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_UNORM] = VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+ [PIPE_FORMAT_R10G10B10A2_UINT] = VK_FORMAT_A2B10G10R10_UINT_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_UINT] = VK_FORMAT_A2R10G10B10_UINT_PACK32,
+
+ // depth/stencil formats
+ [PIPE_FORMAT_Z32_FLOAT] = VK_FORMAT_D32_SFLOAT,
+ [PIPE_FORMAT_Z32_FLOAT_S8X24_UINT] = VK_FORMAT_D32_SFLOAT_S8_UINT,
+ [PIPE_FORMAT_Z16_UNORM] = VK_FORMAT_D16_UNORM,
+ [PIPE_FORMAT_Z24X8_UNORM] = VK_FORMAT_X8_D24_UNORM_PACK32,
+ [PIPE_FORMAT_Z24_UNORM_S8_UINT] = VK_FORMAT_D24_UNORM_S8_UINT,
+
+ // compressed formats
+ [PIPE_FORMAT_DXT1_RGB] = VK_FORMAT_BC1_RGB_UNORM_BLOCK,
+ [PIPE_FORMAT_DXT1_RGBA] = VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
+ [PIPE_FORMAT_DXT3_RGBA] = VK_FORMAT_BC2_UNORM_BLOCK,
+ [PIPE_FORMAT_DXT5_RGBA] = VK_FORMAT_BC3_UNORM_BLOCK,
+ [PIPE_FORMAT_DXT1_SRGB] = VK_FORMAT_BC1_RGB_SRGB_BLOCK,
+ [PIPE_FORMAT_DXT1_SRGBA] = VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
+ [PIPE_FORMAT_DXT3_SRGBA] = VK_FORMAT_BC2_SRGB_BLOCK,
+ [PIPE_FORMAT_DXT5_SRGBA] = VK_FORMAT_BC3_SRGB_BLOCK,
+
+ [PIPE_FORMAT_RGTC1_UNORM] = VK_FORMAT_BC4_UNORM_BLOCK,
+ [PIPE_FORMAT_RGTC1_SNORM] = VK_FORMAT_BC4_SNORM_BLOCK,
+ [PIPE_FORMAT_RGTC2_UNORM] = VK_FORMAT_BC5_UNORM_BLOCK,
+ [PIPE_FORMAT_RGTC2_SNORM] = VK_FORMAT_BC5_SNORM_BLOCK,
+ [PIPE_FORMAT_BPTC_RGBA_UNORM] = VK_FORMAT_BC7_UNORM_BLOCK,
+ [PIPE_FORMAT_BPTC_SRGBA] = VK_FORMAT_BC7_SRGB_BLOCK,
+ [PIPE_FORMAT_BPTC_RGB_FLOAT] = VK_FORMAT_BC6H_SFLOAT_BLOCK,
+ [PIPE_FORMAT_BPTC_RGB_UFLOAT] = VK_FORMAT_BC6H_UFLOAT_BLOCK,
+};
+
+bool
+zink_is_depth_format_supported(struct zink_screen *screen, VkFormat format)
+{
+ VkFormatProperties props;
+ vkGetPhysicalDeviceFormatProperties(screen->pdev, format, &props);
+ return (props.linearTilingFeatures | props.optimalTilingFeatures) &
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+}
+
+VkFormat
+zink_get_format(struct zink_screen *screen, enum pipe_format format)
+{
+ VkFormat ret = formats[format];
+
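+ /* the packed 24-bit depth formats are optional in Vulkan; fall back to
+ * the wider 32-bit variants when the driver doesn't expose them */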
+ if (ret == VK_FORMAT_X8_D24_UNORM_PACK32 &&
+ !screen->have_X8_D24_UNORM_PACK32) {
+ assert(zink_is_depth_format_supported(screen, VK_FORMAT_D32_SFLOAT));
+ return VK_FORMAT_D32_SFLOAT;
+ }
+
+ if (ret == VK_FORMAT_D24_UNORM_S8_UINT &&
+ !screen->have_D24_UNORM_S8_UINT) {
+ assert(zink_is_depth_format_supported(screen,
+ VK_FORMAT_D32_SFLOAT_S8_UINT));
+ return VK_FORMAT_D32_SFLOAT_S8_UINT;
+ }
+
+ return ret;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_framebuffer.c b/lib/mesa/src/gallium/drivers/zink/zink_framebuffer.c
new file mode 100644
index 000000000..6031ad24a
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_framebuffer.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_framebuffer.h"
+
+#include "zink_render_pass.h"
+#include "zink_screen.h"
+#include "zink_surface.h"
+
+#include "util/u_memory.h"
+#include "util/u_string.h"
+
+void
+zink_destroy_framebuffer(struct zink_screen *screen,
+ struct zink_framebuffer *fbuf)
+{
+ vkDestroyFramebuffer(screen->dev, fbuf->fb, NULL);
+ for (int i = 0; i < ARRAY_SIZE(fbuf->surfaces); ++i)
+ pipe_surface_reference(fbuf->surfaces + i, NULL);
+
+ zink_render_pass_reference(screen, &fbuf->rp, NULL);
+
+ FREE(fbuf);
+}
+
+struct zink_framebuffer *
+zink_create_framebuffer(struct zink_screen *screen,
+ struct zink_framebuffer_state *fb)
+{
+ struct zink_framebuffer *fbuf = CALLOC_STRUCT(zink_framebuffer);
+ if (!fbuf)
+ return NULL;
+
+ pipe_reference_init(&fbuf->reference, 1);
+
+ VkImageView attachments[ARRAY_SIZE(fb->attachments)];
+ for (int i = 0; i < fb->num_attachments; i++) {
+ struct zink_surface *surf = fb->attachments[i];
+ pipe_surface_reference(fbuf->surfaces + i, &surf->base);
+ attachments[i] = surf->image_view;
+ }
+
+ zink_render_pass_reference(screen, &fbuf->rp, fb->rp);
+
+ VkFramebufferCreateInfo fci = {};
+ fci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ fci.renderPass = fbuf->rp->render_pass;
+ fci.attachmentCount = fb->num_attachments;
+ fci.pAttachments = attachments;
+ fci.width = fb->width;
+ fci.height = fb->height;
+ fci.layers = fb->layers;
+
+ if (vkCreateFramebuffer(screen->dev, &fci, NULL, &fbuf->fb) != VK_SUCCESS) {
+ zink_destroy_framebuffer(screen, fbuf);
+ return NULL;
+ }
+
+ return fbuf;
+}
+
+void
+debug_describe_zink_framebuffer(char* buf, const struct zink_framebuffer *ptr)
+{
+ sprintf(buf, "zink_framebuffer");
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_framebuffer.h b/lib/mesa/src/gallium/drivers/zink/zink_framebuffer.h
new file mode 100644
index 000000000..63dd36214
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_framebuffer.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_FRAMEBUFFER_H
+#define ZINK_FRAMEBUFFER_H
+
+#include "pipe/p_state.h"
+#include <vulkan/vulkan.h>
+
+#include "util/u_inlines.h"
+
+struct zink_screen;
+struct zink_render_pass;
+
+struct zink_framebuffer_state {
+ struct zink_render_pass *rp;
+ uint32_t width;
+ uint16_t height, layers;
+ uint8_t num_attachments;
+ struct zink_surface *attachments[PIPE_MAX_COLOR_BUFS + 1];
+};
+
+struct zink_framebuffer {
+ struct pipe_reference reference;
+ VkFramebuffer fb;
+
+ struct pipe_surface *surfaces[PIPE_MAX_COLOR_BUFS + 1];
+ struct zink_render_pass *rp;
+};
+
+struct zink_framebuffer *
+zink_create_framebuffer(struct zink_screen *screen,
+ struct zink_framebuffer_state *fb);
+
+void
+zink_destroy_framebuffer(struct zink_screen *screen,
+ struct zink_framebuffer *fbuf);
+
+void
+debug_describe_zink_framebuffer(char* buf, const struct zink_framebuffer *ptr);
+
+static inline void
+zink_framebuffer_reference(struct zink_screen *screen,
+ struct zink_framebuffer **dst,
+ struct zink_framebuffer *src)
+{
+ struct zink_framebuffer *old_dst = *dst;
+
+ if (pipe_reference_described(&old_dst->reference, &src->reference,
+ (debug_reference_descriptor)debug_describe_zink_framebuffer))
+ zink_destroy_framebuffer(screen, old_dst);
+ *dst = src;
+}
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_helpers.h b/lib/mesa/src/gallium/drivers/zink/zink_helpers.h
new file mode 100644
index 000000000..9fea18ac3
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_helpers.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_HELPERS_H
+#define ZINK_HELPERS_H
+
+#include "pipe/p_defines.h"
+
+#include <vulkan/vulkan.h>
+
+static inline VkFilter
+zink_filter(enum pipe_tex_filter filter)
+{
+ switch (filter) {
+ case PIPE_TEX_FILTER_NEAREST: return VK_FILTER_NEAREST;
+ case PIPE_TEX_FILTER_LINEAR: return VK_FILTER_LINEAR;
+ }
+ unreachable("unexpected filter");
+}
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_pipeline.c b/lib/mesa/src/gallium/drivers/zink/zink_pipeline.c
new file mode 100644
index 000000000..261bdde52
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_pipeline.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_pipeline.h"
+
+#include "zink_compiler.h"
+#include "zink_context.h"
+#include "zink_program.h"
+#include "zink_render_pass.h"
+#include "zink_screen.h"
+#include "zink_state.h"
+
+#include "util/u_debug.h"
+#include "util/u_prim.h"
+
+VkPipeline
+zink_create_gfx_pipeline(struct zink_screen *screen,
+ struct zink_gfx_program *prog,
+ struct zink_gfx_pipeline_state *state,
+ VkPrimitiveTopology primitive_topology)
+{
+ VkPipelineVertexInputStateCreateInfo vertex_input_state = {};
+ vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vertex_input_state.pVertexBindingDescriptions = state->bindings;
+ vertex_input_state.vertexBindingDescriptionCount = state->element_state->num_bindings;
+ vertex_input_state.pVertexAttributeDescriptions = state->element_state->attribs;
+ vertex_input_state.vertexAttributeDescriptionCount = state->element_state->num_attribs;
+
+ VkPipelineInputAssemblyStateCreateInfo primitive_state = {};
+ primitive_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ primitive_state.topology = primitive_topology;
+ primitive_state.primitiveRestartEnable = VK_FALSE;
+
+ VkPipelineColorBlendStateCreateInfo blend_state = {};
+ blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ blend_state.pAttachments = state->blend_state->attachments;
+ blend_state.attachmentCount = state->num_attachments;
+ blend_state.logicOpEnable = state->blend_state->logicop_enable;
+ blend_state.logicOp = state->blend_state->logicop_func;
+
+ VkPipelineMultisampleStateCreateInfo ms_state = {};
+ ms_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ ms_state.rasterizationSamples = state->rast_samples;
+ ms_state.alphaToCoverageEnable = state->blend_state->alpha_to_coverage;
+ ms_state.alphaToOneEnable = state->blend_state->alpha_to_one;
+ ms_state.pSampleMask = state->sample_mask ? &state->sample_mask : NULL;
+
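+ /* viewport and scissor are set dynamically at draw time, so only the
+ * counts matter here */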
+ VkPipelineViewportStateCreateInfo viewport_state = {};
+ viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport_state.viewportCount = 1;
+ viewport_state.pViewports = NULL;
+ viewport_state.scissorCount = 1;
+ viewport_state.pScissors = NULL;
+
+ VkPipelineRasterizationStateCreateInfo rast_state = {};
+ rast_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+
+ rast_state.depthClampEnable = state->rast_state->depth_clamp;
+ rast_state.rasterizerDiscardEnable = state->rast_state->rasterizer_discard;
+ rast_state.polygonMode = state->rast_state->polygon_mode;
+ rast_state.cullMode = state->rast_state->cull_mode;
+ rast_state.frontFace = state->rast_state->front_face;
+
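+ /* depth bias is dynamic state (VK_DYNAMIC_STATE_DEPTH_BIAS), so keep it
+ * enabled; zink_draw_vbo() programs zeroes when no offset is wanted */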
+ rast_state.depthBiasEnable = VK_TRUE;
+ rast_state.depthBiasConstantFactor = 0.0;
+ rast_state.depthBiasClamp = 0.0;
+ rast_state.depthBiasSlopeFactor = 0.0;
+ rast_state.lineWidth = 1.0f;
+
+ VkPipelineDepthStencilStateCreateInfo depth_stencil_state = {};
+ depth_stencil_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ depth_stencil_state.depthTestEnable = state->depth_stencil_alpha_state->depth_test;
+ depth_stencil_state.depthCompareOp = state->depth_stencil_alpha_state->depth_compare_op;
+ depth_stencil_state.depthBoundsTestEnable = state->depth_stencil_alpha_state->depth_bounds_test;
+ depth_stencil_state.minDepthBounds = state->depth_stencil_alpha_state->min_depth_bounds;
+ depth_stencil_state.maxDepthBounds = state->depth_stencil_alpha_state->max_depth_bounds;
+ depth_stencil_state.stencilTestEnable = state->depth_stencil_alpha_state->stencil_test;
+ depth_stencil_state.front = state->depth_stencil_alpha_state->stencil_front;
+ depth_stencil_state.back = state->depth_stencil_alpha_state->stencil_back;
+ depth_stencil_state.depthWriteEnable = state->depth_stencil_alpha_state->depth_write;
+
+ VkDynamicState dynamicStateEnables[] = {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR,
+ VK_DYNAMIC_STATE_LINE_WIDTH,
+ VK_DYNAMIC_STATE_DEPTH_BIAS,
+ VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+ VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ };
+
+ VkPipelineDynamicStateCreateInfo pipelineDynamicStateCreateInfo = {};
+ pipelineDynamicStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ pipelineDynamicStateCreateInfo.pDynamicStates = dynamicStateEnables;
+ pipelineDynamicStateCreateInfo.dynamicStateCount = ARRAY_SIZE(dynamicStateEnables);
+
+ VkGraphicsPipelineCreateInfo pci = {};
+ pci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
+ pci.layout = prog->layout;
+ pci.renderPass = state->render_pass->render_pass;
+ pci.pVertexInputState = &vertex_input_state;
+ pci.pInputAssemblyState = &primitive_state;
+ pci.pRasterizationState = &rast_state;
+ pci.pColorBlendState = &blend_state;
+ pci.pMultisampleState = &ms_state;
+ pci.pViewportState = &viewport_state;
+ pci.pDepthStencilState = &depth_stencil_state;
+ pci.pDynamicState = &pipelineDynamicStateCreateInfo;
+
+ VkPipelineShaderStageCreateInfo shader_stages[PIPE_SHADER_TYPES - 1];
+ uint32_t num_stages = 0;
+ for (int i = 0; i < PIPE_SHADER_TYPES - 1; ++i) {
+ if (!prog->stages[i])
+ continue;
+
+ VkPipelineShaderStageCreateInfo stage = {};
+ stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stage.stage = zink_shader_stage(i);
+ stage.module = prog->stages[i]->shader_module;
+ stage.pName = "main";
+ shader_stages[num_stages++] = stage;
+ }
+ assert(num_stages > 0);
+
+ pci.pStages = shader_stages;
+ pci.stageCount = num_stages;
+
+ VkPipeline pipeline;
+ if (vkCreateGraphicsPipelines(screen->dev, VK_NULL_HANDLE, 1, &pci,
+ NULL, &pipeline) != VK_SUCCESS) {
+ debug_printf("vkCreateGraphicsPipelines failed\n");
+ return VK_NULL_HANDLE;
+ }
+
+ return pipeline;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_pipeline.h b/lib/mesa/src/gallium/drivers/zink/zink_pipeline.h
new file mode 100644
index 000000000..d65bce21f
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_pipeline.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_PIPELINE_H
+#define ZINK_PIPELINE_H
+
+#include <vulkan/vulkan.h>
+
+#include "pipe/p_state.h"
+
+struct zink_blend_state;
+struct zink_depth_stencil_alpha_state;
+struct zink_gfx_program;
+struct zink_rasterizer_state;
+struct zink_render_pass;
+struct zink_screen;
+struct zink_vertex_elements_state;
+
+struct zink_gfx_pipeline_state {
+ struct zink_render_pass *render_pass;
+
+ struct zink_vertex_elements_hw_state *element_state;
+ VkVertexInputBindingDescription bindings[PIPE_MAX_ATTRIBS]; // combination of element_state and stride
+
+ uint32_t num_attachments;
+ struct zink_blend_state *blend_state;
+
+ struct zink_rasterizer_hw_state *rast_state;
+
+ struct zink_depth_stencil_alpha_state *depth_stencil_alpha_state;
+
+ VkSampleMask sample_mask;
+ uint8_t rast_samples;
+};
+
+VkPipeline
+zink_create_gfx_pipeline(struct zink_screen *screen,
+ struct zink_gfx_program *prog,
+ struct zink_gfx_pipeline_state *state,
+ VkPrimitiveTopology primitive_topology);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_program.c b/lib/mesa/src/gallium/drivers/zink/zink_program.c
new file mode 100644
index 000000000..95b47a69d
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_program.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_program.h"
+
+#include "zink_compiler.h"
+#include "zink_context.h"
+#include "zink_render_pass.h"
+#include "zink_screen.h"
+
+#include "util/hash_table.h"
+#include "util/set.h"
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
+static VkDescriptorSetLayout
+create_desc_set_layout(VkDevice dev,
+ struct zink_shader *stages[PIPE_SHADER_TYPES - 1],
+ unsigned *num_descriptors)
+{
+ VkDescriptorSetLayoutBinding bindings[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
+ int num_bindings = 0;
+
+ for (int i = 0; i < PIPE_SHADER_TYPES - 1; i++) {
+ struct zink_shader *shader = stages[i];
+ if (!shader)
+ continue;
+
+ VkShaderStageFlagBits stage_flags = zink_shader_stage(i);
+ for (int j = 0; j < shader->num_bindings; j++) {
+ assert(num_bindings < ARRAY_SIZE(bindings));
+ bindings[num_bindings].binding = shader->bindings[j].binding;
+ bindings[num_bindings].descriptorType = shader->bindings[j].type;
+ bindings[num_bindings].descriptorCount = 1;
+ bindings[num_bindings].stageFlags = stage_flags;
+ bindings[num_bindings].pImmutableSamplers = NULL;
+ ++num_bindings;
+ }
+ }
+
+ VkDescriptorSetLayoutCreateInfo dcslci = {};
+ dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ dcslci.pNext = NULL;
+ dcslci.flags = 0;
+ dcslci.bindingCount = num_bindings;
+ dcslci.pBindings = bindings;
+
+ VkDescriptorSetLayout dsl;
+ if (vkCreateDescriptorSetLayout(dev, &dcslci, 0, &dsl) != VK_SUCCESS) {
+ debug_printf("vkCreateDescriptorSetLayout failed\n");
+ return VK_NULL_HANDLE;
+ }
+
+ *num_descriptors = num_bindings;
+ return dsl;
+}
+
+static VkPipelineLayout
+create_pipeline_layout(VkDevice dev, VkDescriptorSetLayout dsl)
+{
+ assert(dsl != VK_NULL_HANDLE);
+
+ VkPipelineLayoutCreateInfo plci = {};
+ plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+
+ plci.pSetLayouts = &dsl;
+ plci.setLayoutCount = 1;
+
+ VkPipelineLayout layout;
+ if (vkCreatePipelineLayout(dev, &plci, NULL, &layout) != VK_SUCCESS) {
+ debug_printf("vkCreatePipelineLayout failed!\n");
+ return VK_NULL_HANDLE;
+ }
+
+ return layout;
+}
+
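+/* note: hashing and memcmp'ing the raw struct bytes assumes every
+ * zink_gfx_pipeline_state is fully zero-initialized, so padding bytes
+ * always compare equal */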
+static uint32_t
+hash_gfx_pipeline_state(const void *key)
+{
+ return _mesa_hash_data(key, sizeof(struct zink_gfx_pipeline_state));
+}
+
+static bool
+equals_gfx_pipeline_state(const void *a, const void *b)
+{
+ return memcmp(a, b, sizeof(struct zink_gfx_pipeline_state)) == 0;
+}
+
+struct zink_gfx_program *
+zink_create_gfx_program(struct zink_screen *screen,
+ struct zink_shader *stages[PIPE_SHADER_TYPES - 1])
+{
+ struct zink_gfx_program *prog = CALLOC_STRUCT(zink_gfx_program);
+ if (!prog)
+ goto fail;
+
+ for (int i = 0; i < ARRAY_SIZE(prog->pipelines); ++i) {
+ prog->pipelines[i] = _mesa_hash_table_create(NULL,
+ hash_gfx_pipeline_state,
+ equals_gfx_pipeline_state);
+ if (!prog->pipelines[i])
+ goto fail;
+ }
+
+ for (int i = 0; i < PIPE_SHADER_TYPES - 1; ++i)
+ prog->stages[i] = stages[i];
+
+ prog->dsl = create_desc_set_layout(screen->dev, stages,
+ &prog->num_descriptors);
+ if (!prog->dsl)
+ goto fail;
+
+ prog->layout = create_pipeline_layout(screen->dev, prog->dsl);
+ if (!prog->layout)
+ goto fail;
+
+ prog->render_passes = _mesa_set_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ if (!prog->render_passes)
+ goto fail;
+
+ return prog;
+
+fail:
+ if (prog)
+ zink_destroy_gfx_program(screen, prog);
+ return NULL;
+}
+
+void
+zink_destroy_gfx_program(struct zink_screen *screen,
+ struct zink_gfx_program *prog)
+{
+ if (prog->layout)
+ vkDestroyPipelineLayout(screen->dev, prog->layout, NULL);
+
+ if (prog->dsl)
+ vkDestroyDescriptorSetLayout(screen->dev, prog->dsl, NULL);
+
+ /* unref all used render-passes */
+ if (prog->render_passes) {
+ set_foreach(prog->render_passes, entry) {
+ struct zink_render_pass *pres = (struct zink_render_pass *)entry->key;
+ zink_render_pass_reference(screen, &pres, NULL);
+ }
+ _mesa_set_destroy(prog->render_passes, NULL);
+ }
+
+ FREE(prog);
+}
+
+struct pipeline_cache_entry {
+ struct zink_gfx_pipeline_state state;
+ VkPipeline pipeline;
+};
+
+static VkPrimitiveTopology
+primitive_topology(enum pipe_prim_type mode)
+{
+ switch (mode) {
+ case PIPE_PRIM_POINTS:
+ return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+
+ case PIPE_PRIM_LINES:
+ return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+
+ case PIPE_PRIM_LINE_STRIP:
+ return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
+
+ case PIPE_PRIM_TRIANGLES:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
+
+ default:
+ unreachable("unexpected enum pipe_prim_type");
+ }
+}
+
+static void
+reference_render_pass(struct zink_screen *screen,
+ struct zink_gfx_program *prog,
+ struct zink_render_pass *render_pass)
+{
+ struct set_entry *entry = _mesa_set_search(prog->render_passes,
+ render_pass);
+ if (!entry) {
+ entry = _mesa_set_add(prog->render_passes, render_pass);
+ pipe_reference(NULL, &render_pass->reference);
+ }
+}
+
+VkPipeline
+zink_get_gfx_pipeline(struct zink_screen *screen,
+ struct zink_gfx_program *prog,
+ struct zink_gfx_pipeline_state *state,
+ enum pipe_prim_type mode)
+{
+ assert(mode < ARRAY_SIZE(prog->pipelines));
+
+ /* TODO: use pre-hashed versions to save some time (can re-hash only when
+ state changes) */
+ struct hash_entry *entry = _mesa_hash_table_search(prog->pipelines[mode], state);
+ if (!entry) {
+ VkPrimitiveTopology vkmode = primitive_topology(mode);
+ VkPipeline pipeline = zink_create_gfx_pipeline(screen, prog,
+ state, vkmode);
+ if (pipeline == VK_NULL_HANDLE)
+ return VK_NULL_HANDLE;
+
+ struct pipeline_cache_entry *pc_entry = CALLOC_STRUCT(pipeline_cache_entry);
+ if (!pc_entry)
+ return VK_NULL_HANDLE;
+
+ memcpy(&pc_entry->state, state, sizeof(*state));
+ pc_entry->pipeline = pipeline;
+
+ entry = _mesa_hash_table_insert(prog->pipelines[mode], &pc_entry->state, pc_entry);
+ assert(entry);
+
+ reference_render_pass(screen, prog, state->render_pass);
+ }
+
+ return ((struct pipeline_cache_entry *)(entry->data))->pipeline;
+}
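+
+/* A minimal sketch of the pre-hashing TODO above, assuming hypothetical
+ * `hash` and `dirty` fields on zink_gfx_pipeline_state: cache the hash
+ * alongside the state, recompute it only when the state is dirtied, and
+ * use the pre-hashed lookup from util/hash_table.h:
+ *
+ *    if (state->dirty) {
+ *       state->hash = hash_gfx_pipeline_state(state);
+ *       state->dirty = false;
+ *    }
+ *    struct hash_entry *entry =
+ *       _mesa_hash_table_search_pre_hashed(prog->pipelines[mode],
+ *                                          state->hash, state);
+ */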
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_program.h b/lib/mesa/src/gallium/drivers/zink/zink_program.h
new file mode 100644
index 000000000..8807f044a
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_program.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_PROGRAM_H
+#define ZINK_PROGRAM_H
+
+#include <vulkan/vulkan.h>
+
+#include "pipe/p_state.h"
+
+struct zink_screen;
+struct zink_shader;
+struct zink_gfx_pipeline_state;
+
+struct hash_table;
+struct set;
+
+struct zink_gfx_program {
+ struct zink_shader *stages[PIPE_SHADER_TYPES - 1]; // compute stage doesn't belong here
+ VkDescriptorSetLayout dsl;
+ VkPipelineLayout layout;
+ unsigned num_descriptors;
+ struct hash_table *pipelines[PIPE_PRIM_TRIANGLE_FAN + 1];
+ struct set *render_passes;
+};
+
+struct zink_gfx_program *
+zink_create_gfx_program(struct zink_screen *screen,
+ struct zink_shader *stages[PIPE_SHADER_TYPES - 1]);
+
+void
+zink_destroy_gfx_program(struct zink_screen *screen,
+ struct zink_gfx_program *prog);
+
+VkPipeline
+zink_get_gfx_pipeline(struct zink_screen *screen,
+ struct zink_gfx_program *prog,
+ struct zink_gfx_pipeline_state *state,
+ enum pipe_prim_type mode);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_public.h b/lib/mesa/src/gallium/drivers/zink/zink_public.h
new file mode 100644
index 000000000..be772f51e
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_public.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_PUBLIC_H
+#define ZINK_PUBLIC_H
+
+struct pipe_screen;
+struct sw_winsys;
+
+struct pipe_screen *
+zink_create_screen(struct sw_winsys *winsys);
+
+struct pipe_screen *
+zink_drm_create_screen(int fd);
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_query.c b/lib/mesa/src/gallium/drivers/zink/zink_query.c
new file mode 100644
index 000000000..e8ed72a1d
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_query.c
@@ -0,0 +1,262 @@
+#include "zink_query.h"
+
+#include "zink_context.h"
+#include "zink_screen.h"
+
+#include "util/u_dump.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+
+struct zink_query {
+ enum pipe_query_type type;
+
+ VkQueryPool query_pool;
+ unsigned curr_query, num_queries;
+
+ VkQueryType vkqtype;
+ bool use_64bit;
+ bool precise;
+
+ struct list_head active_list;
+};
+
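+/* Map gallium query types onto Vulkan query types. Occlusion counters need
+ * exact sample counts (precise, 64-bit results); the predicate variants only
+ * need a boolean, so a plain occlusion query suffices for them as well.
+ */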
+static VkQueryType
+convert_query_type(unsigned query_type, bool *use_64bit, bool *precise)
+{
+ *use_64bit = false;
+ *precise = false;
+ switch (query_type) {
+ case PIPE_QUERY_OCCLUSION_COUNTER:
+ *precise = true;
+ *use_64bit = true;
+ /* fall-through */
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
+ return VK_QUERY_TYPE_OCCLUSION;
+ case PIPE_QUERY_TIMESTAMP:
+ *use_64bit = true;
+ return VK_QUERY_TYPE_TIMESTAMP;
+ case PIPE_QUERY_PIPELINE_STATISTICS:
+ return VK_QUERY_TYPE_PIPELINE_STATISTICS;
+ default:
+ debug_printf("unknown query: %s\n",
+ util_str_query_type(query_type, true));
+ unreachable("zink: unknown query type");
+ }
+}
+
+static struct pipe_query *
+zink_create_query(struct pipe_context *pctx,
+ unsigned query_type, unsigned index)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_query *query = CALLOC_STRUCT(zink_query);
+ VkQueryPoolCreateInfo pool_create = {};
+
+ if (!query)
+ return NULL;
+
+ query->type = query_type;
+ query->vkqtype = convert_query_type(query_type, &query->use_64bit, &query->precise);
+ if (query->vkqtype == -1) {
+ FREE(query);
+ return NULL;
+ }
+
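+ /* A timestamp is written exactly once; every other query type consumes one
+ * pool slot per begin/end pair, and the per-slot results are summed at
+ * read-back time (see zink_get_query_result), hence the fixed 100 slots.
+ */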
+ query->num_queries = query_type == PIPE_QUERY_TIMESTAMP ? 1 : 100;
+ query->curr_query = 0;
+
+ pool_create.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+ pool_create.queryType = query->vkqtype;
+ pool_create.queryCount = query->num_queries;
+
+ VkResult status = vkCreateQueryPool(screen->dev, &pool_create, NULL, &query->query_pool);
+ if (status != VK_SUCCESS) {
+ FREE(query);
+ return NULL;
+ }
+ return (struct pipe_query *)query;
+}
+
+static void
+zink_destroy_query(struct pipe_context *pctx,
+ struct pipe_query *q)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_query *query = (struct zink_query *)q;
+
+ vkDestroyQueryPool(screen->dev, query->query_pool, NULL);
+ FREE(query);
+}
+
+static void
+begin_query(struct zink_context *ctx, struct zink_query *q)
+{
+ VkQueryControlFlags flags = 0;
+ if (q->precise)
+ flags |= VK_QUERY_CONTROL_PRECISE_BIT;
+
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ vkCmdBeginQuery(batch->cmdbuf, q->query_pool, q->curr_query, flags);
+}
+
+static bool
+zink_begin_query(struct pipe_context *pctx,
+ struct pipe_query *q)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_query *query = (struct zink_query *)q;
+
+ /* ignore begin_query for timestamps */
+ if (query->type == PIPE_QUERY_TIMESTAMP)
+ return true;
+
+ /* TODO: resetting on begin isn't ideal, as it forces render-pass exit...
+ * should instead reset on creation (if possible?)... Or perhaps maintain
+ * the pool in the batch instead?
+ */
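+ /* One possible alternative, sketched here but not implemented: with the
+ * VK_EXT_host_query_reset extension the pool could be reset from the CPU,
+ * avoiding the render-pass exit that vkCmdResetQueryPool forces, e.g.:
+ *
+ * vkResetQueryPoolEXT(screen->dev, query->query_pool,
+ * 0, query->num_queries);
+ */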
+ struct zink_batch *batch = zink_batch_no_rp(zink_context(pctx));
+ vkCmdResetQueryPool(batch->cmdbuf, query->query_pool, 0, query->num_queries);
+ query->curr_query = 0;
+
+ begin_query(ctx, query);
+ list_addtail(&query->active_list, &ctx->active_queries);
+
+ return true;
+}
+
+static void
+end_query(struct zink_context *ctx, struct zink_query *q)
+{
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ assert(q->type != PIPE_QUERY_TIMESTAMP);
+ vkCmdEndQuery(batch->cmdbuf, q->query_pool, q->curr_query);
+ if (++q->curr_query == q->num_queries) {
+ assert(0);
+ /* need to reset pool! */
+ }
+}
+
+static bool
+zink_end_query(struct pipe_context *pctx,
+ struct pipe_query *q)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_query *query = (struct zink_query *)q;
+
+ if (query->type == PIPE_QUERY_TIMESTAMP) {
+ assert(query->curr_query == 0);
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ vkCmdWriteTimestamp(batch->cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
+ query->query_pool, 0);
+ } else {
+ end_query(ctx, query);
+ list_delinit(&query->active_list);
+ }
+
+ return true;
+}
+
+static bool
+zink_get_query_result(struct pipe_context *pctx,
+ struct pipe_query *q,
+ bool wait,
+ union pipe_query_result *result)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_query *query = (struct zink_query *)q;
+ VkQueryResultFlags flags = 0;
+
+ if (wait) {
+ struct pipe_fence_handle *fence = NULL;
+ pctx->flush(pctx, &fence, PIPE_FLUSH_HINT_FINISH);
+ if (fence) {
+ pctx->screen->fence_finish(pctx->screen, NULL, fence,
+ PIPE_TIMEOUT_INFINITE);
+ pctx->screen->fence_reference(pctx->screen, &fence, NULL);
+ }
+ flags |= VK_QUERY_RESULT_WAIT_BIT;
+ } else
+ pctx->flush(pctx, NULL, 0);
+
+ if (query->use_64bit)
+ flags |= VK_QUERY_RESULT_64_BIT;
+
+ // TODO: handle curr_query > 100
+ // union pipe_query_result results[100];
+ uint64_t results[100];
+ memset(results, 0, sizeof(results));
+ assert(query->curr_query <= ARRAY_SIZE(results));
+ if (vkGetQueryPoolResults(screen->dev, query->query_pool,
+ 0, query->curr_query,
+ sizeof(results),
+ results,
+ sizeof(uint64_t),
+ flags) != VK_SUCCESS)
+ return false;
+
+ util_query_clear_result(result, query->type);
+ for (unsigned i = 0; i < query->curr_query; ++i) {
+ switch (query->type) {
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
+ case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+ case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
+ case PIPE_QUERY_GPU_FINISHED:
+ result->b |= results[i] != 0;
+ break;
+
+ case PIPE_QUERY_OCCLUSION_COUNTER:
+ result->u64 += results[i];
+ break;
+
+ default:
+ debug_printf("unhangled query type: %s\n",
+ util_str_query_type(query->type, true));
+ unreachable("unexpected query type");
+ }
+ }
+
+ return true;
+}
+
+void
+zink_suspend_queries(struct zink_context *ctx, struct zink_batch *batch)
+{
+ struct zink_query *query;
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, active_list) {
+ end_query(ctx, query);
+ }
+}
+
+void
+zink_resume_queries(struct zink_context *ctx, struct zink_batch *batch)
+{
+ struct zink_query *query;
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, active_list) {
+ begin_query(ctx, query);
+ }
+}
+
+static void
+zink_set_active_query_state(struct pipe_context *pctx, bool enable)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ ctx->queries_disabled = !enable;
+
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ if (ctx->queries_disabled)
+ zink_suspend_queries(ctx, batch);
+ else
+ zink_resume_queries(ctx, batch);
+}
+
+void
+zink_context_query_init(struct pipe_context *pctx)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ list_inithead(&ctx->active_queries);
+
+ pctx->create_query = zink_create_query;
+ pctx->destroy_query = zink_destroy_query;
+ pctx->begin_query = zink_begin_query;
+ pctx->end_query = zink_end_query;
+ pctx->get_query_result = zink_get_query_result;
+ pctx->set_active_query_state = zink_set_active_query_state;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_query.h b/lib/mesa/src/gallium/drivers/zink/zink_query.h
new file mode 100644
index 000000000..4b26b4434
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_query.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_QUERY_H
+#define ZINK_QUERY_H
+
+struct zink_batch;
+struct zink_context;
+
+void
+zink_suspend_queries(struct zink_context *ctx, struct zink_batch *batch);
+
+void
+zink_resume_queries(struct zink_context *ctx, struct zink_batch *batch);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_render_pass.c b/lib/mesa/src/gallium/drivers/zink/zink_render_pass.c
new file mode 100644
index 000000000..424fca26a
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_render_pass.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_render_pass.h"
+
+#include "zink_screen.h"
+
+#include "util/u_memory.h"
+#include "util/u_string.h"
+
+static VkRenderPass
+create_render_pass(VkDevice dev, struct zink_render_pass_state *state)
+{
+ VkAttachmentReference color_refs[PIPE_MAX_COLOR_BUFS], zs_ref;
+ VkAttachmentDescription attachments[PIPE_MAX_COLOR_BUFS + 1];
+
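+ /* One attachment description per color buffer, plus an optional
+ * depth/stencil attachment at the end. Everything uses LOAD/STORE ops,
+ * since gallium gives no hint that previous contents may be discarded.
+ */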
+ for (int i = 0; i < state->num_cbufs; i++) {
+ struct zink_rt_attrib *rt = state->rts + i;
+ attachments[i].flags = 0;
+ attachments[i].format = rt->format;
+ attachments[i].samples = rt->samples;
+ attachments[i].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachments[i].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachments[i].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachments[i].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ attachments[i].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachments[i].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ color_refs[i].attachment = i;
+ color_refs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ }
+
+ int num_attachments = state->num_cbufs;
+ if (state->have_zsbuf) {
+ struct zink_rt_attrib *rt = state->rts + state->num_cbufs;
+ attachments[num_attachments].flags = 0;
+ attachments[num_attachments].format = rt->format;
+ attachments[num_attachments].samples = rt->samples;
+ attachments[num_attachments].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachments[num_attachments].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachments[num_attachments].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachments[num_attachments].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachments[num_attachments].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ attachments[num_attachments].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+ zs_ref.attachment = num_attachments++;
+ zs_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ }
+
+ VkSubpassDescription subpass = {};
+ subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass.colorAttachmentCount = state->num_cbufs;
+ subpass.pColorAttachments = color_refs;
+ subpass.pDepthStencilAttachment = state->have_zsbuf ? &zs_ref : NULL;
+
+ VkRenderPassCreateInfo rpci = {};
+ rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ rpci.attachmentCount = num_attachments;
+ rpci.pAttachments = attachments;
+ rpci.subpassCount = 1;
+ rpci.pSubpasses = &subpass;
+
+ VkRenderPass render_pass;
+ if (vkCreateRenderPass(dev, &rpci, NULL, &render_pass) != VK_SUCCESS)
+ return VK_NULL_HANDLE;
+
+ return render_pass;
+}
+
+struct zink_render_pass *
+zink_create_render_pass(struct zink_screen *screen,
+ struct zink_render_pass_state *state)
+{
+ struct zink_render_pass *rp = CALLOC_STRUCT(zink_render_pass);
+ if (!rp)
+ goto fail;
+
+ pipe_reference_init(&rp->reference, 1);
+
+ rp->render_pass = create_render_pass(screen->dev, state);
+ if (!rp->render_pass)
+ goto fail;
+
+ return rp;
+
+fail:
+ if (rp)
+ zink_destroy_render_pass(screen, rp);
+ return NULL;
+}
+
+void
+zink_destroy_render_pass(struct zink_screen *screen,
+ struct zink_render_pass *rp)
+{
+ vkDestroyRenderPass(screen->dev, rp->render_pass, NULL);
+ FREE(rp);
+}
+
+void
+debug_describe_zink_render_pass(char* buf, const struct zink_render_pass *ptr)
+{
+ sprintf(buf, "zink_render_pass");
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_render_pass.h b/lib/mesa/src/gallium/drivers/zink/zink_render_pass.h
new file mode 100644
index 000000000..2da246ee8
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_render_pass.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_RENDERPASS_H
+#define ZINK_RENDERPASS_H
+
+#include <vulkan/vulkan.h>
+
+#include "pipe/p_state.h"
+#include "util/u_inlines.h"
+
+struct zink_screen;
+
+struct zink_rt_attrib {
+ VkFormat format;
+ VkSampleCountFlagBits samples;
+};
+
+struct zink_render_pass_state {
+ uint8_t num_cbufs : 4; /* PIPE_MAX_COLOR_BUFS = 8 */
+ uint8_t have_zsbuf : 1;
+ struct zink_rt_attrib rts[PIPE_MAX_COLOR_BUFS + 1];
+};
+
+struct zink_render_pass {
+ struct pipe_reference reference;
+
+ VkRenderPass render_pass;
+};
+
+struct zink_render_pass *
+zink_create_render_pass(struct zink_screen *screen,
+ struct zink_render_pass_state *state);
+
+void
+zink_destroy_render_pass(struct zink_screen *screen,
+ struct zink_render_pass *rp);
+
+void
+debug_describe_zink_render_pass(char* buf, const struct zink_render_pass *ptr);
+
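+/* Gallium-style reference counting: drop the old reference (destroying the
+ * render pass if it was the last one) before installing the new pointer.
+ */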
+static inline void
+zink_render_pass_reference(struct zink_screen *screen,
+ struct zink_render_pass **dst,
+ struct zink_render_pass *src)
+{
+ struct zink_render_pass *old_dst = *dst;
+
+ if (pipe_reference_described(&old_dst->reference, &src->reference,
+ (debug_reference_descriptor)debug_describe_zink_render_pass))
+ zink_destroy_render_pass(screen, old_dst);
+ *dst = src;
+}
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_resource.c b/lib/mesa/src/gallium/drivers/zink/zink_resource.c
new file mode 100644
index 000000000..e8073b459
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_resource.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_resource.h"
+
+#include "zink_batch.h"
+#include "zink_context.h"
+#include "zink_screen.h"
+
+#include "util/slab.h"
+#include "util/u_debug.h"
+#include "util/format/u_format.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+
+#include "state_tracker/sw_winsys.h"
+
+static void
+zink_resource_destroy(struct pipe_screen *pscreen,
+ struct pipe_resource *pres)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ struct zink_resource *res = zink_resource(pres);
+ if (pres->target == PIPE_BUFFER)
+ vkDestroyBuffer(screen->dev, res->buffer, NULL);
+ else
+ vkDestroyImage(screen->dev, res->image, NULL);
+
+ vkFreeMemory(screen->dev, res->mem, NULL);
+ FREE(res);
+}
+
+static uint32_t
+get_memory_type_index(struct zink_screen *screen,
+ const VkMemoryRequirements *reqs,
+ VkMemoryPropertyFlags props)
+{
+ for (uint32_t i = 0u; i < screen->mem_props.memoryTypeCount; i++) {
+ if (((reqs->memoryTypeBits >> i) & 1) == 1 &&
+ (screen->mem_props.memoryTypes[i].propertyFlags & props) == props)
+ return i;
+ }
+
+ unreachable("Unsupported memory-type");
+ return 0;
+}
+
+static VkImageAspectFlags
+aspect_from_format(enum pipe_format fmt)
+{
+ if (util_format_is_depth_or_stencil(fmt)) {
+ VkImageAspectFlags aspect = 0;
+ const struct util_format_description *desc = util_format_description(fmt);
+ if (util_format_has_depth(desc))
+ aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ if (util_format_has_stencil(desc))
+ aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ return aspect;
+ } else
+ return VK_IMAGE_ASPECT_COLOR_BIT;
+}
+
+static struct pipe_resource *
+resource_create(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ,
+ struct winsys_handle *whandle,
+ unsigned external_usage)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ struct zink_resource *res = CALLOC_STRUCT(zink_resource);
+ if (!res)
+ return NULL;
+
+ res->base = *templ;
+
+ pipe_reference_init(&res->base.reference, 1);
+ res->base.screen = pscreen;
+
+ VkMemoryRequirements reqs;
+ VkMemoryPropertyFlags flags = 0;
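+ /* Buffers and images take different creation paths below, but both end by
+ * filling in VkMemoryRequirements, which drives the shared allocation code
+ * that follows.
+ */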
+ if (templ->target == PIPE_BUFFER) {
+ VkBufferCreateInfo bci = {};
+ bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bci.size = templ->width0;
+
+ bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+ if (templ->bind & PIPE_BIND_VERTEX_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+
+ if (templ->bind & PIPE_BIND_INDEX_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+
+ if (templ->bind & PIPE_BIND_CONSTANT_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+
+ if (templ->bind & PIPE_BIND_SHADER_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+
+ if (templ->bind & PIPE_BIND_COMMAND_ARGS_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
+
+ if (vkCreateBuffer(screen->dev, &bci, NULL, &res->buffer) !=
+ VK_SUCCESS) {
+ FREE(res);
+ return NULL;
+ }
+
+ vkGetBufferMemoryRequirements(screen->dev, res->buffer, &reqs);
+ flags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ } else {
+ res->format = zink_get_format(screen, templ->format);
+
+ VkImageCreateInfo ici = {};
+ ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ ici.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+
+ switch (templ->target) {
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_1D_ARRAY:
+ ici.imageType = VK_IMAGE_TYPE_1D;
+ break;
+
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ ici.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+ /* fall-through */
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_2D_ARRAY:
+ case PIPE_TEXTURE_RECT:
+ ici.imageType = VK_IMAGE_TYPE_2D;
+ break;
+
+ case PIPE_TEXTURE_3D:
+ ici.imageType = VK_IMAGE_TYPE_3D;
+ if (templ->bind & PIPE_BIND_RENDER_TARGET)
+ ici.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
+ break;
+
+ case PIPE_BUFFER:
+ unreachable("PIPE_BUFFER should already be handled");
+
+ default:
+ unreachable("Unknown target");
+ }
+
+ ici.format = res->format;
+ ici.extent.width = templ->width0;
+ ici.extent.height = templ->height0;
+ ici.extent.depth = templ->depth0;
+ ici.mipLevels = templ->last_level + 1;
+ ici.arrayLayers = templ->array_size;
+ ici.samples = templ->nr_samples ? templ->nr_samples : VK_SAMPLE_COUNT_1_BIT;
+ ici.tiling = templ->bind & PIPE_BIND_LINEAR ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+
+ if (templ->target == PIPE_TEXTURE_CUBE ||
+ templ->target == PIPE_TEXTURE_CUBE_ARRAY)
+ ici.arrayLayers *= 6;
+
+ if (templ->bind & PIPE_BIND_SHARED)
+ ici.tiling = VK_IMAGE_TILING_LINEAR;
+
+ if (templ->usage == PIPE_USAGE_STAGING)
+ ici.tiling = VK_IMAGE_TILING_LINEAR;
+
+ /* sadly, gallium doesn't let us know if it'll ever need this, so we have to assume */
+ ici.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_SAMPLED_BIT;
+
+ if (templ->bind & PIPE_BIND_SHADER_IMAGE)
+ ici.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
+
+ if (templ->bind & PIPE_BIND_RENDER_TARGET)
+ ici.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+
+ if (templ->bind & PIPE_BIND_DEPTH_STENCIL)
+ ici.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+
+ if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
+ ici.usage |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
+
+ if (templ->bind & PIPE_BIND_STREAM_OUTPUT)
+ ici.usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+
+ ici.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ res->layout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ VkResult result = vkCreateImage(screen->dev, &ici, NULL, &res->image);
+ if (result != VK_SUCCESS) {
+ FREE(res);
+ return NULL;
+ }
+
+ res->optimial_tiling = ici.tiling != VK_IMAGE_TILING_LINEAR;
+ res->aspect = aspect_from_format(templ->format);
+
+ vkGetImageMemoryRequirements(screen->dev, res->image, &reqs);
+ if (templ->usage == PIPE_USAGE_STAGING ||
+ (screen->winsys && (templ->bind & (PIPE_BIND_SCANOUT |
+ PIPE_BIND_DISPLAY_TARGET |
+ PIPE_BIND_SHARED))))
+ flags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ else
+ flags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+
+ VkMemoryAllocateInfo mai = {};
+ mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ mai.allocationSize = reqs.size;
+ mai.memoryTypeIndex = get_memory_type_index(screen, &reqs, flags);
+
+ VkExportMemoryAllocateInfo emai = {};
+ if (templ->bind & PIPE_BIND_SHARED) {
+ emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
+ emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+ mai.pNext = &emai;
+ }
+
+ VkImportMemoryFdInfoKHR imfi = {
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+ NULL,
+ };
+
+ if (whandle && whandle->type == WINSYS_HANDLE_TYPE_FD) {
+ imfi.pNext = NULL;
+ imfi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+ imfi.fd = whandle->handle;
+
+ emai.pNext = &imfi;
+ }
+
+ if (vkAllocateMemory(screen->dev, &mai, NULL, &res->mem) != VK_SUCCESS)
+ goto fail;
+
+ res->offset = 0;
+ res->size = reqs.size;
+
+ if (templ->target == PIPE_BUFFER)
+ vkBindBufferMemory(screen->dev, res->buffer, res->mem, res->offset);
+ else
+ vkBindImageMemory(screen->dev, res->image, res->mem, res->offset);
+
+ if (screen->winsys && (templ->bind & (PIPE_BIND_DISPLAY_TARGET |
+ PIPE_BIND_SCANOUT |
+ PIPE_BIND_SHARED))) {
+ struct sw_winsys *winsys = screen->winsys;
+ res->dt = winsys->displaytarget_create(screen->winsys,
+ res->base.bind,
+ res->base.format,
+ templ->width0,
+ templ->height0,
+ 64, NULL,
+ &res->dt_stride);
+ }
+
+ return &res->base;
+
+fail:
+ if (templ->target == PIPE_BUFFER)
+ vkDestroyBuffer(screen->dev, res->buffer, NULL);
+ else
+ vkDestroyImage(screen->dev, res->image, NULL);
+
+ FREE(res);
+
+ return NULL;
+}
+
+static struct pipe_resource *
+zink_resource_create(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ)
+{
+ return resource_create(pscreen, templ, NULL, 0);
+}
+
+static bool
+zink_resource_get_handle(struct pipe_screen *pscreen,
+ struct pipe_context *context,
+ struct pipe_resource *tex,
+ struct winsys_handle *whandle,
+ unsigned usage)
+{
+ struct zink_resource *res = zink_resource(tex);
+ struct zink_screen *screen = zink_screen(pscreen);
+ VkMemoryGetFdInfoKHR fd_info = {};
+ int fd;
+
+ if (res->base.target != PIPE_BUFFER) {
+ VkImageSubresource sub_res = {};
+ VkSubresourceLayout sub_res_layout = {};
+
+ sub_res.aspectMask = res->aspect;
+
+ vkGetImageSubresourceLayout(screen->dev, res->image, &sub_res, &sub_res_layout);
+
+ whandle->stride = sub_res_layout.rowPitch;
+ }
+
+ if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
+
+ if (!screen->vk_GetMemoryFdKHR)
+ screen->vk_GetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetDeviceProcAddr(screen->dev, "vkGetMemoryFdKHR");
+ if (!screen->vk_GetMemoryFdKHR)
+ return false;
+ fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
+ fd_info.memory = res->mem;
+ fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+ VkResult result = (*screen->vk_GetMemoryFdKHR)(screen->dev, &fd_info, &fd);
+ if (result != VK_SUCCESS)
+ return false;
+ whandle->handle = fd;
+ }
+ return true;
+}
+
+static struct pipe_resource *
+zink_resource_from_handle(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ,
+ struct winsys_handle *whandle,
+ unsigned usage)
+{
+ return resource_create(pscreen, templ, whandle, usage);
+}
+
+void
+zink_screen_resource_init(struct pipe_screen *pscreen)
+{
+ pscreen->resource_create = zink_resource_create;
+ pscreen->resource_destroy = zink_resource_destroy;
+
+ if (zink_screen(pscreen)->have_KHR_external_memory_fd) {
+ pscreen->resource_get_handle = zink_resource_get_handle;
+ pscreen->resource_from_handle = zink_resource_from_handle;
+ }
+}
+
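+/* Copy between a linear staging buffer and the actual image, transitioning
+ * the image into the appropriate transfer layout first. This backs the
+ * transfer-map path for images that cannot be mapped directly.
+ */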
+static bool
+zink_transfer_copy_bufimage(struct zink_context *ctx,
+ struct zink_resource *res,
+ struct zink_resource *staging_res,
+ struct zink_transfer *trans,
+ bool buf2img)
+{
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+
+ if (buf2img) {
+ if (res->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ }
+ } else {
+ if (res->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+ }
+ }
+
+ VkBufferImageCopy copyRegion = {};
+ copyRegion.bufferOffset = staging_res->offset;
+ copyRegion.bufferRowLength = 0;
+ copyRegion.bufferImageHeight = 0;
+ copyRegion.imageSubresource.mipLevel = trans->base.level;
+ copyRegion.imageSubresource.layerCount = 1;
+ if (res->base.array_size > 1) {
+ copyRegion.imageSubresource.baseArrayLayer = trans->base.box.z;
+ copyRegion.imageSubresource.layerCount = trans->base.box.depth;
+ copyRegion.imageExtent.depth = 1;
+ } else {
+ copyRegion.imageOffset.z = trans->base.box.z;
+ copyRegion.imageExtent.depth = trans->base.box.depth;
+ }
+ copyRegion.imageOffset.x = trans->base.box.x;
+ copyRegion.imageOffset.y = trans->base.box.y;
+
+ copyRegion.imageExtent.width = trans->base.box.width;
+ copyRegion.imageExtent.height = trans->base.box.height;
+
+ zink_batch_reference_resoure(batch, res);
+ zink_batch_reference_resoure(batch, staging_res);
+
+ unsigned aspects = res->aspect;
+ while (aspects) {
+ int aspect = 1 << u_bit_scan(&aspects);
+ copyRegion.imageSubresource.aspectMask = aspect;
+
+ if (buf2img)
+ vkCmdCopyBufferToImage(batch->cmdbuf, staging_res->buffer, res->image, res->layout, 1, &copyRegion);
+ else
+ vkCmdCopyImageToBuffer(batch->cmdbuf, res->image, res->layout, staging_res->buffer, 1, &copyRegion);
+ }
+
+ return true;
+}
+
+static void *
+zink_transfer_map(struct pipe_context *pctx,
+ struct pipe_resource *pres,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **transfer)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_resource *res = zink_resource(pres);
+
+ struct zink_transfer *trans = slab_alloc(&ctx->transfer_pool);
+ if (!trans)
+ return NULL;
+
+ memset(trans, 0, sizeof(*trans));
+ pipe_resource_reference(&trans->base.resource, pres);
+
+ trans->base.resource = pres;
+ trans->base.level = level;
+ trans->base.usage = usage;
+ trans->base.box = *box;
+
+ void *ptr;
+ if (pres->target == PIPE_BUFFER) {
+ VkResult result = vkMapMemory(screen->dev, res->mem, res->offset, res->size, 0, &ptr);
+ if (result != VK_SUCCESS)
+ return NULL;
+
+ trans->base.stride = 0;
+ trans->base.layer_stride = 0;
+ ptr = ((uint8_t *)ptr) + box->x;
+ } else {
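+ /* Optimally-tiled images (and anything not created for staging) have no
+ * well-defined CPU-visible layout, so bounce through a linear staging
+ * buffer: copy image->buffer on read here, buffer->image on unmap for
+ * writes.
+ */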
+ if (res->optimial_tiling || res->base.usage != PIPE_USAGE_STAGING) {
+ trans->base.stride = util_format_get_stride(pres->format, box->width);
+ trans->base.layer_stride = util_format_get_2d_size(pres->format,
+ trans->base.stride,
+ box->height);
+
+ struct pipe_resource templ = *pres;
+ templ.usage = PIPE_USAGE_STAGING;
+ templ.target = PIPE_BUFFER;
+ templ.bind = 0;
+ templ.width0 = trans->base.layer_stride * box->depth;
+ templ.height0 = templ.depth0 = 0;
+ templ.last_level = 0;
+ templ.array_size = 1;
+ templ.flags = 0;
+
+ trans->staging_res = zink_resource_create(pctx->screen, &templ);
+ if (!trans->staging_res)
+ return NULL;
+
+ struct zink_resource *staging_res = zink_resource(trans->staging_res);
+
+ if (usage & PIPE_TRANSFER_READ) {
+ struct zink_context *ctx = zink_context(pctx);
+ bool ret = zink_transfer_copy_bufimage(ctx, res,
+ staging_res, trans,
+ false);
+ if (ret == false)
+ return NULL;
+
+ /* need to wait for rendering to finish */
+ struct pipe_fence_handle *fence = NULL;
+ pctx->flush(pctx, &fence, PIPE_FLUSH_HINT_FINISH);
+ if (fence) {
+ pctx->screen->fence_finish(pctx->screen, NULL, fence,
+ PIPE_TIMEOUT_INFINITE);
+ pctx->screen->fence_reference(pctx->screen, &fence, NULL);
+ }
+ }
+
+ VkResult result = vkMapMemory(screen->dev, staging_res->mem,
+ staging_res->offset,
+ staging_res->size, 0, &ptr);
+ if (result != VK_SUCCESS)
+ return NULL;
+
+ } else {
+ assert(!res->optimial_tiling);
+ VkResult result = vkMapMemory(screen->dev, res->mem, res->offset, res->size, 0, &ptr);
+ if (result != VK_SUCCESS)
+ return NULL;
+ VkImageSubresource isr = {
+ res->aspect,
+ level,
+ 0
+ };
+ VkSubresourceLayout srl;
+ vkGetImageSubresourceLayout(screen->dev, res->image, &isr, &srl);
+ trans->base.stride = srl.rowPitch;
+ trans->base.layer_stride = srl.arrayPitch;
+ ptr = ((uint8_t *)ptr) + box->z * srl.depthPitch +
+ box->y * srl.rowPitch +
+ box->x;
+ }
+ }
+
+ *transfer = &trans->base;
+ return ptr;
+}
+
+static void
+zink_transfer_unmap(struct pipe_context *pctx,
+ struct pipe_transfer *ptrans)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_resource *res = zink_resource(ptrans->resource);
+ struct zink_transfer *trans = (struct zink_transfer *)ptrans;
+ if (trans->staging_res) {
+ struct zink_resource *staging_res = zink_resource(trans->staging_res);
+ vkUnmapMemory(screen->dev, staging_res->mem);
+
+ if (trans->base.usage & PIPE_TRANSFER_WRITE) {
+ struct zink_context *ctx = zink_context(pctx);
+
+ zink_transfer_copy_bufimage(ctx, res, staging_res, trans, true);
+ }
+
+ pipe_resource_reference(&trans->staging_res, NULL);
+ } else
+ vkUnmapMemory(screen->dev, res->mem);
+
+ pipe_resource_reference(&trans->base.resource, NULL);
+ slab_free(&ctx->transfer_pool, ptrans);
+}
+
+void
+zink_context_resource_init(struct pipe_context *pctx)
+{
+ pctx->transfer_map = zink_transfer_map;
+ pctx->transfer_unmap = zink_transfer_unmap;
+
+ pctx->transfer_flush_region = u_default_transfer_flush_region;
+ pctx->buffer_subdata = u_default_buffer_subdata;
+ pctx->texture_subdata = u_default_texture_subdata;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_resource.h b/lib/mesa/src/gallium/drivers/zink/zink_resource.h
new file mode 100644
index 000000000..65e5e19dc
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_resource.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_RESOURCE_H
+#define ZINK_RESOURCE_H
+
+struct pipe_screen;
+struct sw_displaytarget;
+
+#include "util/u_transfer.h"
+
+#include <vulkan/vulkan.h>
+
+struct zink_resource {
+ struct pipe_resource base;
+
+ union {
+ VkBuffer buffer;
+ struct {
+ VkFormat format;
+ VkImage image;
+ VkImageLayout layout;
+ VkImageAspectFlags aspect;
+ bool optimial_tiling;
+ };
+ };
+ VkDeviceMemory mem;
+ VkDeviceSize offset, size;
+
+ struct sw_displaytarget *dt;
+ unsigned dt_stride;
+};
+
+struct zink_transfer {
+ struct pipe_transfer base;
+ struct pipe_resource *staging_res;
+};
+
+static inline struct zink_resource *
+zink_resource(struct pipe_resource *r)
+{
+ return (struct zink_resource *)r;
+}
+
+void
+zink_screen_resource_init(struct pipe_screen *pscreen);
+
+void
+zink_context_resource_init(struct pipe_context *pctx);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_screen.c b/lib/mesa/src/gallium/drivers/zink/zink_screen.c
new file mode 100644
index 000000000..9d751d30e
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_screen.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_screen.h"
+
+#include "zink_compiler.h"
+#include "zink_context.h"
+#include "zink_fence.h"
+#include "zink_public.h"
+#include "zink_resource.h"
+
+#include "os/os_process.h"
+#include "util/u_debug.h"
+#include "util/format/u_format.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_screen.h"
+#include "util/u_string.h"
+
+#include "state_tracker/sw_winsys.h"
+
+static const struct debug_named_value
+debug_options[] = {
+ { "nir", ZINK_DEBUG_NIR, "Dump NIR during program compile" },
+ { "spirv", ZINK_DEBUG_SPIRV, "Dump SPIR-V during program compile" },
+ { "tgsi", ZINK_DEBUG_TGSI, "Dump TGSI during program compile" },
+ DEBUG_NAMED_VALUE_END
+};
+
+DEBUG_GET_ONCE_FLAGS_OPTION(zink_debug, "ZINK_DEBUG", debug_options, 0)
+
+uint32_t
+zink_debug;
+
+static const char *
+zink_get_vendor(struct pipe_screen *pscreen)
+{
+ return "Collabora Ltd";
+}
+
+static const char *
+zink_get_device_vendor(struct pipe_screen *pscreen)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ static char buf[1000];
+ snprintf(buf, sizeof(buf), "Unknown (vendor-id: 0x%04x)", screen->props.vendorID);
+ return buf;
+}
+
+static const char *
+zink_get_name(struct pipe_screen *pscreen)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ static char buf[1000];
+ snprintf(buf, sizeof(buf), "zink (%s)", screen->props.deviceName);
+ return buf;
+}
+
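+/* Rough estimate: sum every Vulkan memory heap, in MiB. On discrete GPUs
+ * this counts host-visible heaps as video memory too, so it may over-report.
+ */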
+static int
+get_video_mem(struct zink_screen *screen)
+{
+ VkDeviceSize size = 0;
+ for (uint32_t i = 0; i < screen->mem_props.memoryHeapCount; ++i)
+ size += screen->mem_props.memoryHeaps[i].size;
+ return (int)(size >> 20);
+}
+
+static int
+zink_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ switch (param) {
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+
+ case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
+ if (!screen->feats.dualSrcBlend)
+ return 0;
+ return screen->props.limits.maxFragmentDualSrcAttachments;
+
+ case PIPE_CAP_POINT_SPRITE:
+ return 1;
+
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return screen->props.limits.maxColorAttachments;
+
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 1;
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_QUERY_TIME_ELAPSED:
+ return 1;
+#endif
+
+ case PIPE_CAP_TEXTURE_SWIZZLE:
+ return 1;
+
+ case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
+ return screen->props.limits.maxImageDimension2D;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 1 + util_logbase2(screen->props.limits.maxImageDimension3D);
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 1 + util_logbase2(screen->props.limits.maxImageDimensionCube);
+
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
+ case PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES:
+ case PIPE_CAP_VERTEX_SHADER_SATURATE:
+ return 1;
+
+ case PIPE_CAP_INDEP_BLEND_ENABLE:
+ case PIPE_CAP_INDEP_BLEND_FUNC:
+ return 1;
+
+ case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
+ return screen->props.limits.maxImageArrayLayers;
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_DEPTH_CLIP_DISABLE:
+ return 0;
+#endif
+
+ case PIPE_CAP_TGSI_INSTANCEID:
+ case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
+ case PIPE_CAP_SEAMLESS_CUBE_MAP:
+ return 1;
+
+ case PIPE_CAP_MIN_TEXEL_OFFSET:
+ return screen->props.limits.minTexelOffset;
+ case PIPE_CAP_MAX_TEXEL_OFFSET:
+ return screen->props.limits.maxTexelOffset;
+
+ case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
+ return 1;
+
+ case PIPE_CAP_GLSL_FEATURE_LEVEL:
+ case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
+ return 120;
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_COMPUTE:
+ return 1;
+#endif
+
+ case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
+ return screen->props.limits.minUniformBufferOffsetAlignment;
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_QUERY_TIMESTAMP:
+ return 1;
+#endif
+
+ case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
+ return screen->props.limits.minMemoryMapAlignment;
+
+ case PIPE_CAP_CUBE_MAP_ARRAY:
+ return screen->feats.imageCubeArray;
+
+ case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
+ return 1;
+
+ case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
+ return screen->props.limits.minTexelBufferOffsetAlignment;
+
+ case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
+ return 0; /* unsure */
+
+ case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
+ return screen->props.limits.maxTexelBufferElements;
+
+ case PIPE_CAP_ENDIANNESS:
+ return PIPE_ENDIAN_NATIVE; /* unsure */
+
+ case PIPE_CAP_MAX_VIEWPORTS:
+ return screen->props.limits.maxViewports;
+
+ case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
+ return 1;
+
+ case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
+ return screen->props.limits.maxGeometryOutputVertices;
+ case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
+ return screen->props.limits.maxGeometryOutputComponents;
+
+#if 0 /* TODO: Enable me. Enables ARB_texture_gather */
+ case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
+ return 4;
+#endif
+
+ case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
+ return screen->props.limits.minTexelGatherOffset;
+ case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
+ return screen->props.limits.maxTexelGatherOffset;
+
+ case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
+ return 1;
+
+ case PIPE_CAP_VENDOR_ID:
+ return screen->props.vendorID;
+ case PIPE_CAP_DEVICE_ID:
+ return screen->props.deviceID;
+
+ case PIPE_CAP_ACCELERATED:
+ return 1;
+ case PIPE_CAP_VIDEO_MEMORY:
+ return get_video_mem(screen);
+ case PIPE_CAP_UMA:
+ return screen->props.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+
+ case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
+ return screen->props.limits.maxVertexInputBindingStride;
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_SAMPLER_VIEW_TARGET:
+ return 1;
+#endif
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_CLIP_HALFZ:
+ return 1;
+#endif
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
+ case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
+ return 1;
+#endif
+
+ case PIPE_CAP_SHAREABLE_SHADERS:
+ return 1;
+
+#if 0 /* TODO: Enable me. Enables GL_ARB_shader_storage_buffer_object */
+ case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
+ return screen->props.limits.minStorageBufferOffsetAlignment;
+#endif
+
+ case PIPE_CAP_PCI_GROUP:
+ case PIPE_CAP_PCI_BUS:
+ case PIPE_CAP_PCI_DEVICE:
+ case PIPE_CAP_PCI_FUNCTION:
+ return 0; /* TODO: figure these out */
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_CULL_DISTANCE:
+ return screen->feats.shaderCullDistance;
+#endif
+
+ case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
+ return screen->props.limits.viewportSubPixelBits;
+
+ case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
+ return 0; /* not sure */
+
+ case PIPE_CAP_MAX_GS_INVOCATIONS:
+ return 0; /* not implemented */
+
+ case PIPE_CAP_MAX_COMBINED_SHADER_BUFFERS:
+ return screen->props.limits.maxDescriptorSetStorageBuffers;
+
+ case PIPE_CAP_MAX_SHADER_BUFFER_SIZE:
+ return screen->props.limits.maxStorageBufferRange; /* unsure */
+
+ case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
+ case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
+ return 1;
+
+ case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
+ case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
+ return 0;
+
+ case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
+ return 0;
+
+ case PIPE_CAP_NIR_COMPACT_ARRAYS:
+ return 1;
+
+ case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
+ return 1;
+
+ case PIPE_CAP_FLATSHADE:
+ case PIPE_CAP_ALPHA_TEST:
+ case PIPE_CAP_CLIP_PLANES:
+ case PIPE_CAP_POINT_SIZE_FIXED:
+ case PIPE_CAP_TWO_SIDED_COLOR:
+ return 0;
+
+ case PIPE_CAP_DMABUF:
+ return screen->have_KHR_external_memory_fd;
+
+ default:
+ return u_pipe_screen_get_param_defaults(pscreen, param);
+ }
+}
+
+static float
+zink_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ switch (param) {
+ case PIPE_CAPF_MAX_LINE_WIDTH:
+ case PIPE_CAPF_MAX_LINE_WIDTH_AA:
+ return screen->props.limits.lineWidthRange[1];
+
+ case PIPE_CAPF_MAX_POINT_WIDTH:
+ case PIPE_CAPF_MAX_POINT_WIDTH_AA:
+ return screen->props.limits.pointSizeRange[1];
+
+ case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
+ return screen->props.limits.maxSamplerAnisotropy;
+
+ case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
+ return screen->props.limits.maxSamplerLodBias;
+
+ case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
+ case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
+ case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
+ return 0.0f; /* not implemented */
+ }
+
+ /* should only get here on unhandled cases */
+ return 0.0;
+}
+
+static int
+zink_get_shader_param(struct pipe_screen *pscreen,
+ enum pipe_shader_type shader,
+ enum pipe_shader_cap param)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ switch (param) {
+ case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
+ case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
+ if (shader == PIPE_SHADER_VERTEX ||
+ shader == PIPE_SHADER_FRAGMENT)
+ return INT_MAX;
+ return 0;
+
+ case PIPE_SHADER_CAP_MAX_INPUTS:
+ switch (shader) {
+ case PIPE_SHADER_VERTEX:
+ return MIN2(screen->props.limits.maxVertexInputAttributes,
+ PIPE_MAX_SHADER_INPUTS);
+ case PIPE_SHADER_FRAGMENT:
+ return MIN2(screen->props.limits.maxFragmentInputComponents / 4,
+ PIPE_MAX_SHADER_INPUTS);
+ default:
+ return 0; /* unsupported stage */
+ }
+
+ case PIPE_SHADER_CAP_MAX_OUTPUTS:
+ switch (shader) {
+ case PIPE_SHADER_VERTEX:
+ return MIN2(screen->props.limits.maxVertexOutputComponents / 4,
+ PIPE_MAX_SHADER_OUTPUTS);
+ case PIPE_SHADER_FRAGMENT:
+ return MIN2(screen->props.limits.maxColorAttachments,
+ PIPE_MAX_SHADER_OUTPUTS);
+ default:
+ return 0; /* unsupported stage */
+ }
+
+ case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
+ switch (shader) {
+ case PIPE_SHADER_VERTEX:
+ case PIPE_SHADER_FRAGMENT:
+ /* this might be a bit simplistic... */
+ return MIN2(screen->props.limits.maxPerStageDescriptorSamplers,
+ PIPE_MAX_SAMPLERS);
+ default:
+ return 0; /* unsupported stage */
+ }
+
+ case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
+ return MIN2(screen->props.limits.maxUniformBufferRange, INT_MAX);
+
+ case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
+ return screen->props.limits.maxPerStageDescriptorUniformBuffers;
+
+ case PIPE_SHADER_CAP_MAX_TEMPS:
+ return INT_MAX;
+
+ case PIPE_SHADER_CAP_INTEGERS:
+ return 1;
+
+ case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
+ case PIPE_SHADER_CAP_SUBROUTINES:
+ case PIPE_SHADER_CAP_INT64_ATOMICS:
+ case PIPE_SHADER_CAP_FP16:
+ return 0; /* not implemented */
+
+ case PIPE_SHADER_CAP_PREFERRED_IR:
+ return PIPE_SHADER_IR_NIR;
+
+ case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
+ return 0; /* not implemented */
+
+ case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
+ return MIN2(screen->props.limits.maxPerStageDescriptorSampledImages,
+ PIPE_MAX_SHADER_SAMPLER_VIEWS);
+
+ case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
+ case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
+ case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
+ return 0; /* not implemented */
+
+ case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
+ return 0; /* no idea */
+
+ case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
+ return 32; /* arbitrary */
+
+ case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
+ /* TODO: this limitation is dumb, and will need some fixes in mesa */
+ return MIN2(screen->props.limits.maxPerStageDescriptorStorageBuffers, 8);
+
+ case PIPE_SHADER_CAP_SUPPORTED_IRS:
+ return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_TGSI);
+
+ case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
+ return MIN2(screen->props.limits.maxPerStageDescriptorStorageImages,
+ PIPE_MAX_SHADER_IMAGES);
+
+ case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
+ case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
+ return 0; /* unsure */
+
+ case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED:
+ case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
+ case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
+ case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
+ return 0; /* not implemented */
+ }
+
+ /* should only get here on unhandled cases */
+ return 0;
+}
+
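+/* Gallium passes sample counts as plain integers; Vulkan wants power-of-two
+ * flag bits. Returns 0 for unsupported counts so callers can reject them.
+ */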
+static VkSampleCountFlagBits
+vk_sample_count_flags(uint32_t sample_count)
+{
+ switch (sample_count) {
+ case 1: return VK_SAMPLE_COUNT_1_BIT;
+ case 2: return VK_SAMPLE_COUNT_2_BIT;
+ case 4: return VK_SAMPLE_COUNT_4_BIT;
+ case 8: return VK_SAMPLE_COUNT_8_BIT;
+ case 16: return VK_SAMPLE_COUNT_16_BIT;
+ case 32: return VK_SAMPLE_COUNT_32_BIT;
+ case 64: return VK_SAMPLE_COUNT_64_BIT;
+ default:
+ return 0;
+ }
+}
+
+static bool
+zink_is_format_supported(struct pipe_screen *pscreen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned sample_count,
+ unsigned storage_sample_count,
+ unsigned bind)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ if (format == PIPE_FORMAT_NONE)
+ return screen->props.limits.framebufferNoAttachmentsSampleCounts &
+ vk_sample_count_flags(sample_count);
+
+ VkFormat vkformat = zink_get_format(screen, format);
+ if (vkformat == VK_FORMAT_UNDEFINED)
+ return false;
+
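+ /* Gate multisampled usage on the per-usage sample-count limits first;
+ * the per-format feature flags queried further down do not encode sample
+ * counts.
+ */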
+ if (sample_count >= 1) {
+ VkSampleCountFlagBits sample_mask = vk_sample_count_flags(sample_count);
+ if (!sample_mask)
+ return false;
+ const struct util_format_description *desc = util_format_description(format);
+ if (util_format_is_depth_or_stencil(format)) {
+ if (util_format_has_depth(desc)) {
+ if (bind & PIPE_BIND_DEPTH_STENCIL &&
+ (screen->props.limits.framebufferDepthSampleCounts & sample_mask) != sample_mask)
+ return false;
+ if (bind & PIPE_BIND_SAMPLER_VIEW &&
+ (screen->props.limits.sampledImageDepthSampleCounts & sample_mask) != sample_mask)
+ return false;
+ }
+ if (util_format_has_stencil(desc)) {
+ if (bind & PIPE_BIND_DEPTH_STENCIL &&
+ (screen->props.limits.framebufferStencilSampleCounts & sample_mask) != sample_mask)
+ return false;
+ if (bind & PIPE_BIND_SAMPLER_VIEW &&
+ (screen->props.limits.sampledImageStencilSampleCounts & sample_mask) != sample_mask)
+ return false;
+ }
+ } else if (util_format_is_pure_integer(format)) {
+ if (bind & PIPE_BIND_RENDER_TARGET &&
+ !(screen->props.limits.framebufferColorSampleCounts & sample_mask))
+ return false;
+ if (bind & PIPE_BIND_SAMPLER_VIEW &&
+ !(screen->props.limits.sampledImageIntegerSampleCounts & sample_mask))
+ return false;
+ } else {
+ if (bind & PIPE_BIND_RENDER_TARGET &&
+ !(screen->props.limits.framebufferColorSampleCounts & sample_mask))
+ return false;
+ if (bind & PIPE_BIND_SAMPLER_VIEW &&
+ !(screen->props.limits.sampledImageColorSampleCounts & sample_mask))
+ return false;
+ }
+ }
+
+ VkFormatProperties props;
+ vkGetPhysicalDeviceFormatProperties(screen->pdev, vkformat, &props);
+
+ if (target == PIPE_BUFFER) {
+ if (bind & PIPE_BIND_VERTEX_BUFFER &&
+ !(props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT))
+ return false;
+ } else {
+ /* all other targets are texture-targets */
+ if (bind & PIPE_BIND_RENDER_TARGET &&
+ !(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
+ return false;
+
+ if (bind & PIPE_BIND_BLENDABLE &&
+ !(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT))
+ return false;
+
+ if (bind & PIPE_BIND_SAMPLER_VIEW &&
+ !(props.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT))
+ return false;
+
+ if (bind & PIPE_BIND_DEPTH_STENCIL &&
+ !(props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
+ return false;
+ }
+
+ if (util_format_is_compressed(format)) {
+ const struct util_format_description *desc = util_format_description(format);
+ if (desc->layout == UTIL_FORMAT_LAYOUT_BPTC &&
+ !screen->feats.textureCompressionBC)
+ return false;
+ }
+
+ return true;
+}
+
+static void
+zink_destroy_screen(struct pipe_screen *pscreen)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ slab_destroy_parent(&screen->transfer_pool);
+ FREE(screen);
+}
+
+static VkInstance
+create_instance(void)
+{
+ VkApplicationInfo ai = {};
+ ai.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+
+ char proc_name[128];
+ if (os_get_process_name(proc_name, ARRAY_SIZE(proc_name)))
+ ai.pApplicationName = proc_name;
+ else
+ ai.pApplicationName = "unknown";
+
+ ai.pEngineName = "mesa zink";
+ ai.apiVersion = VK_API_VERSION_1_0;
+
+ const char *extensions[] = {
+ VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
+ };
+
+ VkInstanceCreateInfo ici = {};
+ ici.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ ici.pApplicationInfo = &ai;
+ ici.ppEnabledExtensionNames = extensions;
+ ici.enabledExtensionCount = ARRAY_SIZE(extensions);
+
+ VkInstance instance = VK_NULL_HANDLE;
+ VkResult err = vkCreateInstance(&ici, NULL, &instance);
+ if (err != VK_SUCCESS)
+ return VK_NULL_HANDLE;
+
+ return instance;
+}
+
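+/* Pick a physical device, preferring the first discrete GPU and otherwise
+ * falling back to whatever the implementation enumerates first.
+ */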
+static VkPhysicalDevice
+choose_pdev(const VkInstance instance)
+{
+ uint32_t i, pdev_count;
+ VkPhysicalDevice *pdevs, pdev;
+ vkEnumeratePhysicalDevices(instance, &pdev_count, NULL);
+ assert(pdev_count > 0);
+
+ pdevs = malloc(sizeof(*pdevs) * pdev_count);
+ vkEnumeratePhysicalDevices(instance, &pdev_count, pdevs);
+ assert(pdev_count > 0);
+
+ pdev = pdevs[0];
+ for (i = 0; i < pdev_count; ++i) {
+ VkPhysicalDeviceProperties props;
+ vkGetPhysicalDeviceProperties(pdevs[i], &props);
+ if (props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) {
+ pdev = pdevs[i];
+ break;
+ }
+ }
+ free(pdevs);
+ return pdev;
+}
+
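+/* Find a queue family with graphics support; returns UINT32_MAX when there
+ * is none, in which case zink cannot operate on this device.
+ */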
+static uint32_t
+find_gfx_queue(const VkPhysicalDevice pdev)
+{
+ uint32_t num_queues;
+ vkGetPhysicalDeviceQueueFamilyProperties(pdev, &num_queues, NULL);
+ assert(num_queues > 0);
+
+ VkQueueFamilyProperties *props = malloc(sizeof(*props) * num_queues);
+ vkGetPhysicalDeviceQueueFamilyProperties(pdev, &num_queues, props);
+
+ for (uint32_t i = 0; i < num_queues; i++) {
+ if (props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ free(props);
+ return i;
+ }
+ }
+
+ free(props);
+ return UINT32_MAX;
+}
+
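+/* Software presentation path: map the linear image, copy it row by row into
+ * the winsys display target, and hand that to the window system.
+ */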
+static void
+zink_flush_frontbuffer(struct pipe_screen *pscreen,
+ struct pipe_resource *pres,
+ unsigned level, unsigned layer,
+ void *winsys_drawable_handle,
+ struct pipe_box *sub_box)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ struct sw_winsys *winsys = screen->winsys;
+ struct zink_resource *res = zink_resource(pres);
+
+ if (!winsys)
+ return;
+ void *map = winsys->displaytarget_map(winsys, res->dt, 0);
+
+ if (map) {
+ VkImageSubresource isr = {};
+ isr.aspectMask = res->aspect;
+ isr.mipLevel = level;
+ isr.arrayLayer = layer;
+ VkSubresourceLayout layout;
+ vkGetImageSubresourceLayout(screen->dev, res->image, &isr, &layout);
+
+ void *ptr;
+ VkResult result = vkMapMemory(screen->dev, res->mem, res->offset, res->size, 0, &ptr);
+ if (result != VK_SUCCESS) {
+ debug_printf("failed to map memory for display\n");
+ return;
+ }
+ for (int i = 0; i < pres->height0; ++i) {
+ uint8_t *src = (uint8_t *)ptr + i * layout.rowPitch;
+ uint8_t *dst = (uint8_t *)map + i * res->dt_stride;
+ memcpy(dst, src, res->dt_stride);
+ }
+ vkUnmapMemory(screen->dev, res->mem);
+ }
+
+ winsys->displaytarget_unmap(winsys, res->dt);
+
+ assert(res->dt);
+ if (res->dt)
+ winsys->displaytarget_display(winsys, res->dt, winsys_drawable_handle, sub_box);
+}
+
+static struct pipe_screen *
+zink_internal_create_screen(struct sw_winsys *winsys, int fd)
+{
+ struct zink_screen *screen = CALLOC_STRUCT(zink_screen);
+ if (!screen)
+ return NULL;
+
+ zink_debug = debug_get_option_zink_debug();
+
+   screen->instance = create_instance();
+   if (screen->instance == VK_NULL_HANDLE)
+      goto fail;
+ screen->pdev = choose_pdev(screen->instance);
+ screen->gfx_queue = find_gfx_queue(screen->pdev);
+
+ vkGetPhysicalDeviceProperties(screen->pdev, &screen->props);
+ vkGetPhysicalDeviceFeatures(screen->pdev, &screen->feats);
+ vkGetPhysicalDeviceMemoryProperties(screen->pdev, &screen->mem_props);
+
+ screen->have_X8_D24_UNORM_PACK32 = zink_is_depth_format_supported(screen,
+ VK_FORMAT_X8_D24_UNORM_PACK32);
+ screen->have_D24_UNORM_S8_UINT = zink_is_depth_format_supported(screen,
+ VK_FORMAT_D24_UNORM_S8_UINT);
+
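+   /* Probe device extensions up front; the results gate device creation and
+    * dma-buf export support below.
+    */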
+ uint32_t num_extensions = 0;
+ if (vkEnumerateDeviceExtensionProperties(screen->pdev, NULL,
+ &num_extensions, NULL) == VK_SUCCESS && num_extensions > 0) {
+ VkExtensionProperties *extensions = MALLOC(sizeof(VkExtensionProperties) *
+ num_extensions);
+ if (extensions) {
+ vkEnumerateDeviceExtensionProperties(screen->pdev, NULL,
+ &num_extensions, extensions);
+
+ for (uint32_t i = 0; i < num_extensions; ++i) {
+ if (!strcmp(extensions[i].extensionName,
+ VK_KHR_MAINTENANCE1_EXTENSION_NAME))
+ screen->have_KHR_maintenance1 = true;
+ if (!strcmp(extensions[i].extensionName,
+ VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME))
+ screen->have_KHR_external_memory_fd = true;
+ }
+ FREE(extensions);
+ }
+ }
+
+ if (!screen->have_KHR_maintenance1) {
+ debug_printf("ZINK: VK_KHR_maintenance1 required!\n");
+ goto fail;
+ }
+
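+   /* One graphics queue is all this driver currently uses. */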
+ VkDeviceQueueCreateInfo qci = {};
+ float dummy = 0.0f;
+ qci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ qci.queueFamilyIndex = screen->gfx_queue;
+ qci.queueCount = 1;
+ qci.pQueuePriorities = &dummy;
+
+ VkDeviceCreateInfo dci = {};
+ dci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+ dci.queueCreateInfoCount = 1;
+ dci.pQueueCreateInfos = &qci;
+ dci.pEnabledFeatures = &screen->feats;
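+   /* VK_KHR_maintenance1 is always enabled (it is required above, among
+    * other things for flipped, negative-height viewports); the
+    * external-memory extensions are added only when the device advertises
+    * them.
+    */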
+ const char *extensions[3] = {
+ VK_KHR_MAINTENANCE1_EXTENSION_NAME,
+ };
+ num_extensions = 1;
+
+ if (fd >= 0 && !screen->have_KHR_external_memory_fd) {
+ debug_printf("ZINK: KHR_external_memory_fd required!\n");
+ goto fail;
+ }
+
+ if (screen->have_KHR_external_memory_fd) {
+ extensions[num_extensions++] = VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME;
+ extensions[num_extensions++] = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
+ }
+ assert(num_extensions <= ARRAY_SIZE(extensions));
+
+ dci.ppEnabledExtensionNames = extensions;
+ dci.enabledExtensionCount = num_extensions;
+ if (vkCreateDevice(screen->pdev, &dci, NULL, &screen->dev) != VK_SUCCESS)
+ goto fail;
+
+ screen->winsys = winsys;
+
+ screen->base.get_name = zink_get_name;
+ screen->base.get_vendor = zink_get_vendor;
+ screen->base.get_device_vendor = zink_get_device_vendor;
+ screen->base.get_param = zink_get_param;
+ screen->base.get_paramf = zink_get_paramf;
+ screen->base.get_shader_param = zink_get_shader_param;
+ screen->base.get_compiler_options = zink_get_compiler_options;
+ screen->base.is_format_supported = zink_is_format_supported;
+ screen->base.context_create = zink_context_create;
+ screen->base.flush_frontbuffer = zink_flush_frontbuffer;
+ screen->base.destroy = zink_destroy_screen;
+
+ zink_screen_resource_init(&screen->base);
+ zink_screen_fence_init(&screen->base);
+
+ slab_create_parent(&screen->transfer_pool, sizeof(struct zink_transfer), 16);
+
+ return &screen->base;
+
+fail:
+ FREE(screen);
+ return NULL;
+}
+
+struct pipe_screen *
+zink_create_screen(struct sw_winsys *winsys)
+{
+ return zink_internal_create_screen(winsys, -1);
+}
+
+struct pipe_screen *
+zink_drm_create_screen(int fd)
+{
+ return zink_internal_create_screen(NULL, fd);
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_screen.h b/lib/mesa/src/gallium/drivers/zink/zink_screen.h
new file mode 100644
index 000000000..09acb7f3e
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_screen.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_SCREEN_H
+#define ZINK_SCREEN_H
+
+#include "pipe/p_screen.h"
+#include "util/slab.h"
+
+#include <vulkan/vulkan.h>
+
+extern uint32_t zink_debug;
+
+#define ZINK_DEBUG_NIR 0x1
+#define ZINK_DEBUG_SPIRV 0x2
+#define ZINK_DEBUG_TGSI 0x4
+
+struct zink_screen {
+ struct pipe_screen base;
+
+ struct sw_winsys *winsys;
+
+ struct slab_parent_pool transfer_pool;
+
+ VkInstance instance;
+ VkPhysicalDevice pdev;
+
+ VkPhysicalDeviceProperties props;
+ VkPhysicalDeviceFeatures feats;
+ VkPhysicalDeviceMemoryProperties mem_props;
+
+ bool have_KHR_maintenance1;
+ bool have_KHR_external_memory_fd;
+
+ bool have_X8_D24_UNORM_PACK32;
+ bool have_D24_UNORM_S8_UINT;
+
+ uint32_t gfx_queue;
+ VkDevice dev;
+
+ PFN_vkGetMemoryFdKHR vk_GetMemoryFdKHR;
+};
+
+static inline struct zink_screen *
+zink_screen(struct pipe_screen *pipe)
+{
+ return (struct zink_screen *)pipe;
+}
+
+VkFormat
+zink_get_format(struct zink_screen *screen, enum pipe_format format);
+
+bool
+zink_is_depth_format_supported(struct zink_screen *screen, VkFormat format);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_state.c b/lib/mesa/src/gallium/drivers/zink/zink_state.c
new file mode 100644
index 000000000..bec8d05d5
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_state.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_state.h"
+
+#include "zink_context.h"
+#include "zink_screen.h"
+
+#include "util/u_memory.h"
+
+#include <math.h>
+
+static void *
+zink_create_vertex_elements_state(struct pipe_context *pctx,
+ unsigned num_elements,
+ const struct pipe_vertex_element *elements)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ unsigned int i;
+ struct zink_vertex_elements_state *ves = CALLOC_STRUCT(zink_vertex_elements_state);
+ if (!ves)
+ return NULL;
+
+ int buffer_map[PIPE_MAX_ATTRIBS];
+ for (int i = 0; i < ARRAY_SIZE(buffer_map); ++i)
+ buffer_map[i] = -1;
+
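+   /* Compact the referenced vertex-buffer indices into a dense range of
+    * Vulkan bindings; buffer_map remembers each buffer's assigned binding.
+    */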
+ int num_bindings = 0;
+ for (i = 0; i < num_elements; ++i) {
+ const struct pipe_vertex_element *elem = elements + i;
+ assert(!elem->instance_divisor);
+
+ int binding = elem->vertex_buffer_index;
+ if (buffer_map[binding] < 0) {
+ ves->binding_map[num_bindings] = binding;
+ buffer_map[binding] = num_bindings++;
+ }
+ binding = buffer_map[binding];
+
+ ves->bindings[binding].binding = binding;
+ ves->bindings[binding].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+
+ ves->hw_state.attribs[i].binding = binding;
+ ves->hw_state.attribs[i].location = i; // TODO: unsure
+ ves->hw_state.attribs[i].format = zink_get_format(screen,
+ elem->src_format);
+ assert(ves->hw_state.attribs[i].format != VK_FORMAT_UNDEFINED);
+ ves->hw_state.attribs[i].offset = elem->src_offset;
+ }
+
+ ves->hw_state.num_bindings = num_bindings;
+ ves->hw_state.num_attribs = num_elements;
+ return ves;
+}
+
+static void
+zink_bind_vertex_elements_state(struct pipe_context *pctx,
+ void *cso)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_gfx_pipeline_state *state = &ctx->gfx_pipeline_state;
+ ctx->element_state = cso;
+ if (cso) {
+ state->element_state = &ctx->element_state->hw_state;
+ struct zink_vertex_elements_state *ves = cso;
+ for (int i = 0; i < state->element_state->num_bindings; ++i) {
+ state->bindings[i].binding = ves->bindings[i].binding;
+ state->bindings[i].inputRate = ves->bindings[i].inputRate;
+ }
+ } else
+ state->element_state = NULL;
+}
+
+static void
+zink_delete_vertex_elements_state(struct pipe_context *pctx,
+ void *ves)
+{
+   FREE(ves);
+}
+
+static VkBlendFactor
+blend_factor(enum pipe_blendfactor factor)
+{
+ switch (factor) {
+ case PIPE_BLENDFACTOR_ONE: return VK_BLEND_FACTOR_ONE;
+ case PIPE_BLENDFACTOR_SRC_COLOR: return VK_BLEND_FACTOR_SRC_COLOR;
+ case PIPE_BLENDFACTOR_SRC_ALPHA: return VK_BLEND_FACTOR_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_DST_ALPHA: return VK_BLEND_FACTOR_DST_ALPHA;
+ case PIPE_BLENDFACTOR_DST_COLOR: return VK_BLEND_FACTOR_DST_COLOR;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
+ case PIPE_BLENDFACTOR_CONST_COLOR: return VK_BLEND_FACTOR_CONSTANT_COLOR;
+ case PIPE_BLENDFACTOR_CONST_ALPHA: return VK_BLEND_FACTOR_CONSTANT_ALPHA;
+ case PIPE_BLENDFACTOR_SRC1_COLOR: return VK_BLEND_FACTOR_SRC1_COLOR;
+ case PIPE_BLENDFACTOR_SRC1_ALPHA: return VK_BLEND_FACTOR_SRC1_ALPHA;
+
+ case PIPE_BLENDFACTOR_ZERO: return VK_BLEND_FACTOR_ZERO;
+
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
+
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
+ }
+ unreachable("unexpected blend factor");
+}
+
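+/* Blend factors that read the constant color or alpha require blend
+ * constants to be supplied as dynamic state at draw time.
+ */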
+static bool
+need_blend_constants(enum pipe_blendfactor factor)
+{
+ switch (factor) {
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static VkBlendOp
+blend_op(enum pipe_blend_func func)
+{
+ switch (func) {
+ case PIPE_BLEND_ADD: return VK_BLEND_OP_ADD;
+ case PIPE_BLEND_SUBTRACT: return VK_BLEND_OP_SUBTRACT;
+ case PIPE_BLEND_REVERSE_SUBTRACT: return VK_BLEND_OP_REVERSE_SUBTRACT;
+ case PIPE_BLEND_MIN: return VK_BLEND_OP_MIN;
+ case PIPE_BLEND_MAX: return VK_BLEND_OP_MAX;
+ }
+ unreachable("unexpected blend function");
+}
+
+static VkLogicOp
+logic_op(enum pipe_logicop func)
+{
+ switch (func) {
+ case PIPE_LOGICOP_CLEAR: return VK_LOGIC_OP_CLEAR;
+ case PIPE_LOGICOP_NOR: return VK_LOGIC_OP_NOR;
+ case PIPE_LOGICOP_AND_INVERTED: return VK_LOGIC_OP_AND_INVERTED;
+ case PIPE_LOGICOP_COPY_INVERTED: return VK_LOGIC_OP_COPY_INVERTED;
+ case PIPE_LOGICOP_AND_REVERSE: return VK_LOGIC_OP_AND_REVERSE;
+ case PIPE_LOGICOP_INVERT: return VK_LOGIC_OP_INVERT;
+ case PIPE_LOGICOP_XOR: return VK_LOGIC_OP_XOR;
+ case PIPE_LOGICOP_NAND: return VK_LOGIC_OP_NAND;
+ case PIPE_LOGICOP_AND: return VK_LOGIC_OP_AND;
+ case PIPE_LOGICOP_EQUIV: return VK_LOGIC_OP_EQUIVALENT;
+ case PIPE_LOGICOP_NOOP: return VK_LOGIC_OP_NO_OP;
+ case PIPE_LOGICOP_OR_INVERTED: return VK_LOGIC_OP_OR_INVERTED;
+ case PIPE_LOGICOP_COPY: return VK_LOGIC_OP_COPY;
+ case PIPE_LOGICOP_OR_REVERSE: return VK_LOGIC_OP_OR_REVERSE;
+ case PIPE_LOGICOP_OR: return VK_LOGIC_OP_OR;
+ case PIPE_LOGICOP_SET: return VK_LOGIC_OP_SET;
+ }
+ unreachable("unexpected logicop function");
+}
+
+static void *
+zink_create_blend_state(struct pipe_context *pctx,
+ const struct pipe_blend_state *blend_state)
+{
+ struct zink_blend_state *cso = CALLOC_STRUCT(zink_blend_state);
+ if (!cso)
+ return NULL;
+
+ if (blend_state->logicop_enable) {
+ cso->logicop_enable = VK_TRUE;
+ cso->logicop_func = logic_op(blend_state->logicop_func);
+ }
+
+   /* TODO: figure out what to do with dither (nothing is probably "OK" for
+    * now, as dithering is undefined in GL)
+    */
+
+   /* TODO: this is multisample state, and should be set there instead, as
+    * that is more closely tied to the update frequency
+    */
+ cso->alpha_to_coverage = blend_state->alpha_to_coverage;
+ cso->alpha_to_one = blend_state->alpha_to_one;
+
+ cso->need_blend_constants = false;
+
+ for (int i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
+ const struct pipe_rt_blend_state *rt = blend_state->rt;
+ if (blend_state->independent_blend_enable)
+ rt = blend_state->rt + i;
+
+ VkPipelineColorBlendAttachmentState att = { };
+
+ if (rt->blend_enable) {
+ att.blendEnable = VK_TRUE;
+ att.srcColorBlendFactor = blend_factor(rt->rgb_src_factor);
+ att.dstColorBlendFactor = blend_factor(rt->rgb_dst_factor);
+ att.colorBlendOp = blend_op(rt->rgb_func);
+ att.srcAlphaBlendFactor = blend_factor(rt->alpha_src_factor);
+ att.dstAlphaBlendFactor = blend_factor(rt->alpha_dst_factor);
+ att.alphaBlendOp = blend_op(rt->alpha_func);
+
+ if (need_blend_constants(rt->rgb_src_factor) ||
+ need_blend_constants(rt->rgb_dst_factor) ||
+ need_blend_constants(rt->alpha_src_factor) ||
+ need_blend_constants(rt->alpha_dst_factor))
+ cso->need_blend_constants = true;
+ }
+
+ if (rt->colormask & PIPE_MASK_R)
+ att.colorWriteMask |= VK_COLOR_COMPONENT_R_BIT;
+ if (rt->colormask & PIPE_MASK_G)
+ att.colorWriteMask |= VK_COLOR_COMPONENT_G_BIT;
+ if (rt->colormask & PIPE_MASK_B)
+ att.colorWriteMask |= VK_COLOR_COMPONENT_B_BIT;
+ if (rt->colormask & PIPE_MASK_A)
+ att.colorWriteMask |= VK_COLOR_COMPONENT_A_BIT;
+
+ cso->attachments[i] = att;
+ }
+
+ return cso;
+}
+
+static void
+zink_bind_blend_state(struct pipe_context *pctx, void *cso)
+{
+ zink_context(pctx)->gfx_pipeline_state.blend_state = cso;
+}
+
+static void
+zink_delete_blend_state(struct pipe_context *pctx, void *blend_state)
+{
+ FREE(blend_state);
+}
+
+static VkCompareOp
+compare_op(enum pipe_compare_func func)
+{
+ switch (func) {
+ case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
+ case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
+ case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
+ case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
+ case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
+ case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
+ case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
+ case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
+ }
+ unreachable("unexpected func");
+}
+
+static VkStencilOp
+stencil_op(enum pipe_stencil_op op)
+{
+ switch (op) {
+ case PIPE_STENCIL_OP_KEEP: return VK_STENCIL_OP_KEEP;
+ case PIPE_STENCIL_OP_ZERO: return VK_STENCIL_OP_ZERO;
+ case PIPE_STENCIL_OP_REPLACE: return VK_STENCIL_OP_REPLACE;
+ case PIPE_STENCIL_OP_INCR: return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
+ case PIPE_STENCIL_OP_DECR: return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
+   case PIPE_STENCIL_OP_INCR_WRAP: return VK_STENCIL_OP_INCREMENT_AND_WRAP;
+   case PIPE_STENCIL_OP_DECR_WRAP: return VK_STENCIL_OP_DECREMENT_AND_WRAP;
+ case PIPE_STENCIL_OP_INVERT: return VK_STENCIL_OP_INVERT;
+ }
+ unreachable("unexpected op");
+}
+
+static VkStencilOpState
+stencil_op_state(const struct pipe_stencil_state *src)
+{
+ VkStencilOpState ret;
+ ret.failOp = stencil_op(src->fail_op);
+ ret.passOp = stencil_op(src->zpass_op);
+ ret.depthFailOp = stencil_op(src->zfail_op);
+ ret.compareOp = compare_op(src->func);
+ ret.compareMask = src->valuemask;
+ ret.writeMask = src->writemask;
+ ret.reference = 0; // not used: we'll use a dynamic state for this
+ return ret;
+}
+
+static void *
+zink_create_depth_stencil_alpha_state(struct pipe_context *pctx,
+ const struct pipe_depth_stencil_alpha_state *depth_stencil_alpha)
+{
+ struct zink_depth_stencil_alpha_state *cso = CALLOC_STRUCT(zink_depth_stencil_alpha_state);
+ if (!cso)
+ return NULL;
+
+ if (depth_stencil_alpha->depth.enabled) {
+ cso->depth_test = VK_TRUE;
+ cso->depth_compare_op = compare_op(depth_stencil_alpha->depth.func);
+ }
+
+ if (depth_stencil_alpha->depth.bounds_test) {
+ cso->depth_bounds_test = VK_TRUE;
+ cso->min_depth_bounds = depth_stencil_alpha->depth.bounds_min;
+ cso->max_depth_bounds = depth_stencil_alpha->depth.bounds_max;
+ }
+
+ if (depth_stencil_alpha->stencil[0].enabled) {
+ cso->stencil_test = VK_TRUE;
+ cso->stencil_front = stencil_op_state(depth_stencil_alpha->stencil);
+ }
+
+   if (depth_stencil_alpha->stencil[1].enabled)
+ cso->stencil_back = stencil_op_state(depth_stencil_alpha->stencil + 1);
+ else
+ cso->stencil_back = cso->stencil_front;
+
+ cso->depth_write = depth_stencil_alpha->depth.writemask;
+
+ return cso;
+}
+
+static void
+zink_bind_depth_stencil_alpha_state(struct pipe_context *pctx, void *cso)
+{
+ zink_context(pctx)->gfx_pipeline_state.depth_stencil_alpha_state = cso;
+}
+
+static void
+zink_delete_depth_stencil_alpha_state(struct pipe_context *pctx,
+ void *depth_stencil_alpha)
+{
+ FREE(depth_stencil_alpha);
+}
+
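+/* Vulkan reports a granularity and a [min, max] range for line widths; snap
+ * the requested width to that granularity, then clamp it into the range.
+ */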
+static float
+round_to_granularity(float value, float granularity)
+{
+ return roundf(value / granularity) * granularity;
+}
+
+static float
+line_width(float width, float granularity, const float range[2])
+{
+ assert(granularity >= 0);
+ assert(range[0] <= range[1]);
+
+ if (granularity > 0)
+ width = round_to_granularity(width, granularity);
+
+ return CLAMP(width, range[0], range[1]);
+}
+
+static void *
+zink_create_rasterizer_state(struct pipe_context *pctx,
+ const struct pipe_rasterizer_state *rs_state)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+
+ struct zink_rasterizer_state *state = CALLOC_STRUCT(zink_rasterizer_state);
+ if (!state)
+ return NULL;
+
+ state->base = *rs_state;
+
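+   /* Gallium exposes near and far depth clipping separately, but Vulkan only
+    * has a single depth-clamp toggle, so both flags must agree.
+    */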
+ assert(rs_state->depth_clip_far == rs_state->depth_clip_near);
+ state->hw_state.depth_clamp = rs_state->depth_clip_near == 0;
+ state->hw_state.rasterizer_discard = rs_state->rasterizer_discard;
+
+ assert(rs_state->fill_front <= PIPE_POLYGON_MODE_POINT);
+ if (rs_state->fill_back != rs_state->fill_front)
+ debug_printf("BUG: vulkan doesn't support different front and back fill modes\n");
+ state->hw_state.polygon_mode = (VkPolygonMode)rs_state->fill_front; // same values
+ state->hw_state.cull_mode = (VkCullModeFlags)rs_state->cull_face; // same bits
+
+ state->hw_state.front_face = rs_state->front_ccw ?
+ VK_FRONT_FACE_COUNTER_CLOCKWISE :
+ VK_FRONT_FACE_CLOCKWISE;
+
+ state->offset_point = rs_state->offset_point;
+ state->offset_line = rs_state->offset_line;
+ state->offset_tri = rs_state->offset_tri;
+ state->offset_units = rs_state->offset_units;
+ state->offset_clamp = rs_state->offset_clamp;
+ state->offset_scale = rs_state->offset_scale;
+
+ state->line_width = line_width(rs_state->line_width,
+ screen->props.limits.lineWidthGranularity,
+ screen->props.limits.lineWidthRange);
+
+ return state;
+}
+
+static void
+zink_bind_rasterizer_state(struct pipe_context *pctx, void *cso)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ ctx->rast_state = cso;
+
+ if (ctx->rast_state) {
+ ctx->gfx_pipeline_state.rast_state = &ctx->rast_state->hw_state;
+ ctx->line_width = ctx->rast_state->line_width;
+ }
+}
+
+static void
+zink_delete_rasterizer_state(struct pipe_context *pctx, void *rs_state)
+{
+ FREE(rs_state);
+}
+
+void
+zink_context_state_init(struct pipe_context *pctx)
+{
+ pctx->create_vertex_elements_state = zink_create_vertex_elements_state;
+ pctx->bind_vertex_elements_state = zink_bind_vertex_elements_state;
+ pctx->delete_vertex_elements_state = zink_delete_vertex_elements_state;
+
+ pctx->create_blend_state = zink_create_blend_state;
+ pctx->bind_blend_state = zink_bind_blend_state;
+ pctx->delete_blend_state = zink_delete_blend_state;
+
+ pctx->create_depth_stencil_alpha_state = zink_create_depth_stencil_alpha_state;
+ pctx->bind_depth_stencil_alpha_state = zink_bind_depth_stencil_alpha_state;
+ pctx->delete_depth_stencil_alpha_state = zink_delete_depth_stencil_alpha_state;
+
+ pctx->create_rasterizer_state = zink_create_rasterizer_state;
+ pctx->bind_rasterizer_state = zink_bind_rasterizer_state;
+ pctx->delete_rasterizer_state = zink_delete_rasterizer_state;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_state.h b/lib/mesa/src/gallium/drivers/zink/zink_state.h
new file mode 100644
index 000000000..ef5e18176
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_state.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_STATE_H
+#define ZINK_STATE_H
+
+#include <vulkan/vulkan.h>
+
+#include "pipe/p_state.h"
+
+struct zink_vertex_elements_hw_state {
+ VkVertexInputAttributeDescription attribs[PIPE_MAX_ATTRIBS];
+ uint32_t num_bindings, num_attribs;
+};
+
+struct zink_vertex_elements_state {
+ struct {
+ uint32_t binding;
+ VkVertexInputRate inputRate;
+ } bindings[PIPE_MAX_ATTRIBS];
+ uint8_t binding_map[PIPE_MAX_ATTRIBS];
+ struct zink_vertex_elements_hw_state hw_state;
+};
+
+struct zink_rasterizer_hw_state {
+ VkBool32 depth_clamp;
+ VkBool32 rasterizer_discard;
+ VkFrontFace front_face;
+ VkPolygonMode polygon_mode;
+ VkCullModeFlags cull_mode;
+};
+
+struct zink_rasterizer_state {
+ struct pipe_rasterizer_state base;
+ bool offset_point, offset_line, offset_tri;
+ float offset_units, offset_clamp, offset_scale;
+ float line_width;
+ struct zink_rasterizer_hw_state hw_state;
+};
+
+struct zink_blend_state {
+ VkPipelineColorBlendAttachmentState attachments[PIPE_MAX_COLOR_BUFS];
+
+ VkBool32 logicop_enable;
+ VkLogicOp logicop_func;
+
+ VkBool32 alpha_to_coverage;
+ VkBool32 alpha_to_one;
+
+ bool need_blend_constants;
+};
+
+struct zink_depth_stencil_alpha_state {
+ VkBool32 depth_test;
+ VkCompareOp depth_compare_op;
+
+ VkBool32 depth_bounds_test;
+ float min_depth_bounds, max_depth_bounds;
+
+ VkBool32 stencil_test;
+ VkStencilOpState stencil_front;
+ VkStencilOpState stencil_back;
+
+ VkBool32 depth_write;
+};
+
+void
+zink_context_state_init(struct pipe_context *pctx);
+
+#endif
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_surface.c b/lib/mesa/src/gallium/drivers/zink/zink_surface.c
new file mode 100644
index 000000000..e9c02af0c
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_surface.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_context.h"
+#include "zink_resource.h"
+#include "zink_screen.h"
+#include "zink_surface.h"
+
+#include "util/format/u_format.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+
+static struct pipe_surface *
+zink_create_surface(struct pipe_context *pctx,
+ struct pipe_resource *pres,
+ const struct pipe_surface *templ)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ unsigned int level = templ->u.tex.level;
+
+ struct zink_surface *surface = CALLOC_STRUCT(zink_surface);
+ if (!surface)
+ return NULL;
+
+ pipe_resource_reference(&surface->base.texture, pres);
+ pipe_reference_init(&surface->base.reference, 1);
+ surface->base.context = pctx;
+ surface->base.format = templ->format;
+ surface->base.width = u_minify(pres->width0, level);
+ surface->base.height = u_minify(pres->height0, level);
+ surface->base.nr_samples = templ->nr_samples;
+ surface->base.u.tex.level = level;
+ surface->base.u.tex.first_layer = templ->u.tex.first_layer;
+ surface->base.u.tex.last_layer = templ->u.tex.last_layer;
+
+ struct zink_resource *res = zink_resource(pres);
+
+ VkImageViewCreateInfo ivci = {};
+ ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ ivci.image = res->image;
+
+ switch (pres->target) {
+ case PIPE_TEXTURE_1D:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_1D;
+ break;
+
+ case PIPE_TEXTURE_1D_ARRAY:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_1D_ARRAY;
+ break;
+
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_RECT:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ break;
+
+ case PIPE_TEXTURE_2D_ARRAY:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ break;
+
+ case PIPE_TEXTURE_CUBE:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
+ break;
+
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+ break;
+
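+   /* Rendering into a slice of a 3D image goes through a 2D view, which
+    * VK_KHR_maintenance1 (required by the screen) permits.
+    */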
+ case PIPE_TEXTURE_3D:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ break;
+
+ default:
+ unreachable("unsupported target");
+ }
+
+ ivci.format = zink_get_format(screen, templ->format);
+
+ // TODO: format swizzles
+ ivci.components.r = VK_COMPONENT_SWIZZLE_R;
+ ivci.components.g = VK_COMPONENT_SWIZZLE_G;
+ ivci.components.b = VK_COMPONENT_SWIZZLE_B;
+ ivci.components.a = VK_COMPONENT_SWIZZLE_A;
+
+ ivci.subresourceRange.aspectMask = res->aspect;
+ ivci.subresourceRange.baseMipLevel = templ->u.tex.level;
+ ivci.subresourceRange.levelCount = 1;
+ ivci.subresourceRange.baseArrayLayer = templ->u.tex.first_layer;
+ ivci.subresourceRange.layerCount = 1 + templ->u.tex.last_layer - templ->u.tex.first_layer;
+
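+   /* Cube and cube-array views span six faces per cube, so the layer count
+    * is scaled accordingly.
+    */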
+ if (pres->target == PIPE_TEXTURE_CUBE ||
+ pres->target == PIPE_TEXTURE_CUBE_ARRAY)
+ ivci.subresourceRange.layerCount *= 6;
+
+ if (vkCreateImageView(screen->dev, &ivci, NULL,
+ &surface->image_view) != VK_SUCCESS) {
+ FREE(surface);
+ return NULL;
+ }
+
+ return &surface->base;
+}
+
+static void
+zink_surface_destroy(struct pipe_context *pctx,
+ struct pipe_surface *psurface)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_surface *surface = zink_surface(psurface);
+ pipe_resource_reference(&psurface->texture, NULL);
+ vkDestroyImageView(screen->dev, surface->image_view, NULL);
+ FREE(surface);
+}
+
+void
+zink_context_surface_init(struct pipe_context *context)
+{
+ context->create_surface = zink_create_surface;
+ context->surface_destroy = zink_surface_destroy;
+}
diff --git a/lib/mesa/src/gallium/drivers/zink/zink_surface.h b/lib/mesa/src/gallium/drivers/zink/zink_surface.h
new file mode 100644
index 000000000..a85a4981c
--- /dev/null
+++ b/lib/mesa/src/gallium/drivers/zink/zink_surface.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_SURFACE_H
+#define ZINK_SURFACE_H
+
+#include "pipe/p_state.h"
+
+#include <vulkan/vulkan.h>
+
+struct pipe_context;
+
+struct zink_surface {
+ struct pipe_surface base;
+ VkImageView image_view;
+};
+
+static inline struct zink_surface *
+zink_surface(struct pipe_surface *pipe)
+{
+ return (struct zink_surface *)pipe;
+}
+
+void
+zink_context_surface_init(struct pipe_context *context);
+
+#endif