author     Jonathan Gray <jsg@cvs.openbsd.org>   2021-07-22 10:50:50 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>   2021-07-22 10:50:50 +0000
commit     9130ec005fbc78a62420643414d8354d0929ca50 (patch)
tree       6762777acdd2d4eee17ef87290e80dc7afe2b73d /lib/mesa/src/intel/vulkan/anv_device.c
parent     ca11beabae33eb59fb981b8adf50b1d47a2a98f0 (diff)
Merge Mesa 21.1.5
Diffstat (limited to 'lib/mesa/src/intel/vulkan/anv_device.c')
-rw-r--r--  lib/mesa/src/intel/vulkan/anv_device.c | 2119
1 file changed, 1183 insertions(+), 936 deletions(-)
diff --git a/lib/mesa/src/intel/vulkan/anv_device.c b/lib/mesa/src/intel/vulkan/anv_device.c
index 1594224e0..a53b4e315 100644
--- a/lib/mesa/src/intel/vulkan/anv_device.c
+++ b/lib/mesa/src/intel/vulkan/anv_device.c
@@ -27,10 +27,12 @@
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
-#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
+#include "drm-uapi/drm.h"
+#include <xf86drm.h>
#include "anv_private.h"
+#include "anv_measure.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/disk_cache.h"
@@ -39,33 +41,42 @@
#include "util/os_misc.h"
#include "util/u_atomic.h"
#include "util/u_string.h"
-#include "util/xmlpool.h"
+#include "util/driconf.h"
#include "git_sha1.h"
#include "vk_util.h"
-#include "common/gen_aux_map.h"
-#include "common/gen_defines.h"
-#include "compiler/glsl_types.h"
+#include "vk_deferred_operation.h"
+#include "common/intel_aux_map.h"
+#include "common/intel_defines.h"
+#include "common/intel_uuid.h"
+#include "perf/gen_perf.h"
#include "genxml/gen7_pack.h"
-static const char anv_dri_options_xml[] =
-DRI_CONF_BEGIN
+static const driOptionDescription anv_dri_options[] = {
DRI_CONF_SECTION_PERFORMANCE
DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
- DRI_CONF_VK_X11_STRICT_IMAGE_COUNT("false")
+ DRI_CONF_VK_X11_STRICT_IMAGE_COUNT(false)
DRI_CONF_SECTION_END
DRI_CONF_SECTION_DEBUG
- DRI_CONF_ALWAYS_FLUSH_CACHE("false")
- DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST("false")
+ DRI_CONF_ALWAYS_FLUSH_CACHE(false)
+ DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST(false)
DRI_CONF_SECTION_END
-DRI_CONF_END;
+};
/* This is probably far too big but it reflects the max size used for messages
 * in OpenGL's KHR_debug.
 */
#define MAX_DEBUG_MESSAGE_LENGTH 4096
+/* Render engine timestamp register */
+#define TIMESTAMP 0x2358
+
+/* The "RAW" clocks on Linux are called "FAST" on FreeBSD */
+#if !defined(CLOCK_MONOTONIC_RAW) && defined(CLOCK_MONOTONIC_FAST)
+#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC_FAST
+#endif
+
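For reference, a minimal sketch of what the fallback above enables: the same
clock_gettime() call compiles and runs on both Linux and FreeBSD (standard
POSIX API; error handling trimmed).

   #include <stdio.h>
   #include <time.h>

   /* Mirror the driver's alias so the name resolves on FreeBSD too. */
   #if !defined(CLOCK_MONOTONIC_RAW) && defined(CLOCK_MONOTONIC_FAST)
   #define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC_FAST
   #endif

   int main(void)
   {
      struct timespec ts;
      if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) == 0)
         printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
      return 0;
   }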
static void
compiler_debug_log(void *data, const char *fmt, ...)
{
@@ -73,7 +84,7 @@ compiler_debug_log(void *data, const char *fmt, ...)
struct anv_device *device = (struct anv_device *)data;
struct anv_instance *instance = device->physical->instance;
- if (list_is_empty(&instance->debug_report_callbacks.callbacks))
+ if (list_is_empty(&instance->vk.debug_report.callbacks))
return;
va_list args;
@@ -81,10 +92,9 @@ compiler_debug_log(void *data, const char *fmt, ...)
(void) vsnprintf(str, MAX_DEBUG_MESSAGE_LENGTH, fmt, args);
va_end(args);
- vk_debug_report(&instance->debug_report_callbacks,
+ vk_debug_report(&instance->vk,
VK_DEBUG_REPORT_DEBUG_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
- 0, 0, 0, "anv", str);
+ NULL, 0, 0, "anv", str);
}
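The messages formatted here surface through VK_EXT_debug_report. A hedged
sketch of the application side, assuming a VkInstance created with that
extension enabled (the helper name register_debug_cb is illustrative, not
driver code):

   #include <stdio.h>
   #include <vulkan/vulkan.h>

   static VKAPI_ATTR VkBool32 VKAPI_CALL
   debug_cb(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT type,
            uint64_t object, size_t location, int32_t code,
            const char *layer_prefix, const char *message, void *user_data)
   {
      /* Messages from compiler_debug_log arrive with prefix "anv". */
      fprintf(stderr, "[%s] %s\n", layer_prefix, message);
      return VK_FALSE; /* do not abort the triggering call */
   }

   static VkDebugReportCallbackEXT
   register_debug_cb(VkInstance instance)
   {
      PFN_vkCreateDebugReportCallbackEXT create_cb =
         (PFN_vkCreateDebugReportCallbackEXT)
         vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT");

      const VkDebugReportCallbackCreateInfoEXT info = {
         .sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
         .flags = VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                  VK_DEBUG_REPORT_WARNING_BIT_EXT,
         .pfnCallback = debug_cb,
      };
      VkDebugReportCallbackEXT cb = VK_NULL_HANDLE;
      create_cb(instance, &info, NULL, &cb);
      return cb;
   }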
static void
@@ -93,8 +103,8 @@ compiler_perf_log(void *data, const char *fmt, ...)
va_list args;
va_start(args, fmt);
- if (unlikely(INTEL_DEBUG & DEBUG_PERF))
- intel_logd_v(fmt, args);
+ if (INTEL_DEBUG & DEBUG_PERF)
+ mesa_logd_v(fmt, args);
va_end(args);
}
@@ -124,6 +134,206 @@ anv_compute_heap_size(int fd, uint64_t gtt_size)
return MIN2(available_ram, available_gtt);
}
+#if defined(VK_USE_PLATFORM_WAYLAND_KHR) || \
+ defined(VK_USE_PLATFORM_XCB_KHR) || \
+ defined(VK_USE_PLATFORM_XLIB_KHR) || \
+ defined(VK_USE_PLATFORM_DISPLAY_KHR)
+#define ANV_USE_WSI_PLATFORM
+#endif
+
+#ifdef ANDROID
+#define ANV_API_VERSION VK_MAKE_VERSION(1, 1, VK_HEADER_VERSION)
+#else
+#define ANV_API_VERSION VK_MAKE_VERSION(1, 2, VK_HEADER_VERSION)
+#endif
+
+VkResult anv_EnumerateInstanceVersion(
+ uint32_t* pApiVersion)
+{
+ *pApiVersion = ANV_API_VERSION;
+ return VK_SUCCESS;
+}
+
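ANV_API_VERSION packs major/minor/patch via VK_MAKE_VERSION. A short sketch
of the consumer side, using the loader's global entry point (core since
Vulkan 1.1) and the standard decode macros:

   #include <stdio.h>
   #include <vulkan/vulkan.h>

   int main(void)
   {
      uint32_t version = 0;
      if (vkEnumerateInstanceVersion(&version) == VK_SUCCESS) {
         printf("instance API version %u.%u.%u\n",
                VK_VERSION_MAJOR(version),
                VK_VERSION_MINOR(version),
                VK_VERSION_PATCH(version));
      }
      return 0;
   }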
+static const struct vk_instance_extension_table instance_extensions = {
+ .KHR_device_group_creation = true,
+ .KHR_external_fence_capabilities = true,
+ .KHR_external_memory_capabilities = true,
+ .KHR_external_semaphore_capabilities = true,
+ .KHR_get_physical_device_properties2 = true,
+ .EXT_debug_report = true,
+
+#ifdef ANV_USE_WSI_PLATFORM
+ .KHR_get_surface_capabilities2 = true,
+ .KHR_surface = true,
+ .KHR_surface_protected_capabilities = true,
+#endif
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ .KHR_wayland_surface = true,
+#endif
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ .KHR_xcb_surface = true,
+#endif
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ .KHR_xlib_surface = true,
+#endif
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ .EXT_acquire_xlib_display = true,
+#endif
+#ifdef VK_USE_PLATFORM_DISPLAY_KHR
+ .KHR_display = true,
+ .KHR_get_display_properties2 = true,
+ .EXT_direct_mode_display = true,
+ .EXT_display_surface_counter = true,
+#endif
+};
+
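The table above is what vkEnumerateInstanceExtensionProperties reports,
filtered by the WSI platforms the driver was built with. A minimal sketch of
listing it from an application (standard two-call pattern, error checks
elided):

   #include <stdio.h>
   #include <stdlib.h>
   #include <vulkan/vulkan.h>

   int main(void)
   {
      uint32_t count = 0;
      vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);

      VkExtensionProperties *props = calloc(count, sizeof(*props));
      vkEnumerateInstanceExtensionProperties(NULL, &count, props);

      for (uint32_t i = 0; i < count; i++)
         printf("%s (rev %u)\n", props[i].extensionName, props[i].specVersion);

      free(props);
      return 0;
   }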
+static void
+get_device_extensions(const struct anv_physical_device *device,
+ struct vk_device_extension_table *ext)
+{
+ *ext = (struct vk_device_extension_table) {
+ .KHR_8bit_storage = device->info.ver >= 8,
+ .KHR_16bit_storage = device->info.ver >= 8,
+ .KHR_bind_memory2 = true,
+ .KHR_buffer_device_address = device->has_a64_buffer_access,
+ .KHR_copy_commands2 = true,
+ .KHR_create_renderpass2 = true,
+ .KHR_dedicated_allocation = true,
+ .KHR_deferred_host_operations = true,
+ .KHR_depth_stencil_resolve = true,
+ .KHR_descriptor_update_template = true,
+ .KHR_device_group = true,
+ .KHR_draw_indirect_count = true,
+ .KHR_driver_properties = true,
+ .KHR_external_fence = device->has_syncobj_wait,
+ .KHR_external_fence_fd = device->has_syncobj_wait,
+ .KHR_external_memory = true,
+ .KHR_external_memory_fd = true,
+ .KHR_external_semaphore = true,
+ .KHR_external_semaphore_fd = true,
+ .KHR_get_memory_requirements2 = true,
+ .KHR_image_format_list = true,
+ .KHR_imageless_framebuffer = true,
+#ifdef ANV_USE_WSI_PLATFORM
+ .KHR_incremental_present = true,
+#endif
+ .KHR_maintenance1 = true,
+ .KHR_maintenance2 = true,
+ .KHR_maintenance3 = true,
+ .KHR_multiview = true,
+ .KHR_performance_query =
+ device->use_softpin && device->perf &&
+ (device->perf->i915_perf_version >= 3 ||
+ INTEL_DEBUG & DEBUG_NO_OACONFIG) &&
+ device->use_call_secondary,
+ .KHR_pipeline_executable_properties = true,
+ .KHR_push_descriptor = true,
+ .KHR_relaxed_block_layout = true,
+ .KHR_sampler_mirror_clamp_to_edge = true,
+ .KHR_sampler_ycbcr_conversion = true,
+ .KHR_separate_depth_stencil_layouts = true,
+ .KHR_shader_atomic_int64 = device->info.ver >= 9 &&
+ device->use_softpin,
+ .KHR_shader_clock = true,
+ .KHR_shader_draw_parameters = true,
+ .KHR_shader_float16_int8 = device->info.ver >= 8,
+ .KHR_shader_float_controls = device->info.ver >= 8,
+ .KHR_shader_non_semantic_info = true,
+ .KHR_shader_subgroup_extended_types = device->info.ver >= 8,
+ .KHR_shader_terminate_invocation = true,
+ .KHR_spirv_1_4 = true,
+ .KHR_storage_buffer_storage_class = true,
+#ifdef ANV_USE_WSI_PLATFORM
+ .KHR_swapchain = true,
+ .KHR_swapchain_mutable_format = true,
+#endif
+ .KHR_timeline_semaphore = true,
+ .KHR_uniform_buffer_standard_layout = true,
+ .KHR_variable_pointers = true,
+ .KHR_vulkan_memory_model = true,
+ .KHR_workgroup_memory_explicit_layout = true,
+ .KHR_zero_initialize_workgroup_memory = true,
+ .EXT_4444_formats = true,
+ .EXT_buffer_device_address = device->has_a64_buffer_access,
+ .EXT_calibrated_timestamps = device->has_reg_timestamp,
+ .EXT_conditional_rendering = device->info.ver >= 8 ||
+ device->info.is_haswell,
+ .EXT_conservative_rasterization = device->info.ver >= 9,
+ .EXT_custom_border_color = device->info.ver >= 8,
+ .EXT_depth_clip_enable = true,
+ .EXT_descriptor_indexing = device->has_a64_buffer_access &&
+ device->has_bindless_images,
+#ifdef VK_USE_PLATFORM_DISPLAY_KHR
+ .EXT_display_control = true,
+#endif
+ .EXT_extended_dynamic_state = true,
+ .EXT_external_memory_dma_buf = true,
+ .EXT_external_memory_host = true,
+ .EXT_fragment_shader_interlock = device->info.ver >= 9,
+ .EXT_global_priority = device->has_context_priority,
+ .EXT_host_query_reset = true,
+ .EXT_image_robustness = true,
+ .EXT_image_drm_format_modifier = true,
+ .EXT_index_type_uint8 = true,
+ .EXT_inline_uniform_block = true,
+ .EXT_line_rasterization = true,
+ .EXT_memory_budget = device->has_mem_available,
+ .EXT_pci_bus_info = true,
+ .EXT_pipeline_creation_cache_control = true,
+ .EXT_pipeline_creation_feedback = true,
+ .EXT_post_depth_coverage = device->info.ver >= 9,
+ .EXT_private_data = true,
+ .EXT_queue_family_foreign = true,
+ .EXT_robustness2 = true,
+ .EXT_sample_locations = true,
+ .EXT_sampler_filter_minmax = device->info.ver >= 9,
+ .EXT_scalar_block_layout = true,
+ .EXT_separate_stencil_usage = true,
+ .EXT_shader_atomic_float = true,
+ .EXT_shader_demote_to_helper_invocation = true,
+ .EXT_shader_stencil_export = device->info.ver >= 9,
+ .EXT_shader_subgroup_ballot = true,
+ .EXT_shader_subgroup_vote = true,
+ .EXT_shader_viewport_index_layer = true,
+ .EXT_subgroup_size_control = true,
+ .EXT_texel_buffer_alignment = true,
+ .EXT_transform_feedback = true,
+ .EXT_vertex_attribute_divisor = true,
+ .EXT_ycbcr_image_arrays = true,
+#ifdef ANDROID
+ .ANDROID_external_memory_android_hardware_buffer = true,
+ .ANDROID_native_buffer = true,
+#endif
+ .GOOGLE_decorate_string = true,
+ .GOOGLE_hlsl_functionality1 = true,
+ .GOOGLE_user_type = true,
+ .INTEL_performance_query = device->perf &&
+ device->perf->i915_perf_version >= 3,
+ .INTEL_shader_integer_functions2 = device->info.ver >= 8,
+ .NV_compute_shader_derivatives = true,
+ };
+}
+
+static void
+anv_init_meminfo(struct anv_physical_device *device, int fd)
+{
+ uint64_t heap_size = anv_compute_heap_size(fd, device->gtt_size);
+
+ if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
+ /* When running with an overridden PCI ID, we may get a GTT size from
+ * the kernel that is greater than 2 GiB but the execbuf check for 48bit
+ * address support can still fail. Just clamp the address space size to
+ * 2 GiB if we don't have 48-bit support.
+ */
+      mesa_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
+                "no support for 48-bit addresses",
+ __FILE__, __LINE__);
+ heap_size = 2ull << 30;
+ }
+
+ device->sys.size = heap_size;
+}
+
static VkResult
anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
{
@@ -135,7 +345,7 @@ anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
anv_perf_warn(NULL, NULL,
"Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
- if (anv_gem_get_aperture(fd, &device->gtt_size) == -1) {
+ if (gen_get_aperture_size(fd, &device->gtt_size) == -1) {
return vk_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"failed to get aperture size: %m");
@@ -145,64 +355,92 @@ anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
/* We only allow 48-bit addresses with softpin because knowing the actual
* address is required for the vertex cache flush workaround.
*/
- device->supports_48bit_addresses = (device->info.gen >= 8) &&
+ device->supports_48bit_addresses = (device->info.ver >= 8) &&
device->has_softpin &&
device->gtt_size > (4ULL << 30 /* GiB */);
- uint64_t heap_size = anv_compute_heap_size(fd, device->gtt_size);
+ anv_init_meminfo(device, fd);
+ assert(device->sys.size != 0);
- if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
- /* When running with an overridden PCI ID, we may get a GTT size from
- * the kernel that is greater than 2 GiB but the execbuf check for 48bit
- * address support can still fail. Just clamp the address space size to
- * 2 GiB if we don't have 48-bit support.
+ if (device->vram.size > 0) {
+ /* We can create 2 different heaps when we have local memory support,
+ * first heap with local memory size and second with system memory size.
*/
- intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
- "not support for 48-bit addresses",
- __FILE__, __LINE__);
- heap_size = 2ull << 30;
- }
+ device->memory.heap_count = 2;
+ device->memory.heaps[0] = (struct anv_memory_heap) {
+ .size = device->vram.size,
+ .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ .is_local_mem = true,
+ };
+ device->memory.heaps[1] = (struct anv_memory_heap) {
+ .size = device->sys.size,
+ .flags = 0,
+ .is_local_mem = false,
+ };
- device->memory.heap_count = 1;
- device->memory.heaps[0] = (struct anv_memory_heap) {
- .size = heap_size,
- .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
- };
+ device->memory.type_count = 3;
+ device->memory.types[0] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ .heapIndex = 0,
+ };
+ device->memory.types[1] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+ .heapIndex = 1,
+ };
+ device->memory.types[2] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ .heapIndex = 0,
+ };
+ } else if (device->info.has_llc) {
+ device->memory.heap_count = 1;
+ device->memory.heaps[0] = (struct anv_memory_heap) {
+ .size = device->sys.size,
+ .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ .is_local_mem = false,
+ };
- uint32_t type_count = 0;
- for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
- if (device->info.has_llc) {
- /* Big core GPUs share LLC with the CPU and thus one memory type can be
- * both cached and coherent at the same time.
- */
- device->memory.types[type_count++] = (struct anv_memory_type) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
- VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = heap,
- };
- } else {
- /* The spec requires that we expose a host-visible, coherent memory
- * type, but Atom GPUs don't share LLC. Thus we offer two memory types
- * to give the application a choice between cached, but not coherent and
- * coherent but uncached (WC though).
- */
- device->memory.types[type_count++] = (struct anv_memory_type) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
- .heapIndex = heap,
- };
- device->memory.types[type_count++] = (struct anv_memory_type) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = heap,
- };
- }
+ /* Big core GPUs share LLC with the CPU and thus one memory type can be
+ * both cached and coherent at the same time.
+ */
+ device->memory.type_count = 1;
+ device->memory.types[0] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+ .heapIndex = 0,
+ };
+ } else {
+ device->memory.heap_count = 1;
+ device->memory.heaps[0] = (struct anv_memory_heap) {
+ .size = device->sys.size,
+ .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ .is_local_mem = false,
+ };
+
+ /* The spec requires that we expose a host-visible, coherent memory
+ * type, but Atom GPUs don't share LLC. Thus we offer two memory types
+ * to give the application a choice between cached, but not coherent and
+ * coherent but uncached (WC though).
+ */
+ device->memory.type_count = 2;
+ device->memory.types[0] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+ .heapIndex = 0,
+ };
+ device->memory.types[1] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ .heapIndex = 0,
+ };
}
- device->memory.type_count = type_count;
return VK_SUCCESS;
}
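The heap/type tables built above are what applications walk when allocating.
A hedged sketch of the usual selection loop (find_memory_type is an
illustrative helper, not driver code):

   #include <vulkan/vulkan.h>

   /* Return the first memory type allowed by 'type_bits' (from
    * VkMemoryRequirements::memoryTypeBits) that has all 'required' flags.
    * Against the tables above, a HOST_VISIBLE | HOST_COHERENT request lands
    * on the single cached+coherent type on LLC parts and on the
    * write-combined type on non-LLC parts.
    */
   static int32_t
   find_memory_type(const VkPhysicalDeviceMemoryProperties *mem_props,
                    uint32_t type_bits, VkMemoryPropertyFlags required)
   {
      for (uint32_t i = 0; i < mem_props->memoryTypeCount; i++) {
         if ((type_bits & (1u << i)) &&
             (mem_props->memoryTypes[i].propertyFlags & required) == required)
            return (int32_t)i;
      }
      return -1;
   }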
@@ -249,26 +487,8 @@ anv_physical_device_init_uuids(struct anv_physical_device *device)
_mesa_sha1_final(&sha1_ctx, sha1);
memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);
- /* The driver UUID is used for determining sharability of images and memory
- * between two Vulkan instances in separate processes. People who want to
- * share memory need to also check the device UUID (below) so all this
- * needs to be is the build-id.
- */
- memcpy(device->driver_uuid, build_id_data(note), VK_UUID_SIZE);
-
- /* The device UUID uniquely identifies the given device within the machine.
- * Since we never have more than one device, this doesn't need to be a real
- * UUID. However, on the off-chance that someone tries to use this to
- * cache pre-tiled images or something of the like, we use the PCI ID and
- * some bits of ISL info to ensure that this is safe.
- */
- _mesa_sha1_init(&sha1_ctx);
- _mesa_sha1_update(&sha1_ctx, &device->info.chipset_id,
- sizeof(device->info.chipset_id));
- _mesa_sha1_update(&sha1_ctx, &device->isl_dev.has_bit6_swizzling,
- sizeof(device->isl_dev.has_bit6_swizzling));
- _mesa_sha1_final(&sha1_ctx, sha1);
- memcpy(device->device_uuid, sha1, VK_UUID_SIZE);
+ intel_uuid_compute_driver_id(device->driver_uuid, &device->info, VK_UUID_SIZE);
+ intel_uuid_compute_device_id(device->device_uuid, &device->isl_dev, VK_UUID_SIZE);
return VK_SUCCESS;
}
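Applications compare these UUIDs before sharing memory across instances or
processes. A sketch of reading them back via VkPhysicalDeviceIDProperties
(requires Vulkan 1.1, or the corresponding KHR capability extensions):

   #include <stdio.h>
   #include <vulkan/vulkan.h>

   static void
   print_driver_uuid(VkPhysicalDevice pdev)
   {
      VkPhysicalDeviceIDProperties id_props = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,
      };
      VkPhysicalDeviceProperties2 props2 = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
         .pNext = &id_props,
      };
      vkGetPhysicalDeviceProperties2(pdev, &props2);

      /* Sharing is only safe when both driverUUID and deviceUUID match. */
      for (int i = 0; i < VK_UUID_SIZE; i++)
         printf("%02x", id_props.driverUUID[i]);
      printf("\n");
   }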
@@ -304,6 +524,121 @@ anv_physical_device_free_disk_cache(struct anv_physical_device *device)
#endif
}
+/* The ANV_QUEUE_OVERRIDE environment variable is a comma-separated list of
+ * queue overrides.
+ *
+ * To override the number of queues:
+ * * "gc" is for graphics queues with compute support
+ * * "g" is for graphics queues with no compute support
+ * * "c" is for compute queues with no graphics support
+ *
+ * For example, ANV_QUEUE_OVERRIDE=gc=2,c=1 would override the number of
+ * advertised queues to be 2 queues with graphics+compute support, and 1 queue
+ * with compute-only support.
+ *
+ * ANV_QUEUE_OVERRIDE=c=1 would override the number of advertised queues to
+ * include 1 queue with compute-only support, but it will not change the
+ * number of graphics+compute queues.
+ *
+ * ANV_QUEUE_OVERRIDE=gc=0,c=1 would override the number of advertised queues
+ * to include 1 queue with compute-only support, and it would override the
+ * number of graphics+compute queues to be 0.
+ */
+static void
+anv_override_engine_counts(int *gc_count, int *g_count, int *c_count)
+{
+ int gc_override = -1;
+ int g_override = -1;
+ int c_override = -1;
+ char *env = getenv("ANV_QUEUE_OVERRIDE");
+
+ if (env == NULL)
+ return;
+
+ env = strdup(env);
+ char *save = NULL;
+ char *next = strtok_r(env, ",", &save);
+ while (next != NULL) {
+ if (strncmp(next, "gc=", 3) == 0) {
+ gc_override = strtol(next + 3, NULL, 0);
+ } else if (strncmp(next, "g=", 2) == 0) {
+ g_override = strtol(next + 2, NULL, 0);
+ } else if (strncmp(next, "c=", 2) == 0) {
+ c_override = strtol(next + 2, NULL, 0);
+ } else {
+ mesa_logw("Ignoring unsupported ANV_QUEUE_OVERRIDE token: %s", next);
+ }
+ next = strtok_r(NULL, ",", &save);
+ }
+ free(env);
+ if (gc_override >= 0)
+ *gc_count = gc_override;
+ if (g_override >= 0)
+ *g_count = g_override;
+ if (*g_count > 0 && *gc_count <= 0 && (gc_override >= 0 || g_override >= 0))
+ mesa_logw("ANV_QUEUE_OVERRIDE: gc=0 with g > 0 violates the "
+ "Vulkan specification");
+ if (c_override >= 0)
+ *c_count = c_override;
+}
+
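A standalone sketch of the same tokenizing pattern, runnable outside the
driver (the override value here is only an example; strtok_r scribbles on
the string it scans, hence the strdup):

   #include <stdio.h>
   #include <stdlib.h>
   #include <string.h>

   int main(void)
   {
      setenv("ANV_QUEUE_OVERRIDE", "gc=2,c=1", 1);

      char *env = strdup(getenv("ANV_QUEUE_OVERRIDE"));
      char *save = NULL;
      for (char *tok = strtok_r(env, ",", &save); tok != NULL;
           tok = strtok_r(NULL, ",", &save)) {
         char *eq = strchr(tok, '=');
         if (eq != NULL)
            printf("queue kind '%.*s' -> %ld\n",
                   (int)(eq - tok), tok, strtol(eq + 1, NULL, 0));
      }
      free(env);
      return 0;
   }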
+static void
+anv_physical_device_init_queue_families(struct anv_physical_device *pdevice)
+{
+ uint32_t family_count = 0;
+
+ if (pdevice->engine_info) {
+ int gc_count =
+ anv_gem_count_engines(pdevice->engine_info, I915_ENGINE_CLASS_RENDER);
+ int g_count = 0;
+ int c_count = 0;
+
+ anv_override_engine_counts(&gc_count, &g_count, &c_count);
+
+ if (gc_count > 0) {
+ pdevice->queue.families[family_count++] = (struct anv_queue_family) {
+ .queueFlags = VK_QUEUE_GRAPHICS_BIT |
+ VK_QUEUE_COMPUTE_BIT |
+ VK_QUEUE_TRANSFER_BIT,
+ .queueCount = gc_count,
+ .engine_class = I915_ENGINE_CLASS_RENDER,
+ };
+ }
+ if (g_count > 0) {
+ pdevice->queue.families[family_count++] = (struct anv_queue_family) {
+ .queueFlags = VK_QUEUE_GRAPHICS_BIT |
+ VK_QUEUE_TRANSFER_BIT,
+ .queueCount = g_count,
+ .engine_class = I915_ENGINE_CLASS_RENDER,
+ };
+ }
+ if (c_count > 0) {
+ pdevice->queue.families[family_count++] = (struct anv_queue_family) {
+ .queueFlags = VK_QUEUE_COMPUTE_BIT |
+ VK_QUEUE_TRANSFER_BIT,
+ .queueCount = c_count,
+ .engine_class = I915_ENGINE_CLASS_RENDER,
+ };
+ }
+ /* Increase count below when other families are added as a reminder to
+ * increase the ANV_MAX_QUEUE_FAMILIES value.
+ */
+ STATIC_ASSERT(ANV_MAX_QUEUE_FAMILIES >= 3);
+ } else {
+ /* Default to a single render queue */
+ pdevice->queue.families[family_count++] = (struct anv_queue_family) {
+ .queueFlags = VK_QUEUE_GRAPHICS_BIT |
+ VK_QUEUE_COMPUTE_BIT |
+ VK_QUEUE_TRANSFER_BIT,
+ .queueCount = 1,
+ .engine_class = I915_ENGINE_CLASS_RENDER,
+ };
+ family_count = 1;
+ }
+ assert(family_count <= ANV_MAX_QUEUE_FAMILIES);
+ pdevice->queue.family_count = family_count;
+}
+
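These families surface one-to-one through
vkGetPhysicalDeviceQueueFamilyProperties. A minimal sketch of the
application-side query (fixed-size buffer for brevity):

   #include <stdio.h>
   #include <vulkan/vulkan.h>

   static void
   print_queue_families(VkPhysicalDevice pdev)
   {
      uint32_t count = 0;
      vkGetPhysicalDeviceQueueFamilyProperties(pdev, &count, NULL);

      VkQueueFamilyProperties families[8];
      if (count > 8)
         count = 8;
      vkGetPhysicalDeviceQueueFamilyProperties(pdev, &count, families);

      for (uint32_t i = 0; i < count; i++) {
         printf("family %u: %u queue(s), graphics=%d compute=%d\n",
                i, families[i].queueCount,
                !!(families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT),
                !!(families[i].queueFlags & VK_QUEUE_COMPUTE_BIT));
      }
   }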
static VkResult
anv_physical_device_try_create(struct anv_instance *instance,
drmDevicePtr drm_device,
@@ -318,8 +653,14 @@ anv_physical_device_try_create(struct anv_instance *instance,
brw_process_intel_debug_variable();
fd = open(path, O_RDWR | O_CLOEXEC);
- if (fd < 0)
- return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+ if (fd < 0) {
+ if (errno == ENOMEM) {
+ return vk_errorfi(instance, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
+ "Unable to open device %s: out of memory", path);
+ }
+ return vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
+ "Unable to open device %s: %m", path);
+ }
struct gen_device_info devinfo;
if (!gen_get_device_info_from_fd(fd, &devinfo)) {
@@ -330,15 +671,13 @@ anv_physical_device_try_create(struct anv_instance *instance,
const char *device_name = gen_get_device_name(devinfo.chipset_id);
if (devinfo.is_haswell) {
- intel_logw("Haswell Vulkan support is incomplete");
- } else if (devinfo.gen == 7 && !devinfo.is_baytrail) {
- intel_logw("Ivy Bridge Vulkan support is incomplete");
- } else if (devinfo.gen == 7 && devinfo.is_baytrail) {
- intel_logw("Bay Trail Vulkan support is incomplete");
- } else if (devinfo.gen >= 8 && devinfo.gen <= 11) {
- /* Gen8-11 fully supported */
- } else if (devinfo.gen == 12) {
- intel_logw("Vulkan is not yet fully supported on gen12");
+ mesa_logw("Haswell Vulkan support is incomplete");
+ } else if (devinfo.ver == 7 && !devinfo.is_baytrail) {
+ mesa_logw("Ivy Bridge Vulkan support is incomplete");
+ } else if (devinfo.ver == 7 && devinfo.is_baytrail) {
+ mesa_logw("Bay Trail Vulkan support is incomplete");
+ } else if (devinfo.ver >= 8 && devinfo.ver <= 12) {
+ /* Gfx8-12 fully supported */
} else {
result = vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
"Vulkan not yet supported on %s", device_name);
@@ -346,14 +685,24 @@ anv_physical_device_try_create(struct anv_instance *instance,
}
struct anv_physical_device *device =
- vk_alloc(&instance->alloc, sizeof(*device), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ vk_zalloc(&instance->vk.alloc, sizeof(*device), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (device == NULL) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_fd;
}
- device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ struct vk_physical_device_dispatch_table dispatch_table;
+ vk_physical_device_dispatch_table_from_entrypoints(
+ &dispatch_table, &anv_physical_device_entrypoints, true);
+
+ result = vk_physical_device_init(&device->vk, &instance->vk,
+ NULL, /* We set up extensions later */
+ &dispatch_table);
+ if (result != VK_SUCCESS) {
+ vk_error(result);
+ goto fail_alloc;
+ }
device->instance = instance;
assert(strlen(path) < ARRAY_SIZE(device->path));
@@ -372,14 +721,14 @@ anv_physical_device_try_create(struct anv_instance *instance,
device->pci_info.function = drm_device->businfo.pci->func;
device->cmd_parser_version = -1;
- if (device->info.gen == 7) {
+ if (device->info.ver == 7) {
device->cmd_parser_version =
anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
if (device->cmd_parser_version == -1) {
result = vk_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"failed to get command parser version");
- goto fail_alloc;
+ goto fail_base;
}
}
@@ -387,14 +736,14 @@ anv_physical_device_try_create(struct anv_instance *instance,
result = vk_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"kernel missing gem wait");
- goto fail_alloc;
+ goto fail_base;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
result = vk_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"kernel missing execbuf2");
- goto fail_alloc;
+ goto fail_base;
}
if (!device->info.has_llc &&
@@ -402,7 +751,7 @@ anv_physical_device_try_create(struct anv_instance *instance,
result = vk_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"kernel missing wc mmap");
- goto fail_alloc;
+ goto fail_base;
}
device->has_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN);
@@ -412,11 +761,18 @@ anv_physical_device_try_create(struct anv_instance *instance,
device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
device->has_syncobj_wait = device->has_syncobj &&
anv_gem_supports_syncobj_wait(fd);
+ device->has_syncobj_wait_available =
+ anv_gem_get_drm_cap(fd, DRM_CAP_SYNCOBJ_TIMELINE) != 0;
+
device->has_context_priority = anv_gem_has_context_priority(fd);
+ /* Initialize memory regions struct to 0. */
+ memset(&device->vram, 0, sizeof(device->vram));
+ memset(&device->sys, 0, sizeof(device->sys));
+
result = anv_physical_device_init_heaps(device, fd);
if (result != VK_SUCCESS)
- goto fail_alloc;
+ goto fail_base;
device->use_softpin = device->has_softpin &&
device->supports_48bit_addresses;
@@ -424,67 +780,61 @@ anv_physical_device_try_create(struct anv_instance *instance,
device->has_context_isolation =
anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);
+ device->has_exec_timeline =
+ anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_TIMELINE_FENCES);
+ if (env_var_as_boolean("ANV_QUEUE_THREAD_DISABLE", false))
+ device->has_exec_timeline = false;
+
+ device->has_thread_submit =
+ device->has_syncobj_wait_available && device->has_exec_timeline;
+
device->always_use_bindless =
env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);
+ device->use_call_secondary =
+ device->use_softpin &&
+ !env_var_as_boolean("ANV_DISABLE_SECONDARY_CMD_BUFFER_CALLS", false);
+
/* We first got the A64 messages on broadwell and we can only use them if
* we can pass addresses directly into the shader which requires softpin.
*/
- device->has_a64_buffer_access = device->info.gen >= 8 &&
+ device->has_a64_buffer_access = device->info.ver >= 8 &&
device->use_softpin;
/* We first get bindless image access on Skylake and we can only really do
* it if we don't have any relocations so we need softpin.
*/
- device->has_bindless_images = device->info.gen >= 9 &&
+ device->has_bindless_images = device->info.ver >= 9 &&
device->use_softpin;
/* We've had bindless samplers since Ivy Bridge (forever in Vulkan terms)
* because it's just a matter of setting the sampler address in the sample
* message header. However, we've not bothered to wire it up for vec4 so
- * we leave it disabled on gen7.
+ * we leave it disabled on gfx7.
*/
- device->has_bindless_samplers = device->info.gen >= 8;
+ device->has_bindless_samplers = device->info.ver >= 8;
device->has_implicit_ccs = device->info.has_aux_map;
+ /* Check if we can read the GPU timestamp register from the CPU */
+ uint64_t u64_ignore;
+ device->has_reg_timestamp = anv_gem_reg_read(fd, TIMESTAMP | I915_REG_READ_8B_WA,
+ &u64_ignore) == 0;
+
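has_reg_timestamp gates VK_EXT_calibrated_timestamps. A hedged sketch of
sampling GPU and CPU clocks together, assuming a VkDevice created with that
extension enabled (error handling elided):

   #include <vulkan/vulkan.h>

   static void
   sample_clocks(VkDevice device, uint64_t timestamps[2])
   {
      PFN_vkGetCalibratedTimestampsEXT get_ts =
         (PFN_vkGetCalibratedTimestampsEXT)
         vkGetDeviceProcAddr(device, "vkGetCalibratedTimestampsEXT");

      /* The RAW domain pairs with the CLOCK_MONOTONIC_RAW fallback above. */
      const VkCalibratedTimestampInfoEXT infos[2] = {
         { .sType = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT,
           .timeDomain = VK_TIME_DOMAIN_DEVICE_EXT },
         { .sType = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT,
           .timeDomain = VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT },
      };
      uint64_t max_deviation;
      get_ts(device, 2, infos, timestamps, &max_deviation);
   }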
uint64_t avail_mem;
device->has_mem_available = os_get_available_system_memory(&avail_mem);
device->always_flush_cache =
driQueryOptionb(&instance->dri_options, "always_flush_cache");
- /* Starting with Gen10, the timestamp frequency of the command streamer may
- * vary from one part to another. We can query the value from the kernel.
- */
- if (device->info.gen >= 10) {
- int timestamp_frequency =
- anv_gem_get_param(fd, I915_PARAM_CS_TIMESTAMP_FREQUENCY);
-
- if (timestamp_frequency < 0)
- intel_logw("Kernel 4.16-rc1+ required to properly query CS timestamp frequency");
- else
- device->info.timestamp_frequency = timestamp_frequency;
- }
+ device->has_mmap_offset =
+ anv_gem_get_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;
/* GENs prior to 8 do not support EU/Subslice info */
- if (device->info.gen >= 8) {
- device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
- device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);
+ device->subslice_total = gen_device_info_subslice_total(&device->info);
+ device->eu_total = gen_device_info_eu_total(&device->info);
- /* Without this information, we cannot get the right Braswell
- * brandstrings, and we have to use conservative numbers for GPGPU on
- * many platforms, but otherwise, things will just work.
- */
- if (device->subslice_total < 1 || device->eu_total < 1) {
- intel_logw("Kernel 4.1 required to properly query GPU properties");
- }
- } else if (device->info.gen == 7) {
- device->subslice_total = 1 << (device->info.gt - 1);
- }
-
- if (device->info.is_cherryview &&
- device->subslice_total > 0 && device->eu_total > 0) {
+ if (device->info.is_cherryview) {
/* Logical CS threads = EUs per subslice * num threads per EU */
uint32_t max_cs_threads =
device->eu_total / device->subslice_total * device->info.num_thread_per_eu;
@@ -497,29 +847,30 @@ anv_physical_device_try_create(struct anv_instance *instance,
device->compiler = brw_compiler_create(NULL, &device->info);
if (device->compiler == NULL) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- goto fail_alloc;
+ goto fail_base;
}
device->compiler->shader_debug_log = compiler_debug_log;
device->compiler->shader_perf_log = compiler_perf_log;
device->compiler->supports_pull_constants = false;
device->compiler->constant_buffer_0_is_relative =
- device->info.gen < 8 || !device->has_context_isolation;
+ device->info.ver < 8 || !device->has_context_isolation;
device->compiler->supports_shader_constants = true;
device->compiler->compact_params = false;
+ device->compiler->indirect_ubos_use_sampler = device->info.ver < 12;
/* Broadwell PRM says:
*
- * "Before Gen8, there was a historical configuration control field to
+ * "Before Gfx8, there was a historical configuration control field to
* swizzle address bit[6] for in X/Y tiling modes. This was set in three
* different places: TILECTL[1:0], ARB_MODE[5:4], and
* DISP_ARB_CTL[14:13].
*
- * For Gen8 and subsequent generations, the swizzle fields are all
+ * For Gfx8 and subsequent generations, the swizzle fields are all
* reserved, and the CPU's memory controller performs all address
* swizzling modifications."
*/
bool swizzled =
- device->info.gen < 8 && anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
+ device->info.ver < 8 && anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
isl_device_init(&device->isl_dev, &device->info, swizzled);
@@ -529,7 +880,7 @@ anv_physical_device_try_create(struct anv_instance *instance,
anv_physical_device_init_disk_cache(device);
- if (instance->enabled_extensions.KHR_display) {
+ if (instance->vk.enabled_extensions.KHR_display) {
master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
if (master_fd >= 0) {
/* prod the device with a GETPARAM call which will fail if
@@ -543,28 +894,36 @@ anv_physical_device_try_create(struct anv_instance *instance,
}
device->master_fd = master_fd;
+ device->engine_info = anv_gem_get_engine_info(fd);
+ anv_physical_device_init_queue_families(device);
+
result = anv_init_wsi(device);
if (result != VK_SUCCESS)
- goto fail_disk_cache;
+ goto fail_engine_info;
- device->perf = anv_get_perf(&device->info, fd);
+ anv_physical_device_init_perf(device, fd);
- anv_physical_device_get_supported_extensions(device,
- &device->supported_extensions);
+ anv_measure_device_init(device);
+ get_device_extensions(device, &device->vk.supported_extensions);
device->local_fd = fd;
+ anv_genX(&device->info, init_physical_device_state)(device);
+
*device_out = device;
return VK_SUCCESS;
-fail_disk_cache:
+fail_engine_info:
+ free(device->engine_info);
anv_physical_device_free_disk_cache(device);
fail_compiler:
ralloc_free(device->compiler);
+fail_base:
+ vk_physical_device_finish(&device->vk);
fail_alloc:
- vk_free(&instance->alloc, device);
+ vk_free(&instance->vk.alloc, device);
fail_fd:
close(fd);
if (master_fd != -1)
@@ -576,13 +935,16 @@ static void
anv_physical_device_destroy(struct anv_physical_device *device)
{
anv_finish_wsi(device);
+ anv_measure_device_destroy(device);
+ free(device->engine_info);
anv_physical_device_free_disk_cache(device);
ralloc_free(device->compiler);
ralloc_free(device->perf);
close(device->local_fd);
if (device->master_fd >= 0)
close(device->master_fd);
- vk_free(&device->instance->alloc, device);
+ vk_physical_device_finish(&device->vk);
+ vk_free(&device->instance->vk.alloc, device);
}
static void *
@@ -617,17 +979,24 @@ VkResult anv_EnumerateInstanceExtensionProperties(
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties)
{
- VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
+ if (pLayerName)
+ return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
- for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
- if (anv_instance_extensions_supported.extensions[i]) {
- vk_outarray_append(&out, prop) {
- *prop = anv_instance_extensions[i];
- }
- }
- }
+ return vk_enumerate_instance_extension_properties(
+ &instance_extensions, pPropertyCount, pProperties);
+}
- return vk_outarray_status(&out);
+static void
+anv_init_dri_options(struct anv_instance *instance)
+{
+ driParseOptionInfo(&instance->available_dri_options, anv_dri_options,
+ ARRAY_SIZE(anv_dri_options));
+ driParseConfigFiles(&instance->dri_options,
+ &instance->available_dri_options, 0, "anv", NULL,
+ instance->vk.app_info.app_name,
+ instance->vk.app_info.app_version,
+ instance->vk.app_info.engine_name,
+ instance->vk.app_info.engine_version);
}
VkResult anv_CreateInstance(
@@ -640,118 +1009,34 @@ VkResult anv_CreateInstance(
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
- struct anv_instance_extension_table enabled_extensions = {};
- for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
- int idx;
- for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
- if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
- anv_instance_extensions[idx].extensionName) == 0)
- break;
- }
-
- if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
- return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
-
- if (!anv_instance_extensions_supported.extensions[idx])
- return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
-
- enabled_extensions.extensions[idx] = true;
- }
+ if (pAllocator == NULL)
+ pAllocator = &default_alloc;
- instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ instance = vk_alloc(pAllocator, sizeof(*instance), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!instance)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
-
- if (pAllocator)
- instance->alloc = *pAllocator;
- else
- instance->alloc = default_alloc;
-
- instance->app_info = (struct anv_app_info) { .api_version = 0 };
- if (pCreateInfo->pApplicationInfo) {
- const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;
-
- instance->app_info.app_name =
- vk_strdup(&instance->alloc, app->pApplicationName,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
- instance->app_info.app_version = app->applicationVersion;
-
- instance->app_info.engine_name =
- vk_strdup(&instance->alloc, app->pEngineName,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
- instance->app_info.engine_version = app->engineVersion;
-
- instance->app_info.api_version = app->apiVersion;
- }
-
- if (instance->app_info.api_version == 0)
- instance->app_info.api_version = VK_API_VERSION_1_0;
+ struct vk_instance_dispatch_table dispatch_table;
+ vk_instance_dispatch_table_from_entrypoints(
+ &dispatch_table, &anv_instance_entrypoints, true);
- instance->enabled_extensions = enabled_extensions;
-
- for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
- /* Vulkan requires that entrypoints for extensions which have not been
- * enabled must not be advertised.
- */
- if (!anv_instance_entrypoint_is_enabled(i, instance->app_info.api_version,
- &instance->enabled_extensions)) {
- instance->dispatch.entrypoints[i] = NULL;
- } else {
- instance->dispatch.entrypoints[i] =
- anv_instance_dispatch_table.entrypoints[i];
- }
- }
-
- for (unsigned i = 0; i < ARRAY_SIZE(instance->physical_device_dispatch.entrypoints); i++) {
- /* Vulkan requires that entrypoints for extensions which have not been
- * enabled must not be advertised.
- */
- if (!anv_physical_device_entrypoint_is_enabled(i, instance->app_info.api_version,
- &instance->enabled_extensions)) {
- instance->physical_device_dispatch.entrypoints[i] = NULL;
- } else {
- instance->physical_device_dispatch.entrypoints[i] =
- anv_physical_device_dispatch_table.entrypoints[i];
- }
- }
-
- for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
- /* Vulkan requires that entrypoints for extensions which have not been
- * enabled must not be advertised.
- */
- if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
- &instance->enabled_extensions, NULL)) {
- instance->device_dispatch.entrypoints[i] = NULL;
- } else {
- instance->device_dispatch.entrypoints[i] =
- anv_device_dispatch_table.entrypoints[i];
- }
+ result = vk_instance_init(&instance->vk, &instance_extensions,
+ &dispatch_table, pCreateInfo, pAllocator);
+ if (result != VK_SUCCESS) {
+ vk_free(pAllocator, instance);
+ return vk_error(result);
}
instance->physical_devices_enumerated = false;
list_inithead(&instance->physical_devices);
- result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
- if (result != VK_SUCCESS) {
- vk_free2(&default_alloc, pAllocator, instance);
- return vk_error(result);
- }
-
instance->pipeline_cache_enabled =
env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);
- glsl_type_singleton_init_or_ref();
-
VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
- driParseOptionInfo(&instance->available_dri_options, anv_dri_options_xml);
- driParseConfigFiles(&instance->dri_options, &instance->available_dri_options,
- 0, "anv", NULL,
- instance->app_info.engine_name,
- instance->app_info.engine_version);
+ anv_init_dri_options(instance);
*pInstance = anv_instance_to_handle(instance);
@@ -771,19 +1056,13 @@ void anv_DestroyInstance(
&instance->physical_devices, link)
anv_physical_device_destroy(pdevice);
- vk_free(&instance->alloc, (char *)instance->app_info.app_name);
- vk_free(&instance->alloc, (char *)instance->app_info.engine_name);
-
VG(VALGRIND_DESTROY_MEMPOOL(instance));
- vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
-
- glsl_type_singleton_decref();
-
driDestroyOptionCache(&instance->dri_options);
driDestroyOptionInfo(&instance->available_dri_options);
- vk_free(&instance->alloc, instance);
+ vk_instance_finish(&instance->vk);
+ vk_free(&instance->vk.alloc, instance);
}
static VkResult
@@ -902,15 +1181,15 @@ void anv_GetPhysicalDeviceFeatures(
.depthClamp = true,
.depthBiasClamp = true,
.fillModeNonSolid = true,
- .depthBounds = pdevice->info.gen >= 12,
+ .depthBounds = pdevice->info.ver >= 12,
.wideLines = true,
.largePoints = true,
.alphaToOne = true,
.multiViewport = true,
.samplerAnisotropy = true,
- .textureCompressionETC2 = pdevice->info.gen >= 8 ||
+ .textureCompressionETC2 = pdevice->info.ver >= 8 ||
pdevice->info.is_baytrail,
- .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
+ .textureCompressionASTC_LDR = pdevice->info.ver >= 9, /* FINISHME CHV */
.textureCompressionBC = true,
.occlusionQueryPrecise = true,
.pipelineStatisticsQuery = true,
@@ -927,12 +1206,11 @@ void anv_GetPhysicalDeviceFeatures(
.shaderStorageImageArrayDynamicIndexing = true,
.shaderClipDistance = true,
.shaderCullDistance = true,
- .shaderFloat64 = pdevice->info.gen >= 8 &&
+ .shaderFloat64 = pdevice->info.ver >= 8 &&
pdevice->info.has_64bit_float,
- .shaderInt64 = pdevice->info.gen >= 8 &&
- pdevice->info.has_64bit_int,
- .shaderInt16 = pdevice->info.gen >= 8,
- .shaderResourceMinLod = pdevice->info.gen >= 9,
+ .shaderInt64 = pdevice->info.ver >= 8,
+ .shaderInt16 = pdevice->info.ver >= 8,
+ .shaderResourceMinLod = pdevice->info.ver >= 9,
.variableMultisampleRate = true,
.inheritedQueries = true,
};
@@ -942,7 +1220,7 @@ void anv_GetPhysicalDeviceFeatures(
pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
- struct anv_app_info *app_info = &pdevice->instance->app_info;
+ struct vk_app_info *app_info = &pdevice->instance->vk.app_info;
/* The new DOOM and Wolfenstein games require depthBounds without
* checking for it. They seem to run fine without it so just claim it's
@@ -958,9 +1236,9 @@ anv_get_physical_device_features_1_1(struct anv_physical_device *pdevice,
{
assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES);
- f->storageBuffer16BitAccess = pdevice->info.gen >= 8;
- f->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
- f->storagePushConstant16 = pdevice->info.gen >= 8;
+ f->storageBuffer16BitAccess = pdevice->info.ver >= 8;
+ f->uniformAndStorageBuffer16BitAccess = pdevice->info.ver >= 8;
+ f->storagePushConstant16 = pdevice->info.ver >= 8;
f->storageInputOutput16 = false;
f->multiview = true;
f->multiviewGeometryShader = true;
@@ -980,14 +1258,14 @@ anv_get_physical_device_features_1_2(struct anv_physical_device *pdevice,
f->samplerMirrorClampToEdge = true;
f->drawIndirectCount = true;
- f->storageBuffer8BitAccess = pdevice->info.gen >= 8;
- f->uniformAndStorageBuffer8BitAccess = pdevice->info.gen >= 8;
- f->storagePushConstant8 = pdevice->info.gen >= 8;
- f->shaderBufferInt64Atomics = pdevice->info.gen >= 9 &&
+ f->storageBuffer8BitAccess = pdevice->info.ver >= 8;
+ f->uniformAndStorageBuffer8BitAccess = pdevice->info.ver >= 8;
+ f->storagePushConstant8 = pdevice->info.ver >= 8;
+ f->shaderBufferInt64Atomics = pdevice->info.ver >= 9 &&
pdevice->use_softpin;
f->shaderSharedInt64Atomics = false;
- f->shaderFloat16 = pdevice->info.gen >= 8;
- f->shaderInt8 = pdevice->info.gen >= 8;
+ f->shaderFloat16 = pdevice->info.ver >= 8;
+ f->shaderInt8 = pdevice->info.ver >= 8;
bool descIndexing = pdevice->has_a64_buffer_access &&
pdevice->has_bindless_images;
@@ -1010,10 +1288,10 @@ anv_get_physical_device_features_1_2(struct anv_physical_device *pdevice,
f->descriptorBindingStorageTexelBufferUpdateAfterBind = descIndexing;
f->descriptorBindingUpdateUnusedWhilePending = descIndexing;
f->descriptorBindingPartiallyBound = descIndexing;
- f->descriptorBindingVariableDescriptorCount = false;
+ f->descriptorBindingVariableDescriptorCount = descIndexing;
f->runtimeDescriptorArray = descIndexing;
- f->samplerFilterMinmax = pdevice->info.gen >= 9;
+ f->samplerFilterMinmax = pdevice->info.ver >= 9;
f->scalarBlockLayout = true;
f->imagelessFramebuffer = true;
f->uniformBufferStandardLayout = true;
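The vk_foreach_struct walk in anv_GetPhysicalDeviceFeatures2 below fills in
whatever structs the application chains into pNext. A sketch of the calling
side for one of the cases handled there:

   #include <vulkan/vulkan.h>

   static VkBool32
   supports_conditional_rendering(VkPhysicalDevice pdev)
   {
      VkPhysicalDeviceConditionalRenderingFeaturesEXT cond = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT,
      };
      VkPhysicalDeviceFeatures2 features2 = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
         .pNext = &cond,
      };
      vkGetPhysicalDeviceFeatures2(pdev, &features2);
      return cond.conditionalRendering;
   }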
@@ -1055,6 +1333,14 @@ void anv_GetPhysicalDeviceFeatures2(
vk_foreach_struct(ext, pFeatures->pNext) {
switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
+ VkPhysicalDevice4444FormatsFeaturesEXT *features =
+ (VkPhysicalDevice4444FormatsFeaturesEXT *)ext;
+ features->formatA4R4G4B4 = true;
+ features->formatA4B4G4R4 = false;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: {
VkPhysicalDevice8BitStorageFeaturesKHR *features =
(VkPhysicalDevice8BitStorageFeaturesKHR *)ext;
@@ -1101,13 +1387,21 @@ void anv_GetPhysicalDeviceFeatures2(
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
(VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
- features->conditionalRendering = pdevice->info.gen >= 8 ||
+ features->conditionalRendering = pdevice->info.ver >= 8 ||
pdevice->info.is_haswell;
- features->inheritedConditionalRendering = pdevice->info.gen >= 8 ||
+ features->inheritedConditionalRendering = pdevice->info.ver >= 8 ||
pdevice->info.is_haswell;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: {
+ VkPhysicalDeviceCustomBorderColorFeaturesEXT *features =
+ (VkPhysicalDeviceCustomBorderColorFeaturesEXT *)ext;
+ features->customBorderColors = pdevice->info.ver >= 8;
+ features->customBorderColorWithoutFormat = pdevice->info.ver >= 8;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
(VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
@@ -1125,8 +1419,8 @@ void anv_GetPhysicalDeviceFeatures2(
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: {
VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *features =
(VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *)ext;
- features->fragmentShaderSampleInterlock = pdevice->info.gen >= 9;
- features->fragmentShaderPixelInterlock = pdevice->info.gen >= 9;
+ features->fragmentShaderSampleInterlock = pdevice->info.ver >= 9;
+ features->fragmentShaderPixelInterlock = pdevice->info.ver >= 9;
features->fragmentShaderShadingRateInterlock = false;
break;
}
@@ -1164,6 +1458,13 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT: {
+ VkPhysicalDeviceImageRobustnessFeaturesEXT *features =
+ (VkPhysicalDeviceImageRobustnessFeaturesEXT *)ext;
+ features->robustImageAccess = true;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
(VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
@@ -1184,15 +1485,15 @@ void anv_GetPhysicalDeviceFeatures2(
(VkPhysicalDeviceLineRasterizationFeaturesEXT *)ext;
features->rectangularLines = true;
features->bresenhamLines = true;
- /* Support for Smooth lines with MSAA was removed on gen11. From the
+ /* Support for Smooth lines with MSAA was removed on gfx11. From the
* BSpec section "Multisample ModesState" table for "AA Line Support
* Requirements":
*
- * GEN10:BUG:######## NUM_MULTISAMPLES == 1
+ * GFX10:BUG:######## NUM_MULTISAMPLES == 1
*
* Fortunately, this isn't a case most people care about.
*/
- features->smoothLines = pdevice->info.gen < 10;
+ features->smoothLines = pdevice->info.ver < 10;
features->stippledRectangularLines = false;
features->stippledBresenhamLines = true;
features->stippledSmoothLines = false;
@@ -1215,6 +1516,22 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR: {
+ VkPhysicalDevicePerformanceQueryFeaturesKHR *feature =
+ (VkPhysicalDevicePerformanceQueryFeaturesKHR *)ext;
+ feature->performanceCounterQueryPools = true;
+ /* HW only supports a single configuration at a time. */
+ feature->performanceCounterMultipleQueryPools = false;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT: {
+ VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT *features =
+ (VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT *)ext;
+ features->pipelineCreationCacheControl = true;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: {
VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *features =
(VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *)ext;
@@ -1222,12 +1539,26 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
+ VkPhysicalDevicePrivateDataFeaturesEXT *features = (void *)ext;
+ features->privateData = true;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
VkPhysicalDeviceProtectedMemoryFeatures *features = (void *)ext;
CORE_FEATURE(1, 1, protectedMemory);
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: {
+ VkPhysicalDeviceRobustness2FeaturesEXT *features = (void *)ext;
+ features->robustBufferAccess2 = true;
+ features->robustImageAccess2 = true;
+ features->nullDescriptor = true;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
(VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
@@ -1249,6 +1580,23 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
+ VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features = (void *)ext;
+ features->shaderBufferFloat32Atomics = true;
+ features->shaderBufferFloat32AtomicAdd = false;
+ features->shaderBufferFloat64Atomics = false;
+ features->shaderBufferFloat64AtomicAdd = false;
+ features->shaderSharedFloat32Atomics = true;
+ features->shaderSharedFloat32AtomicAdd = false;
+ features->shaderSharedFloat64Atomics = false;
+ features->shaderSharedFloat64AtomicAdd = false;
+ features->shaderImageFloat32Atomics = true;
+ features->shaderImageFloat32AtomicAdd = false;
+ features->sparseImageFloat32Atomics = false;
+ features->sparseImageFloat32AtomicAdd = false;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: {
VkPhysicalDeviceShaderAtomicInt64FeaturesKHR *features = (void *)ext;
CORE_FEATURE(1, 2, shaderBufferInt64Atomics);
@@ -1276,6 +1624,13 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL: {
+ VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL *features =
+ (VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL *)ext;
+ features->shaderIntegerFunctions2 = true;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR: {
VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *features =
(VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *)ext;
@@ -1283,6 +1638,13 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR: {
+ VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR *features =
+ (VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR *)ext;
+ features->shaderTerminateInvocation = true;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT: {
VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *features =
(VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *)ext;
@@ -1351,6 +1713,16 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR: {
+ VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR *features =
+ (VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR *)ext;
+ features->workgroupMemoryExplicitLayout = true;
+ features->workgroupMemoryExplicitLayoutScalarBlockLayout = true;
+ features->workgroupMemoryExplicitLayout8BitAccess = true;
+ features->workgroupMemoryExplicitLayout16BitAccess = true;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: {
VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *features =
(VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *)ext;
@@ -1358,6 +1730,20 @@ void anv_GetPhysicalDeviceFeatures2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: {
+ VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features =
+ (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)ext;
+ features->extendedDynamicState = true;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR: {
+ VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR *features =
+ (VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR *)ext;
+ features->shaderZeroInitializeWorkgroupMemory = true;
+ break;
+ }
+
default:
anv_debug_ignored_stype(ext->sType);
break;
@@ -1372,6 +1758,8 @@ void anv_GetPhysicalDeviceFeatures2(
#define MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS 64
#define MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS 256
+#define MAX_CUSTOM_BORDER_COLORS 4096
+
void anv_GetPhysicalDeviceProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties* pProperties)
@@ -1380,7 +1768,7 @@ void anv_GetPhysicalDeviceProperties(
const struct gen_device_info *devinfo = &pdevice->info;
/* See assertions made when programming the buffer surface state. */
- const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
+ const uint32_t max_raw_buffer_sz = devinfo->ver >= 7 ?
(1ul << 30) : (1ul << 27);
const uint32_t max_ssbos = pdevice->has_a64_buffer_access ? UINT16_MAX : 64;
@@ -1388,7 +1776,7 @@ void anv_GetPhysicalDeviceProperties(
pdevice->has_bindless_images ? UINT16_MAX : 128;
const uint32_t max_samplers =
pdevice->has_bindless_samplers ? UINT16_MAX :
- (devinfo->gen >= 8 || devinfo->is_haswell) ? 128 : 16;
+ (devinfo->ver >= 8 || devinfo->is_haswell) ? 128 : 16;
const uint32_t max_images =
pdevice->has_bindless_images ? UINT16_MAX : MAX_IMAGES;
@@ -1450,7 +1838,7 @@ void anv_GetPhysicalDeviceProperties(
.maxTessellationEvaluationInputComponents = 128,
.maxTessellationEvaluationOutputComponents = 128,
.maxGeometryShaderInvocations = 32,
- .maxGeometryInputComponents = 64,
+ .maxGeometryInputComponents = devinfo->ver >= 8 ? 128 : 64,
.maxGeometryOutputComponents = 128,
.maxGeometryOutputVertices = 256,
.maxGeometryTotalOutputComponents = 1024,
@@ -1482,9 +1870,8 @@ void anv_GetPhysicalDeviceProperties(
* case of R32G32B32A32 which is 16 bytes.
*/
.minTexelBufferOffsetAlignment = 16,
- /* We need 16 for UBO block reads to work and 32 for push UBOs */
- .minUniformBufferOffsetAlignment = 32,
- .minStorageBufferOffsetAlignment = 4,
+ .minUniformBufferOffsetAlignment = ANV_UBO_ALIGNMENT,
+ .minStorageBufferOffsetAlignment = ANV_SSBO_ALIGNMENT,
.minTexelOffset = -8,
.maxTexelOffset = 7,
.minTexelGatherOffset = -32,
@@ -1515,7 +1902,7 @@ void anv_GetPhysicalDeviceProperties(
.pointSizeRange = { 0.125, 255.875 },
.lineWidthRange = {
0.0,
- (devinfo->gen >= 9 || devinfo->is_cherryview) ?
+ (devinfo->ver >= 9 || devinfo->is_cherryview) ?
2047.9921875 : 7.9921875,
},
.pointSizeGranularity = (1.0 / 8.0),
@@ -1528,7 +1915,7 @@ void anv_GetPhysicalDeviceProperties(
};
*pProperties = (VkPhysicalDeviceProperties) {
- .apiVersion = anv_physical_device_api_version(pdevice),
+ .apiVersion = ANV_API_VERSION,
.driverVersion = vk_get_driver_version(),
.vendorID = 0x8086,
.deviceID = pdevice->info.chipset_id,
@@ -1568,15 +1955,15 @@ anv_get_physical_device_properties_1_1(struct anv_physical_device *pdevice,
VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
VK_SUBGROUP_FEATURE_QUAD_BIT;
- if (pdevice->info.gen >= 8) {
+ if (pdevice->info.ver >= 8) {
/* TODO: There's no technical reason why these can't be made to
- * work on gen7 but they don't at the moment so it's best to leave
+ * work on gfx7 but they don't at the moment so it's best to leave
* the feature disabled than enabled and broken.
*/
p->subgroupSupportedOperations |= VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
VK_SUBGROUP_FEATURE_CLUSTERED_BIT;
}
- p->subgroupQuadOperationsInAllStages = pdevice->info.gen >= 8;
+ p->subgroupQuadOperationsInAllStages = pdevice->info.ver >= 8;
p->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY;
p->maxMultiviewViewCount = 16;
@@ -1622,7 +2009,7 @@ anv_get_physical_device_properties_1_2(struct anv_physical_device *pdevice,
* Restriction : Half-float denorms are always retained."
*/
p->shaderDenormFlushToZeroFloat16 = false;
- p->shaderDenormPreserveFloat16 = pdevice->info.gen > 8;
+ p->shaderDenormPreserveFloat16 = pdevice->info.ver > 8;
p->shaderRoundingModeRTEFloat16 = true;
p->shaderRoundingModeRTZFloat16 = true;
p->shaderSignedZeroInfNanPreserveFloat16 = true;
@@ -1640,17 +2027,22 @@ anv_get_physical_device_properties_1_2(struct anv_physical_device *pdevice,
p->shaderSignedZeroInfNanPreserveFloat64 = true;
/* It's a bit hard to exactly map our implementation to the limits
- * described here. The bindless surface handle in the extended
+ * described by Vulkan. The bindless surface handle in the extended
* message descriptors is 20 bits and it's an index into the table of
* RENDER_SURFACE_STATE structs that starts at bindless surface base
- * address. Given that most things consume two surface states per
- * view (general/sampled for textures and write-only/read-write for
- * images), we claim 2^19 things.
+ * address. This means that we can have at most 1M surface states
+ * allocated at any given time. Since most image views take two
+ * descriptors, this means we have a limit of about 500K image views.
*
- * For SSBOs, we just use A64 messages so there is no real limit
- * there beyond the limit on the total size of a descriptor set.
+ * However, since we allocate surface states at vkCreateImageView time,
+ * this means our limit is actually something on the order of 500K image
+ * views allocated at any time. The actual limit described by Vulkan, on
+ * the other hand, is a limit on how many you can have in a descriptor set.
+ * Assuming anyone using 1M descriptors will be reusing the same image view
+ * many times (or binding a bunch of null descriptors), we can safely
+ * advertise a larger limit here.
*/
- const unsigned max_bindless_views = 1 << 19;
+ const unsigned max_bindless_views = 1 << 20;
p->maxUpdateAfterBindDescriptorsInAllPools = max_bindless_views;
p->shaderUniformBufferArrayNonUniformIndexingNative = false;
p->shaderSampledImageArrayNonUniformIndexingNative = false;
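The arithmetic in the comment above is easy to sanity-check in isolation. A minimal standalone sketch (not driver code; the constants simply restate the comment's reasoning):

#include <assert.h>
#include <stdint.h>

int main(void)
{
   /* The bindless surface handle is a 20-bit index into the table of
    * RENDER_SURFACE_STATE structs, so at most 2^20 (~1M) surface states
    * can be allocated at once.
    */
   const uint32_t max_surface_states = 1u << 20;

   /* Most image views take two descriptors, giving the ~500K views
    * figure quoted above.
    */
   assert(max_surface_states / 2 == 512 * 1024);

   /* The advertised Vulkan limit is still the full 2^20, on the
    * assumption that dense sets repeat views or hold null descriptors.
    */
   const unsigned max_bindless_views = 1u << 20;
   assert(max_bindless_views == max_surface_states);
   return 0;
}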
@@ -1682,7 +2074,7 @@ anv_get_physical_device_properties_1_2(struct anv_physical_device *pdevice,
VK_RESOLVE_MODE_MAX_BIT_KHR;
/* Average doesn't make sense for stencil so we don't support that */
p->supportedStencilResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
- if (pdevice->info.gen >= 8) {
+ if (pdevice->info.ver >= 8) {
/* The advanced stencil resolve modes currently require stencil
* sampling be supported by the hardware.
*/
@@ -1692,8 +2084,8 @@ anv_get_physical_device_properties_1_2(struct anv_physical_device *pdevice,
p->independentResolveNone = true;
p->independentResolve = true;
- p->filterMinmaxSingleComponentFormats = pdevice->info.gen >= 9;
- p->filterMinmaxImageComponentMapping = pdevice->info.gen >= 9;
+ p->filterMinmaxSingleComponentFormats = pdevice->info.ver >= 9;
+ p->filterMinmaxImageComponentMapping = pdevice->info.ver >= 9;
p->maxTimelineSemaphoreValueDifference = UINT64_MAX;
@@ -1728,6 +2120,40 @@ void anv_GetPhysicalDeviceProperties2(
vk_foreach_struct(ext, pProperties->pNext) {
switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT: {
+ /* TODO: Real limits */
+ VkPhysicalDeviceConservativeRasterizationPropertiesEXT *properties =
+ (VkPhysicalDeviceConservativeRasterizationPropertiesEXT *)ext;
+ /* There's nothing in the public docs about this value as far as I
+ * can tell. However, this is the value the Windows driver reports
+ * and there's a comment on a rejected HW feature in the internal
+ * docs that says:
+ *
+ * "This is similar to conservative rasterization, except the
+ * primitive area is not extended by 1/512 and..."
+ *
+ * That's a bit of an oblique reference, but it's the best we've got
+ * for now.
+ */
+ properties->primitiveOverestimationSize = 1.0f / 512.0f;
+ properties->maxExtraPrimitiveOverestimationSize = 0.0f;
+ properties->extraPrimitiveOverestimationSizeGranularity = 0.0f;
+ properties->primitiveUnderestimation = false;
+ properties->conservativePointAndLineRasterization = false;
+ properties->degenerateTrianglesRasterized = true;
+ properties->degenerateLinesRasterized = false;
+ properties->fullyCoveredFragmentShaderInputVariable = false;
+ properties->conservativeRasterizationPostDepthCoverage = true;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT: {
+ VkPhysicalDeviceCustomBorderColorPropertiesEXT *properties =
+ (VkPhysicalDeviceCustomBorderColorPropertiesEXT *)ext;
+ properties->maxCustomBorderColorSamplers = MAX_CUSTOM_BORDER_COLORS;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: {
VkPhysicalDeviceDepthStencilResolvePropertiesKHR *properties =
(VkPhysicalDeviceDepthStencilResolvePropertiesKHR *)ext;
@@ -1858,6 +2284,16 @@ void anv_GetPhysicalDeviceProperties2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR: {
+ VkPhysicalDevicePerformanceQueryPropertiesKHR *properties =
+ (VkPhysicalDevicePerformanceQueryPropertiesKHR *)ext;
+ /* We could support this by spawning a shader to do the equation
+ * normalization.
+ */
+ properties->allowCommandBufferQueryCopies = false;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
VkPhysicalDevicePointClippingProperties *properties =
(VkPhysicalDevicePointClippingProperties *) ext;
@@ -1889,6 +2325,15 @@ void anv_GetPhysicalDeviceProperties2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: {
+ VkPhysicalDeviceRobustness2PropertiesEXT *properties = (void *)ext;
+ properties->robustStorageBufferAccessSizeAlignment =
+ ANV_SSBO_BOUNDS_CHECK_ALIGNMENT;
+ properties->robustUniformBufferAccessSizeAlignment =
+ ANV_UBO_ALIGNMENT;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
(VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
@@ -1915,7 +2360,8 @@ void anv_GetPhysicalDeviceProperties2(
STATIC_ASSERT(8 <= BRW_SUBGROUP_SIZE && BRW_SUBGROUP_SIZE <= 32);
props->minSubgroupSize = 8;
props->maxSubgroupSize = 32;
- props->maxComputeWorkgroupSubgroups = pdevice->info.max_cs_threads;
+ /* Limit max_threads to 64 for the GPGPU_WALKER command. */
+ props->maxComputeWorkgroupSubgroups = MIN2(64, pdevice->info.max_cs_threads);
props->requiredSubgroupSizeStages = VK_SHADER_STAGE_COMPUTE_BIT;
break;
}
@@ -1941,6 +2387,25 @@ void anv_GetPhysicalDeviceProperties2(
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
+ VkPhysicalDeviceSampleLocationsPropertiesEXT *props =
+ (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
+
+ props->sampleLocationSampleCounts =
+ isl_device_get_sample_counts(&pdevice->isl_dev);
+
+ /* See also anv_GetPhysicalDeviceMultisamplePropertiesEXT */
+ props->maxSampleLocationGridSize.width = 1;
+ props->maxSampleLocationGridSize.height = 1;
+
+ props->sampleLocationCoordinateRange[0] = 0;
+ props->sampleLocationCoordinateRange[1] = 0.9375;
+ props->sampleLocationSubPixelBits = 4;
+
+ props->variableSampleLocations = true;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: {
VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *props =
(VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *)ext;
@@ -1991,7 +2456,9 @@ void anv_GetPhysicalDeviceProperties2(
props->transformFeedbackQueries = true;
props->transformFeedbackStreamsLinesTriangles = false;
props->transformFeedbackRasterizationStreamSelect = false;
- props->transformFeedbackDraw = true;
+ /* This requires MI_MATH */
+ props->transformFeedbackDraw = pdevice->info.is_haswell ||
+ pdevice->info.ver >= 8;
break;
}
@@ -2021,13 +2488,8 @@ void anv_GetPhysicalDeviceProperties2(
#undef CORE_PROPERTY
}
-/* We support exactly one queue family. */
static const VkQueueFamilyProperties
-anv_queue_family_properties = {
- .queueFlags = VK_QUEUE_GRAPHICS_BIT |
- VK_QUEUE_COMPUTE_BIT |
- VK_QUEUE_TRANSFER_BIT,
- .queueCount = 1,
+anv_queue_family_properties_template = {
.timestampValidBits = 36, /* XXX: Real value here */
.minImageTransferGranularity = { 1, 1, 1 },
};
@@ -2037,10 +2499,16 @@ void anv_GetPhysicalDeviceQueueFamilyProperties(
uint32_t* pCount,
VkQueueFamilyProperties* pQueueFamilyProperties)
{
+ ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
- vk_outarray_append(&out, p) {
- *p = anv_queue_family_properties;
+ for (uint32_t i = 0; i < pdevice->queue.family_count; i++) {
+ struct anv_queue_family *queue_family = &pdevice->queue.families[i];
+ vk_outarray_append(&out, p) {
+ *p = anv_queue_family_properties_template;
+ p->queueFlags = queue_family->queueFlags;
+ p->queueCount = queue_family->queueCount;
+ }
}
}
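With more than one queue family now possible, callers are expected to use the standard two-call enumeration idiom. A minimal application-side sketch (function name is hypothetical):

#include <stdlib.h>
#include <vulkan/vulkan.h>

static VkQueueFamilyProperties *
query_queue_families(VkPhysicalDevice pdev, uint32_t *count)
{
   /* First call with a NULL array just returns the family count. */
   vkGetPhysicalDeviceQueueFamilyProperties(pdev, count, NULL);

   VkQueueFamilyProperties *props = calloc(*count, sizeof(*props));
   if (props == NULL)
      return NULL;

   /* Second call fills one properties struct per family. */
   vkGetPhysicalDeviceQueueFamilyProperties(pdev, count, props);
   return props;
}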
@@ -2049,14 +2517,19 @@ void anv_GetPhysicalDeviceQueueFamilyProperties2(
uint32_t* pQueueFamilyPropertyCount,
VkQueueFamilyProperties2* pQueueFamilyProperties)
{
-
+ ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
- vk_outarray_append(&out, p) {
- p->queueFamilyProperties = anv_queue_family_properties;
+ for (uint32_t i = 0; i < pdevice->queue.family_count; i++) {
+ struct anv_queue_family *queue_family = &pdevice->queue.families[i];
+ vk_outarray_append(&out, p) {
+ p->queueFamilyProperties = anv_queue_family_properties_template;
+ p->queueFamilyProperties.queueFlags = queue_family->queueFlags;
+ p->queueFamilyProperties.queueCount = queue_family->queueCount;
- vk_foreach_struct(s, p->pNext) {
- anv_debug_ignored_stype(s->sType);
+ vk_foreach_struct(s, p->pNext) {
+ anv_debug_ignored_stype(s->sType);
+ }
}
}
}
@@ -2177,41 +2650,9 @@ PFN_vkVoidFunction anv_GetInstanceProcAddr(
const char* pName)
{
ANV_FROM_HANDLE(anv_instance, instance, _instance);
-
- /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
- * when we have to return valid function pointers, NULL, or it's left
- * undefined. See the table for exact details.
- */
- if (pName == NULL)
- return NULL;
-
-#define LOOKUP_ANV_ENTRYPOINT(entrypoint) \
- if (strcmp(pName, "vk" #entrypoint) == 0) \
- return (PFN_vkVoidFunction)anv_##entrypoint
-
- LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
- LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceLayerProperties);
- LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceVersion);
- LOOKUP_ANV_ENTRYPOINT(CreateInstance);
-
-#undef LOOKUP_ANV_ENTRYPOINT
-
- if (instance == NULL)
- return NULL;
-
- int idx = anv_get_instance_entrypoint_index(pName);
- if (idx >= 0)
- return instance->dispatch.entrypoints[idx];
-
- idx = anv_get_physical_device_entrypoint_index(pName);
- if (idx >= 0)
- return instance->physical_device_dispatch.entrypoints[idx];
-
- idx = anv_get_device_entrypoint_index(pName);
- if (idx >= 0)
- return instance->device_dispatch.entrypoints[idx];
-
- return NULL;
+ return vk_instance_get_proc_addr(&instance->vk,
+ &anv_instance_entrypoints,
+ pName);
}
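On the caller side nothing changes: entrypoints are still resolved by name through vkGetInstanceProcAddr. A minimal usage sketch:

#include <vulkan/vulkan.h>

static PFN_vkEnumeratePhysicalDevices
resolve_enumerate_physical_devices(VkInstance instance)
{
   /* The loader routes this into anv_GetInstanceProcAddr above, which
    * now defers to the shared vk_instance_get_proc_addr() helper.
    */
   return (PFN_vkEnumeratePhysicalDevices)
      vkGetInstanceProcAddr(instance, "vkEnumeratePhysicalDevices");
}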
/* With version 1+ of the loader interface the ICD should expose
@@ -2230,22 +2671,6 @@ VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
return anv_GetInstanceProcAddr(instance, pName);
}
-PFN_vkVoidFunction anv_GetDeviceProcAddr(
- VkDevice _device,
- const char* pName)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
-
- if (!device || !pName)
- return NULL;
-
- int idx = anv_get_device_entrypoint_index(pName);
- if (idx < 0)
- return NULL;
-
- return device->dispatch.entrypoints[idx];
-}
-
/* With version 4+ of the loader interface the ICD should expose
* vk_icdGetPhysicalDeviceProcAddr()
*/
@@ -2259,53 +2684,7 @@ PFN_vkVoidFunction vk_icdGetPhysicalDeviceProcAddr(
const char* pName)
{
ANV_FROM_HANDLE(anv_instance, instance, _instance);
-
- if (!pName || !instance)
- return NULL;
-
- int idx = anv_get_physical_device_entrypoint_index(pName);
- if (idx < 0)
- return NULL;
-
- return instance->physical_device_dispatch.entrypoints[idx];
-}
-
-
-VkResult
-anv_CreateDebugReportCallbackEXT(VkInstance _instance,
- const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugReportCallbackEXT* pCallback)
-{
- ANV_FROM_HANDLE(anv_instance, instance, _instance);
- return vk_create_debug_report_callback(&instance->debug_report_callbacks,
- pCreateInfo, pAllocator, &instance->alloc,
- pCallback);
-}
-
-void
-anv_DestroyDebugReportCallbackEXT(VkInstance _instance,
- VkDebugReportCallbackEXT _callback,
- const VkAllocationCallbacks* pAllocator)
-{
- ANV_FROM_HANDLE(anv_instance, instance, _instance);
- vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
- _callback, pAllocator, &instance->alloc);
-}
-
-void
-anv_DebugReportMessageEXT(VkInstance _instance,
- VkDebugReportFlagsEXT flags,
- VkDebugReportObjectTypeEXT objectType,
- uint64_t object,
- size_t location,
- int32_t messageCode,
- const char* pLayerPrefix,
- const char* pMessage)
-{
- ANV_FROM_HANDLE(anv_instance, instance, _instance);
- vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
- object, location, messageCode, pLayerPrefix, pMessage);
+ return vk_instance_get_physical_device_proc_addr(&instance->vk, pName);
}
static struct anv_state
@@ -2319,34 +2698,6 @@ anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align,
return state;
}
-/* Haswell border color is a bit of a disaster. Float and unorm formats use a
- * straightforward 32-bit float color in the first 64 bytes. Instead of using
- * a nice float/integer union like Gen8+, Haswell specifies the integer border
- * color as a separate entry /after/ the float color. The layout of this entry
- * also depends on the format's bpp (with extra hacks for RG32), and overlaps.
- *
- * Since we don't know the format/bpp, we can't make any of the border colors
- * containing '1' work for all formats, as it would be in the wrong place for
- * some of them. We opt to make 32-bit integers work as this seems like the
- * most common option. Fortunately, transparent black works regardless, as
- * all zeroes is the same in every bit-size.
- */
-struct hsw_border_color {
- float float32[4];
- uint32_t _pad0[12];
- uint32_t uint32[4];
- uint32_t _pad1[108];
-};
-
-struct gen8_border_color {
- union {
- float float32[4];
- uint32_t uint32[4];
- };
- /* Pad out to 64 bytes */
- uint32_t _pad[12];
-};
-
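The layout constraints described in the comment above can be pinned down with static asserts. A sketch mirroring the two structs (assuming the same field packing as the definitions being moved out of this file):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct hsw_border_color_sketch {
   float float32[4];
   uint32_t _pad0[12];
   uint32_t uint32[4];
   uint32_t _pad1[108];
};

struct gfx8_border_color_sketch {
   union {
      float float32[4];
      uint32_t uint32[4];
   };
   uint32_t _pad[12];
};

/* Haswell keeps the integer color in a separate entry 64 bytes after
 * the float color; gfx8+ overlays both at offset 0 in a 64-byte entry.
 */
static_assert(offsetof(struct hsw_border_color_sketch, uint32) == 64,
              "integer color sits after the float color on HSW");
static_assert(sizeof(struct hsw_border_color_sketch) == 512,
              "HSW border color entry is 512 bytes");
static_assert(sizeof(struct gfx8_border_color_sketch) == 64,
              "gfx8 border color entry is 64 bytes");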
static void
anv_device_init_border_colors(struct anv_device *device)
{
@@ -2364,7 +2715,7 @@ anv_device_init_border_colors(struct anv_device *device)
anv_state_pool_emit_data(&device->dynamic_state_pool,
sizeof(border_colors), 512, border_colors);
} else {
- static const struct gen8_border_color border_colors[] = {
+ static const struct gfx8_border_color border_colors[] = {
[VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
[VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
[VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
@@ -2382,7 +2733,7 @@ anv_device_init_border_colors(struct anv_device *device)
static VkResult
anv_device_init_trivial_batch(struct anv_device *device)
{
- VkResult result = anv_device_alloc_bo(device, 4096,
+ VkResult result = anv_device_alloc_bo(device, "trivial-batch", 4096,
ANV_BO_ALLOC_MAPPED,
0 /* explicit_address */,
&device->trivial_batch_bo);
@@ -2395,8 +2746,8 @@ anv_device_init_trivial_batch(struct anv_device *device)
.end = device->trivial_batch_bo->map + 4096,
};
- anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
- anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
+ anv_batch_emit(&batch, GFX7_MI_BATCH_BUFFER_END, bbe);
+ anv_batch_emit(&batch, GFX7_MI_NOOP, noop);
if (!device->info.has_llc)
gen_clflush_range(batch.start, batch.next - batch.start);
@@ -2404,87 +2755,18 @@ anv_device_init_trivial_batch(struct anv_device *device)
return VK_SUCCESS;
}
-VkResult anv_EnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char* pLayerName,
- uint32_t* pPropertyCount,
- VkExtensionProperties* pProperties)
-{
- ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
- VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
-
- for (int i = 0; i < ANV_DEVICE_EXTENSION_COUNT; i++) {
- if (device->supported_extensions.extensions[i]) {
- vk_outarray_append(&out, prop) {
- *prop = anv_device_extensions[i];
- }
- }
- }
-
- return vk_outarray_status(&out);
-}
-
-static void
-anv_device_init_dispatch(struct anv_device *device)
-{
- const struct anv_instance *instance = device->physical->instance;
-
- const struct anv_device_dispatch_table *genX_table;
- switch (device->info.gen) {
- case 12:
- genX_table = &gen12_device_dispatch_table;
- break;
- case 11:
- genX_table = &gen11_device_dispatch_table;
- break;
- case 10:
- genX_table = &gen10_device_dispatch_table;
- break;
- case 9:
- genX_table = &gen9_device_dispatch_table;
- break;
- case 8:
- genX_table = &gen8_device_dispatch_table;
- break;
- case 7:
- if (device->info.is_haswell)
- genX_table = &gen75_device_dispatch_table;
- else
- genX_table = &gen7_device_dispatch_table;
- break;
- default:
- unreachable("unsupported gen\n");
- }
-
- for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
- /* Vulkan requires that entrypoints for extensions which have not been
- * enabled must not be advertised.
- */
- if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
- &instance->enabled_extensions,
- &device->enabled_extensions)) {
- device->dispatch.entrypoints[i] = NULL;
- } else if (genX_table->entrypoints[i]) {
- device->dispatch.entrypoints[i] = genX_table->entrypoints[i];
- } else {
- device->dispatch.entrypoints[i] =
- anv_device_dispatch_table.entrypoints[i];
- }
- }
-}
-
static int
vk_priority_to_gen(int priority)
{
switch (priority) {
case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
- return GEN_CONTEXT_LOW_PRIORITY;
+ return INTEL_CONTEXT_LOW_PRIORITY;
case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
- return GEN_CONTEXT_MEDIUM_PRIORITY;
+ return INTEL_CONTEXT_MEDIUM_PRIORITY;
case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
- return GEN_CONTEXT_HIGH_PRIORITY;
+ return INTEL_CONTEXT_HIGH_PRIORITY;
case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
- return GEN_CONTEXT_REALTIME_PRIORITY;
+ return INTEL_CONTEXT_REALTIME_PRIORITY;
default:
unreachable("Invalid priority");
}
@@ -2493,7 +2775,7 @@ vk_priority_to_gen(int priority)
static VkResult
anv_device_init_hiz_clear_value_bo(struct anv_device *device)
{
- VkResult result = anv_device_alloc_bo(device, 4096,
+ VkResult result = anv_device_alloc_bo(device, "hiz-clear-value", 4096,
ANV_BO_ALLOC_MAPPED,
0 /* explicit_address */,
&device->hiz_clear_bo);
@@ -2512,14 +2794,14 @@ anv_device_init_hiz_clear_value_bo(struct anv_device *device)
}
static bool
-get_bo_from_pool(struct gen_batch_decode_bo *ret,
+get_bo_from_pool(struct intel_batch_decode_bo *ret,
struct anv_block_pool *pool,
uint64_t address)
{
anv_block_pool_foreach_bo(bo, pool) {
- uint64_t bo_address = gen_48b_address(bo->offset);
+ uint64_t bo_address = intel_48b_address(bo->offset);
if (address >= bo_address && address < (bo_address + bo->size)) {
- *ret = (struct gen_batch_decode_bo) {
+ *ret = (struct intel_batch_decode_bo) {
.addr = bo_address,
.size = bo->size,
.map = bo->map,
@@ -2531,11 +2813,11 @@ get_bo_from_pool(struct gen_batch_decode_bo *ret,
}
/* Finding a buffer for batch decoding */
-static struct gen_batch_decode_bo
+static struct intel_batch_decode_bo
decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
struct anv_device *device = v_batch;
- struct gen_batch_decode_bo ret_bo = {};
+ struct intel_batch_decode_bo ret_bo = {};
assert(ppgtt);
@@ -2549,7 +2831,7 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
return ret_bo;
if (!device->cmd_buffer_being_decoded)
- return (struct gen_batch_decode_bo) { };
+ return (struct intel_batch_decode_bo) { };
struct anv_batch_bo **bo;
@@ -2558,7 +2840,7 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
uint64_t bo_address = (*bo)->bo->offset & (~0ull >> 16);
if (address >= bo_address && address < bo_address + (*bo)->bo->size) {
- return (struct gen_batch_decode_bo) {
+ return (struct intel_batch_decode_bo) {
.addr = bo_address,
.size = (*bo)->bo->size,
.map = (*bo)->bo->map,
@@ -2566,18 +2848,18 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
}
}
- return (struct gen_batch_decode_bo) { };
+ return (struct intel_batch_decode_bo) { };
}
-struct gen_aux_map_buffer {
- struct gen_buffer base;
+struct intel_aux_map_buffer {
+ struct intel_buffer base;
struct anv_state state;
};
-static struct gen_buffer *
-gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
+static struct intel_buffer *
+intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
{
- struct gen_aux_map_buffer *buf = malloc(sizeof(struct gen_aux_map_buffer));
+ struct intel_aux_map_buffer *buf = malloc(sizeof(struct intel_aux_map_buffer));
if (!buf)
return NULL;
@@ -2596,9 +2878,9 @@ gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
}
static void
-gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
+intel_aux_map_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
{
- struct gen_aux_map_buffer *buf = (struct gen_aux_map_buffer*)buffer;
+ struct intel_aux_map_buffer *buf = (struct intel_aux_map_buffer*)buffer;
struct anv_device *device = (struct anv_device*)driver_ctx;
struct anv_state_pool *pool = &device->dynamic_state_pool;
anv_state_pool_free(pool, buf->state);
@@ -2606,8 +2888,8 @@ gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
}
static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
- .alloc = gen_aux_map_buffer_alloc,
- .free = gen_aux_map_buffer_free,
+ .alloc = intel_aux_map_buffer_alloc,
+ .free = intel_aux_map_buffer_free,
};
static VkResult
@@ -2639,24 +2921,6 @@ VkResult anv_CreateDevice(
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
- struct anv_device_extension_table enabled_extensions = { };
- for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
- int idx;
- for (idx = 0; idx < ANV_DEVICE_EXTENSION_COUNT; idx++) {
- if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
- anv_device_extensions[idx].extensionName) == 0)
- break;
- }
-
- if (idx >= ANV_DEVICE_EXTENSION_COUNT)
- return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
-
- if (!physical_device->supported_extensions.extensions[idx])
- return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
-
- enabled_extensions.extensions[idx] = true;
- }
-
/* Check enabled features */
bool robust_buffer_access = false;
if (pCreateInfo->pEnabledFeatures) {
@@ -2707,35 +2971,42 @@ VkResult anv_CreateDevice(
queue_priority ? queue_priority->globalPriority :
VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
- device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
+ device = vk_alloc2(&physical_device->instance->vk.alloc, pAllocator,
sizeof(*device), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ struct vk_device_dispatch_table dispatch_table;
+ vk_device_dispatch_table_from_entrypoints(&dispatch_table,
+ anv_genX(&physical_device->info, device_entrypoints), true);
+ vk_device_dispatch_table_from_entrypoints(&dispatch_table,
+ &anv_device_entrypoints, false);
+
+ result = vk_device_init(&device->vk, &physical_device->vk,
+ &dispatch_table, pCreateInfo, pAllocator);
+ if (result != VK_SUCCESS) {
+ vk_error(result);
+ goto fail_alloc;
+ }
+
if (INTEL_DEBUG & DEBUG_BATCH) {
const unsigned decode_flags =
- GEN_BATCH_DECODE_FULL |
- ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
- GEN_BATCH_DECODE_OFFSETS |
- GEN_BATCH_DECODE_FLOATS;
-
- gen_batch_decode_ctx_init(&device->decoder_ctx,
- &physical_device->info,
- stderr, decode_flags, NULL,
- decode_get_bo, NULL, device);
+ INTEL_BATCH_DECODE_FULL |
+ ((INTEL_DEBUG & DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
+ INTEL_BATCH_DECODE_OFFSETS |
+ INTEL_BATCH_DECODE_FLOATS;
+
+ intel_batch_decode_ctx_init(&device->decoder_ctx,
+ &physical_device->info,
+ stderr, decode_flags, NULL,
+ decode_get_bo, NULL, device);
}
- device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->physical = physical_device;
device->no_hw = physical_device->no_hw;
device->_lost = false;
- if (pAllocator)
- device->alloc = *pAllocator;
- else
- device->alloc = physical_device->instance->alloc;
-
/* XXX(chadv): Can we dup() physicalDevice->fd here? */
device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
if (device->fd == -1) {
@@ -2743,20 +3014,76 @@ VkResult anv_CreateDevice(
goto fail_device;
}
- device->context_id = anv_gem_create_context(device);
+ uint32_t num_queues = 0;
+ for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
+ num_queues += pCreateInfo->pQueueCreateInfos[i].queueCount;
+
+ if (device->physical->engine_info) {
+ /* The kernel API supports at most 64 engines */
+ assert(num_queues <= 64);
+ uint16_t engine_classes[64];
+ int engine_count = 0;
+ for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
+ const VkDeviceQueueCreateInfo *queueCreateInfo =
+ &pCreateInfo->pQueueCreateInfos[i];
+
+ assert(queueCreateInfo->queueFamilyIndex <
+ physical_device->queue.family_count);
+ struct anv_queue_family *queue_family =
+ &physical_device->queue.families[queueCreateInfo->queueFamilyIndex];
+
+ for (uint32_t j = 0; j < queueCreateInfo->queueCount; j++)
+ engine_classes[engine_count++] = queue_family->engine_class;
+ }
+ device->context_id =
+ anv_gem_create_context_engines(device,
+ physical_device->engine_info,
+ engine_count, engine_classes);
+ } else {
+ assert(num_queues == 1);
+ device->context_id = anv_gem_create_context(device);
+ }
if (device->context_id == -1) {
result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
- result = anv_queue_init(device, &device->queue);
- if (result != VK_SUCCESS)
+ device->has_thread_submit = physical_device->has_thread_submit;
+
+ device->queues =
+ vk_zalloc(&device->vk.alloc, num_queues * sizeof(*device->queues), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (device->queues == NULL) {
+ result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_context_id;
+ }
+
+ device->queue_count = 0;
+ for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
+ const VkDeviceQueueCreateInfo *queueCreateInfo =
+ &pCreateInfo->pQueueCreateInfos[i];
+
+ for (uint32_t j = 0; j < queueCreateInfo->queueCount; j++) {
+ /* When using legacy contexts, we use I915_EXEC_RENDER but, with
+ * engine-based contexts, the bottom 6 bits of exec_flags are used
+ * for the engine ID.
+ */
+ uint32_t exec_flags = device->physical->engine_info ?
+ device->queue_count : I915_EXEC_RENDER;
+
+ result = anv_queue_init(device, &device->queues[device->queue_count],
+ exec_flags, queueCreateInfo);
+ if (result != VK_SUCCESS)
+ goto fail_queues;
+
+ device->queue_count++;
+ }
+ }
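The exec-flag convention referenced in the comment above (engine index in the low bits versus the legacy ring selector) can be sketched on its own; the constants are mirrored from drm-uapi/i915_drm.h:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_I915_EXEC_RING_MASK 0x3f      /* low 6 bits pick the engine */
#define SKETCH_I915_EXEC_RENDER    (1 << 0)  /* legacy render ring */

static uint32_t
sketch_queue_exec_flags(bool has_engine_info, uint32_t queue_index)
{
   /* Legacy contexts always submit to the render ring; engine-based
    * contexts address engines by index in the low 6 bits.
    */
   if (!has_engine_info)
      return SKETCH_I915_EXEC_RENDER;
   return queue_index & SKETCH_I915_EXEC_RING_MASK;
}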
if (physical_device->use_softpin) {
if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
- goto fail_queue;
+ goto fail_queues;
}
/* keep the page with address zero out of the allocator */
@@ -2800,16 +3127,13 @@ VkResult anv_CreateDevice(
* command parser gets in the way and we have to fall back to growing
* the batch.
*/
- device->can_chain_batches = device->info.gen >= 8;
+ device->can_chain_batches = device->info.ver >= 8;
device->robust_buffer_access = robust_buffer_access;
- device->enabled_extensions = enabled_extensions;
-
- anv_device_init_dispatch(device);
if (pthread_mutex_init(&device->mutex, NULL) != 0) {
result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
- goto fail_queue;
+ goto fail_queues;
}
pthread_condattr_t condattr;
@@ -2833,48 +3157,105 @@ VkResult anv_CreateDevice(
if (result != VK_SUCCESS)
goto fail_queue_cond;
- anv_bo_pool_init(&device->batch_bo_pool, device);
+ anv_bo_pool_init(&device->batch_bo_pool, device, "batch");
- result = anv_state_pool_init(&device->dynamic_state_pool, device,
- DYNAMIC_STATE_POOL_MIN_ADDRESS, 16384);
+ /* Because scratch is also relative to General State Base Address, we leave
+ * the base address 0 and start the pool memory at an offset. This way we
+ * get the correct offsets in the anv_states that get allocated from it.
+ */
+ result = anv_state_pool_init(&device->general_state_pool, device,
+ "general pool",
+ 0, GENERAL_STATE_POOL_MIN_ADDRESS, 16384);
if (result != VK_SUCCESS)
goto fail_batch_bo_pool;
+ result = anv_state_pool_init(&device->dynamic_state_pool, device,
+ "dynamic pool",
+ DYNAMIC_STATE_POOL_MIN_ADDRESS, 0, 16384);
+ if (result != VK_SUCCESS)
+ goto fail_general_state_pool;
+
+ if (device->info.ver >= 8) {
+ /* The border color pointer is limited to 24 bits, so we need to make
+ * sure that any such color used at any point in the program doesn't
+ * exceed that limit.
+ * We achieve that by reserving all the custom border colors we support
+ * right off the bat, so they are close to the base address.
+ */
+ anv_state_reserved_pool_init(&device->custom_border_colors,
+ &device->dynamic_state_pool,
+ MAX_CUSTOM_BORDER_COLORS,
+ sizeof(struct gfx8_border_color), 64);
+ }
+
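A quick standalone check of the 24-bit constraint described above; the entry count and size here are assumptions standing in for MAX_CUSTOM_BORDER_COLORS and sizeof(struct gfx8_border_color):

#include <assert.h>
#include <stdint.h>

int main(void)
{
   const uint64_t border_color_ptr_limit = 1ull << 24;
   const uint64_t num_colors = 4096;  /* assumed pool capacity */
   const uint64_t entry_size = 64;    /* 64-byte gfx8 entries */

   /* Reserving every entry up front keeps them at the bottom of the
    * dynamic state pool, so the largest offset stays well below 2^24.
    */
   assert(num_colors * entry_size <= border_color_ptr_limit);
   return 0;
}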
result = anv_state_pool_init(&device->instruction_state_pool, device,
- INSTRUCTION_STATE_POOL_MIN_ADDRESS, 16384);
+ "instruction pool",
+ INSTRUCTION_STATE_POOL_MIN_ADDRESS, 0, 16384);
if (result != VK_SUCCESS)
goto fail_dynamic_state_pool;
result = anv_state_pool_init(&device->surface_state_pool, device,
- SURFACE_STATE_POOL_MIN_ADDRESS, 4096);
+ "surface state pool",
+ SURFACE_STATE_POOL_MIN_ADDRESS, 0, 4096);
if (result != VK_SUCCESS)
goto fail_instruction_state_pool;
if (physical_device->use_softpin) {
+ int64_t bt_pool_offset = (int64_t)BINDING_TABLE_POOL_MIN_ADDRESS -
+ (int64_t)SURFACE_STATE_POOL_MIN_ADDRESS;
+ assert(INT32_MIN < bt_pool_offset && bt_pool_offset < 0);
result = anv_state_pool_init(&device->binding_table_pool, device,
- BINDING_TABLE_POOL_MIN_ADDRESS, 4096);
+ "binding table pool",
+ SURFACE_STATE_POOL_MIN_ADDRESS,
+ bt_pool_offset, 4096);
if (result != VK_SUCCESS)
goto fail_surface_state_pool;
}
- if (device->info.gen >= 12) {
- device->aux_map_ctx = gen_aux_map_init(device, &aux_map_allocator,
- &physical_device->info);
+ if (device->info.has_aux_map) {
+ device->aux_map_ctx = intel_aux_map_init(device, &aux_map_allocator,
+ &physical_device->info);
if (!device->aux_map_ctx)
goto fail_binding_table_pool;
}
- result = anv_device_alloc_bo(device, 4096, 0 /* flags */,
+ result = anv_device_alloc_bo(device, "workaround", 4096,
+ ANV_BO_ALLOC_CAPTURE | ANV_BO_ALLOC_MAPPED /* flags */,
0 /* explicit_address */,
&device->workaround_bo);
if (result != VK_SUCCESS)
goto fail_surface_aux_map_pool;
+ device->workaround_address = (struct anv_address) {
+ .bo = device->workaround_bo,
+ .offset = align_u32(
+ intel_debug_write_identifiers(device->workaround_bo->map,
+ device->workaround_bo->size,
+ "Anv") + 8, 8),
+ };
+
+ device->debug_frame_desc =
+ intel_debug_get_identifier_block(device->workaround_bo->map,
+ device->workaround_bo->size,
+ GEN_DEBUG_BLOCK_TYPE_FRAME);
+
result = anv_device_init_trivial_batch(device);
if (result != VK_SUCCESS)
goto fail_workaround_bo;
- if (device->info.gen >= 10) {
+ /* Allocate a null surface state at surface state offset 0. This makes
+ * NULL descriptor handling trivial because we can just memset structures
+ * to zero and they have a valid descriptor.
+ */
+ device->null_surface_state =
+ anv_state_pool_alloc(&device->surface_state_pool,
+ device->isl_dev.ss.size,
+ device->isl_dev.ss.align);
+ isl_null_fill_state(&device->isl_dev, device->null_surface_state.map,
+ isl_extent3d(1, 1, 1) /* This shouldn't matter */);
+ assert(device->null_surface_state.offset == 0);
+
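The payoff of placing the null surface state at offset 0 is that zero-initialized descriptors are valid by construction. A sketch with a hypothetical descriptor layout:

#include <string.h>

struct sketch_descriptor {
   unsigned surface_state_offset; /* 0 now names the null surface state */
};

static void
sketch_write_null_descriptor(struct sketch_descriptor *desc)
{
   /* No special case needed: memset-to-zero points the descriptor at
    * the null RENDER_SURFACE_STATE allocated above.
    */
   memset(desc, 0, sizeof(*desc));
}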
+ if (device->info.ver >= 10) {
result = anv_device_init_hiz_clear_value_bo(device);
if (result != VK_SUCCESS)
goto fail_trivial_batch_bo;
@@ -2882,37 +3263,12 @@ VkResult anv_CreateDevice(
anv_scratch_pool_init(device, &device->scratch_pool);
- switch (device->info.gen) {
- case 7:
- if (!device->info.is_haswell)
- result = gen7_init_device_state(device);
- else
- result = gen75_init_device_state(device);
- break;
- case 8:
- result = gen8_init_device_state(device);
- break;
- case 9:
- result = gen9_init_device_state(device);
- break;
- case 10:
- result = gen10_init_device_state(device);
- break;
- case 11:
- result = gen11_init_device_state(device);
- break;
- case 12:
- result = gen12_init_device_state(device);
- break;
- default:
- /* Shouldn't get here as we don't create physical devices for any other
- * gens. */
- unreachable("unhandled gen");
- }
+ result = anv_genX(&device->info, init_device_state)(device);
if (result != VK_SUCCESS)
- goto fail_workaround_bo;
+ goto fail_clear_value_bo;
- anv_pipeline_cache_init(&device->default_pipeline_cache, device, true);
+ anv_pipeline_cache_init(&device->default_pipeline_cache, device,
+ true /* cache_enabled */, false /* external_sync */);
anv_device_init_blorp(device);
@@ -2924,16 +3280,17 @@ VkResult anv_CreateDevice(
return VK_SUCCESS;
- fail_workaround_bo:
- anv_scratch_pool_finish(device, &device->scratch_pool);
- if (device->info.gen >= 10)
+ fail_clear_value_bo:
+ if (device->info.ver >= 10)
anv_device_release_bo(device, device->hiz_clear_bo);
- anv_device_release_bo(device, device->workaround_bo);
+ anv_scratch_pool_finish(device, &device->scratch_pool);
fail_trivial_batch_bo:
anv_device_release_bo(device, device->trivial_batch_bo);
+ fail_workaround_bo:
+ anv_device_release_bo(device, device->workaround_bo);
fail_surface_aux_map_pool:
- if (device->info.gen >= 12) {
- gen_aux_map_finish(device->aux_map_ctx);
+ if (device->info.has_aux_map) {
+ intel_aux_map_finish(device->aux_map_ctx);
device->aux_map_ctx = NULL;
}
fail_binding_table_pool:
@@ -2944,7 +3301,11 @@ VkResult anv_CreateDevice(
fail_instruction_state_pool:
anv_state_pool_finish(&device->instruction_state_pool);
fail_dynamic_state_pool:
+ if (device->info.ver >= 8)
+ anv_state_reserved_pool_finish(&device->custom_border_colors);
anv_state_pool_finish(&device->dynamic_state_pool);
+ fail_general_state_pool:
+ anv_state_pool_finish(&device->general_state_pool);
fail_batch_bo_pool:
anv_bo_pool_finish(&device->batch_bo_pool);
anv_bo_cache_finish(&device->bo_cache);
@@ -2958,14 +3319,18 @@ VkResult anv_CreateDevice(
util_vma_heap_finish(&device->vma_cva);
util_vma_heap_finish(&device->vma_lo);
}
- fail_queue:
- anv_queue_finish(&device->queue);
+ fail_queues:
+ for (uint32_t i = 0; i < device->queue_count; i++)
+ anv_queue_finish(&device->queues[i]);
+ vk_free(&device->vk.alloc, device->queues);
fail_context_id:
anv_gem_destroy_context(device, device->context_id);
fail_fd:
close(device->fd);
fail_device:
- vk_free(&device->alloc, device);
+ vk_device_finish(&device->vk);
+ fail_alloc:
+ vk_free(&device->vk.alloc, device);
return result;
}
@@ -2983,12 +3348,12 @@ void anv_DestroyDevice(
anv_pipeline_cache_finish(&device->default_pipeline_cache);
- anv_queue_finish(&device->queue);
-
#ifdef HAVE_VALGRIND
/* We only need to free these to prevent valgrind errors. The backing
* BO will go away in a couple of lines so we don't actually leak.
*/
+ if (device->info.ver >= 8)
+ anv_state_reserved_pool_finish(&device->custom_border_colors);
anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
anv_state_pool_free(&device->dynamic_state_pool, device->slice_hash);
#endif
@@ -2997,11 +3362,11 @@ void anv_DestroyDevice(
anv_device_release_bo(device, device->workaround_bo);
anv_device_release_bo(device, device->trivial_batch_bo);
- if (device->info.gen >= 10)
+ if (device->info.ver >= 10)
anv_device_release_bo(device, device->hiz_clear_bo);
- if (device->info.gen >= 12) {
- gen_aux_map_finish(device->aux_map_ctx);
+ if (device->info.has_aux_map) {
+ intel_aux_map_finish(device->aux_map_ctx);
device->aux_map_ctx = NULL;
}
@@ -3010,6 +3375,7 @@ void anv_DestroyDevice(
anv_state_pool_finish(&device->surface_state_pool);
anv_state_pool_finish(&device->instruction_state_pool);
anv_state_pool_finish(&device->dynamic_state_pool);
+ anv_state_pool_finish(&device->general_state_pool);
anv_bo_pool_finish(&device->batch_bo_pool);
@@ -3024,14 +3390,19 @@ void anv_DestroyDevice(
pthread_cond_destroy(&device->queue_submit);
pthread_mutex_destroy(&device->mutex);
+ for (uint32_t i = 0; i < device->queue_count; i++)
+ anv_queue_finish(&device->queues[i]);
+ vk_free(&device->vk.alloc, device->queues);
+
anv_gem_destroy_context(device, device->context_id);
if (INTEL_DEBUG & DEBUG_BATCH)
- gen_batch_decode_ctx_finish(&device->decoder_ctx);
+ intel_batch_decode_ctx_finish(&device->decoder_ctx);
close(device->fd);
- vk_free(&device->alloc, device);
+ vk_device_finish(&device->vk);
+ vk_free(&device->vk.alloc, device);
}
VkResult anv_EnumerateInstanceLayerProperties(
@@ -3047,52 +3418,57 @@ VkResult anv_EnumerateInstanceLayerProperties(
return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}
-VkResult anv_EnumerateDeviceLayerProperties(
- VkPhysicalDevice physicalDevice,
- uint32_t* pPropertyCount,
- VkLayerProperties* pProperties)
-{
- if (pProperties == NULL) {
- *pPropertyCount = 0;
- return VK_SUCCESS;
- }
-
- /* None supported at this time */
- return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
-}
-
-void anv_GetDeviceQueue(
- VkDevice _device,
- uint32_t queueNodeIndex,
- uint32_t queueIndex,
- VkQueue* pQueue)
-{
- const VkDeviceQueueInfo2 info = {
- .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
- .pNext = NULL,
- .flags = 0,
- .queueFamilyIndex = queueNodeIndex,
- .queueIndex = queueIndex,
- };
-
- anv_GetDeviceQueue2(_device, &info, pQueue);
-}
-
void anv_GetDeviceQueue2(
VkDevice _device,
const VkDeviceQueueInfo2* pQueueInfo,
VkQueue* pQueue)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_physical_device *pdevice = device->physical;
- assert(pQueueInfo->queueIndex == 0);
+ assert(pQueueInfo->queueFamilyIndex < pdevice->queue.family_count);
+ struct anv_queue_family *queue_family =
+ &pdevice->queue.families[pQueueInfo->queueFamilyIndex];
+
+ int idx_in_family = 0;
+ struct anv_queue *queue = NULL;
+ for (uint32_t i = 0; i < device->queue_count; i++) {
+ if (device->queues[i].family != queue_family)
+ continue;
- if (pQueueInfo->flags == device->queue.flags)
- *pQueue = anv_queue_to_handle(&device->queue);
+ if (idx_in_family == pQueueInfo->queueIndex) {
+ queue = &device->queues[i];
+ break;
+ }
+
+ idx_in_family++;
+ }
+ assert(queue != NULL);
+
+ if (queue && queue->flags == pQueueInfo->flags)
+ *pQueue = anv_queue_to_handle(queue);
else
*pQueue = NULL;
}
+void
+_anv_device_report_lost(struct anv_device *device)
+{
+ assert(p_atomic_read(&device->_lost) > 0);
+
+ device->lost_reported = true;
+
+ for (uint32_t i = 0; i < device->queue_count; i++) {
+ struct anv_queue *queue = &device->queues[i];
+ if (queue->lost) {
+ __vk_errorf(device->physical->instance, &device->vk.base,
+ VK_ERROR_DEVICE_LOST,
+ queue->error_file, queue->error_line,
+ "%s", queue->error_msg);
+ }
+ }
+}
+
VkResult
_anv_device_set_lost(struct anv_device *device,
const char *file, int line,
@@ -3101,11 +3477,14 @@ _anv_device_set_lost(struct anv_device *device,
VkResult err;
va_list ap;
+ if (p_atomic_read(&device->_lost) > 0)
+ return VK_ERROR_DEVICE_LOST;
+
p_atomic_inc(&device->_lost);
+ device->lost_reported = true;
va_start(ap, msg);
- err = __vk_errorv(device->physical->instance, device,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ err = __vk_errorv(device->physical->instance, &device->vk.base,
VK_ERROR_DEVICE_LOST, file, line, msg, ap);
va_end(ap);
@@ -3117,24 +3496,29 @@ _anv_device_set_lost(struct anv_device *device,
VkResult
_anv_queue_set_lost(struct anv_queue *queue,
- const char *file, int line,
- const char *msg, ...)
+ const char *file, int line,
+ const char *msg, ...)
{
- VkResult err;
va_list ap;
- p_atomic_inc(&queue->device->_lost);
+ if (queue->lost)
+ return VK_ERROR_DEVICE_LOST;
+ queue->lost = true;
+
+ queue->error_file = file;
+ queue->error_line = line;
va_start(ap, msg);
- err = __vk_errorv(queue->device->physical->instance, queue->device,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- VK_ERROR_DEVICE_LOST, file, line, msg, ap);
+ vsnprintf(queue->error_msg, sizeof(queue->error_msg),
+ msg, ap);
va_end(ap);
+ p_atomic_inc(&queue->device->_lost);
+
if (env_var_as_boolean("ANV_ABORT_ON_DEVICE_LOSS", false))
abort();
- return err;
+ return VK_ERROR_DEVICE_LOST;
}
VkResult
@@ -3148,7 +3532,8 @@ anv_device_query_status(struct anv_device *device)
return VK_ERROR_DEVICE_LOST;
uint32_t active, pending;
- int ret = anv_gem_gpu_get_reset_stats(device, &active, &pending);
+ int ret = anv_gem_context_get_reset_stats(device->fd, device->context_id,
+ &active, &pending);
if (ret == -1) {
/* We don't know the real error. */
return anv_device_set_lost(device, "get_reset_stats failed: %m");
@@ -3215,7 +3600,13 @@ VkResult anv_DeviceWaitIdle(
if (anv_device_is_lost(device))
return VK_ERROR_DEVICE_LOST;
- return anv_queue_submit_simple_batch(&device->queue, NULL);
+ for (uint32_t i = 0; i < device->queue_count; i++) {
+ VkResult res = anv_queue_submit_simple_batch(&device->queues[i], NULL);
+ if (res != VK_SUCCESS)
+ return res;
+ }
+
+ return VK_SUCCESS;
}
uint64_t
@@ -3252,15 +3643,15 @@ anv_vma_alloc(struct anv_device *device,
done:
pthread_mutex_unlock(&device->vma_mutex);
- assert(addr == gen_48b_address(addr));
- return gen_canonical_address(addr);
+ assert(addr == intel_48b_address(addr));
+ return intel_canonical_address(addr);
}
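The address helpers used above implement the usual 48-bit canonical-form rule: the upper bits are a sign extension of bit 47. A minimal sketch of both directions (assuming arithmetic right shift, as the driver itself does):

#include <stdint.h>

static uint64_t
sketch_canonical_address(uint64_t addr_48b)
{
   /* Shift bit 47 into bit 63, then arithmetic-shift back down so the
    * top 16 bits copy bit 47.
    */
   return (uint64_t)(((int64_t)addr_48b << 16) >> 16);
}

static uint64_t
sketch_48b_address(uint64_t canonical)
{
   /* Strip the sign extension again, keeping the low 48 bits. */
   return canonical & ((1ull << 48) - 1);
}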
void
anv_vma_free(struct anv_device *device,
uint64_t address, uint64_t size)
{
- const uint64_t addr_48b = gen_48b_address(address);
+ const uint64_t addr_48b = intel_48b_address(address);
pthread_mutex_lock(&device->vma_mutex);
@@ -3311,12 +3702,11 @@ VkResult anv_AllocateMemory(
if (mem_heap_used + aligned_alloc_size > mem_heap->size)
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
- mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
+ VK_OBJECT_TYPE_DEVICE_MEMORY);
if (mem == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
mem->type = mem_type;
mem->map = NULL;
mem->map_size = 0;
@@ -3448,7 +3838,8 @@ VkResult anv_AllocateMemory(
* this sort of attack but only if it can trust the buffer size.
*/
if (mem->bo->size < aligned_alloc_size) {
- result = vk_errorf(device, device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ result = vk_errorf(device, &device->vk.base,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
"aligned allocationSize too large for "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT: "
"%"PRIu64"B > %"PRIu64"B",
@@ -3495,7 +3886,7 @@ VkResult anv_AllocateMemory(
/* Regular allocation (not importing memory). */
- result = anv_device_alloc_bo(device, pAllocateInfo->allocationSize,
+ result = anv_device_alloc_bo(device, "user", pAllocateInfo->allocationSize,
alloc_flags, client_address, &mem->bo);
if (result != VK_SUCCESS)
goto fail;
@@ -3508,13 +3899,14 @@ VkResult anv_AllocateMemory(
*/
if (image->needs_set_tiling) {
const uint32_t i915_tiling =
- isl_tiling_to_i915_tiling(image->planes[0].surface.isl.tiling);
+ isl_tiling_to_i915_tiling(image->planes[0].primary_surface.isl.tiling);
int ret = anv_gem_set_tiling(device, mem->bo->gem_handle,
- image->planes[0].surface.isl.row_pitch_B,
+ image->planes[0].primary_surface.isl.row_pitch_B,
i915_tiling);
if (ret) {
anv_device_release_bo(device, mem->bo);
- result = vk_errorf(device, device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ result = vk_errorf(device, &device->vk.base,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to set BO tiling: %m");
goto fail;
}
@@ -3526,7 +3918,8 @@ VkResult anv_AllocateMemory(
if (mem_heap_used > mem_heap->size) {
p_atomic_add(&mem_heap->used, -mem->bo->size);
anv_device_release_bo(device, mem->bo);
- result = vk_errorf(device, device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ result = vk_errorf(device, &device->vk.base,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
"Out of heap memory");
goto fail;
}
@@ -3540,7 +3933,7 @@ VkResult anv_AllocateMemory(
return VK_SUCCESS;
fail:
- vk_free2(&device->alloc, pAllocator, mem);
+ vk_object_free(&device->vk, pAllocator, mem);
return result;
}
@@ -3640,7 +4033,7 @@ void anv_FreeMemory(
AHardwareBuffer_release(mem->ahw);
#endif
- vk_free2(&device->alloc, pAllocator, mem);
+ vk_object_free(&device->vk, pAllocator, mem);
}
VkResult anv_MapMemory(
@@ -3690,7 +4083,11 @@ VkResult anv_MapMemory(
gem_flags |= I915_MMAP_WC;
/* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
- uint64_t map_offset = offset & ~4095ull;
+ uint64_t map_offset;
+ if (!device->physical->has_mmap_offset)
+ map_offset = offset & ~4095ull;
+ else
+ map_offset = 0;
assert(offset >= map_offset);
uint64_t map_size = (offset + size) - map_offset;
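A worked example of the legacy-path rounding above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
   /* Map at the 4k page below the requested offset and extend the
    * length so [offset, offset + size) is still covered.
    */
   const uint64_t offset = 6000, size = 100;

   uint64_t map_offset = offset & ~4095ull;          /* 4096 */
   uint64_t map_size = (offset + size) - map_offset; /* 2004 */

   assert(map_offset == 4096 && map_size == 2004);
   assert(map_offset % 4096 == 0);
   assert(map_offset + map_size >= offset + size);
   return 0;
}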
@@ -3714,12 +4111,13 @@ void anv_UnmapMemory(
VkDevice _device,
VkDeviceMemory _memory)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
if (mem == NULL || mem->host_ptr)
return;
- anv_gem_munmap(mem->map, mem->map_size);
+ anv_gem_munmap(device, mem->map, mem->map_size);
mem->map = NULL;
mem->map_size = 0;
@@ -3776,13 +4174,13 @@ VkResult anv_InvalidateMappedMemoryRanges(
return VK_SUCCESS;
}
-void anv_GetBufferMemoryRequirements(
+void anv_GetBufferMemoryRequirements2(
VkDevice _device,
- VkBuffer _buffer,
- VkMemoryRequirements* pMemoryRequirements)
+ const VkBufferMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements)
{
- ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_buffer, buffer, pInfo->buffer);
/* The Vulkan spec (git aaed022) says:
*
@@ -3796,12 +4194,11 @@ void anv_GetBufferMemoryRequirements(
/* Base alignment requirement of a cache line */
uint32_t alignment = 16;
- /* We need an alignment of 32 for pushing UBOs */
if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
- alignment = MAX2(alignment, 32);
+ alignment = MAX2(alignment, ANV_UBO_ALIGNMENT);
- pMemoryRequirements->size = buffer->size;
- pMemoryRequirements->alignment = alignment;
+ pMemoryRequirements->memoryRequirements.size = buffer->size;
+ pMemoryRequirements->memoryRequirements.alignment = alignment;
/* Storage and uniform buffers should have their size aligned to
* 32 bits to avoid boundary checks when the last DWord is not complete.
@@ -3811,18 +4208,9 @@ void anv_GetBufferMemoryRequirements(
if (device->robust_buffer_access &&
(buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
- pMemoryRequirements->size = align_u64(buffer->size, 4);
+ pMemoryRequirements->memoryRequirements.size = align_u64(buffer->size, 4);
- pMemoryRequirements->memoryTypeBits = memory_types;
-}
-
-void anv_GetBufferMemoryRequirements2(
- VkDevice _device,
- const VkBufferMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements)
-{
- anv_GetBufferMemoryRequirements(_device, pInfo->buffer,
- &pMemoryRequirements->memoryRequirements);
+ pMemoryRequirements->memoryRequirements.memoryTypeBits = memory_types;
vk_foreach_struct(ext, pMemoryRequirements->pNext) {
switch (ext->sType) {
@@ -3840,132 +4228,6 @@ void anv_GetBufferMemoryRequirements2(
}
}
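A standalone check of the robust-access padding above, with a sketch of the align_u64() helper it relies on:

#include <assert.h>
#include <stdint.h>

static uint64_t
sketch_align_u64(uint64_t v, uint64_t a)
{
   /* Round v up to the next multiple of the power-of-two a. */
   return (v + a - 1) & ~(a - 1);
}

int main(void)
{
   /* Padding buffer sizes to a 4-byte multiple means a bounds check on
    * the last DWord never reads past the allocation.
    */
   assert(sketch_align_u64(13, 4) == 16);
   assert(sketch_align_u64(16, 4) == 16);
   return 0;
}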
-void anv_GetImageMemoryRequirements(
- VkDevice _device,
- VkImage _image,
- VkMemoryRequirements* pMemoryRequirements)
-{
- ANV_FROM_HANDLE(anv_image, image, _image);
- ANV_FROM_HANDLE(anv_device, device, _device);
-
- /* The Vulkan spec (git aaed022) says:
- *
- * memoryTypeBits is a bitfield and contains one bit set for every
- * supported memory type for the resource. The bit `1<<i` is set if and
- * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
- * structure for the physical device is supported.
- *
- * All types are currently supported for images.
- */
- uint32_t memory_types = (1ull << device->physical->memory.type_count) - 1;
-
- /* We must have image allocated or imported at this point. According to the
- * specification, external images must have been bound to memory before
- * calling GetImageMemoryRequirements.
- */
- assert(image->size > 0);
-
- pMemoryRequirements->size = image->size;
- pMemoryRequirements->alignment = image->alignment;
- pMemoryRequirements->memoryTypeBits = memory_types;
-}
-
-void anv_GetImageMemoryRequirements2(
- VkDevice _device,
- const VkImageMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_image, image, pInfo->image);
-
- anv_GetImageMemoryRequirements(_device, pInfo->image,
- &pMemoryRequirements->memoryRequirements);
-
- vk_foreach_struct_const(ext, pInfo->pNext) {
- switch (ext->sType) {
- case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: {
- const VkImagePlaneMemoryRequirementsInfo *plane_reqs =
- (const VkImagePlaneMemoryRequirementsInfo *) ext;
- uint32_t plane = anv_image_aspect_to_plane(image->aspects,
- plane_reqs->planeAspect);
-
- assert(image->planes[plane].offset == 0);
-
- /* The Vulkan spec (git aaed022) says:
- *
- * memoryTypeBits is a bitfield and contains one bit set for every
- * supported memory type for the resource. The bit `1<<i` is set
- * if and only if the memory type `i` in the
- * VkPhysicalDeviceMemoryProperties structure for the physical
- * device is supported.
- *
- * All types are currently supported for images.
- */
- pMemoryRequirements->memoryRequirements.memoryTypeBits =
- (1ull << device->physical->memory.type_count) - 1;
-
- /* We must have image allocated or imported at this point. According to the
- * specification, external images must have been bound to memory before
- * calling GetImageMemoryRequirements.
- */
- assert(image->planes[plane].size > 0);
-
- pMemoryRequirements->memoryRequirements.size = image->planes[plane].size;
- pMemoryRequirements->memoryRequirements.alignment =
- image->planes[plane].alignment;
- break;
- }
-
- default:
- anv_debug_ignored_stype(ext->sType);
- break;
- }
- }
-
- vk_foreach_struct(ext, pMemoryRequirements->pNext) {
- switch (ext->sType) {
- case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
- VkMemoryDedicatedRequirements *requirements = (void *)ext;
- if (image->needs_set_tiling || image->external_format) {
- /* If we need to set the tiling for external consumers, we need a
- * dedicated allocation.
- *
- * See also anv_AllocateMemory.
- */
- requirements->prefersDedicatedAllocation = true;
- requirements->requiresDedicatedAllocation = true;
- } else {
- requirements->prefersDedicatedAllocation = false;
- requirements->requiresDedicatedAllocation = false;
- }
- break;
- }
-
- default:
- anv_debug_ignored_stype(ext->sType);
- break;
- }
- }
-}
-
-void anv_GetImageSparseMemoryRequirements(
- VkDevice device,
- VkImage image,
- uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
-{
- *pSparseMemoryRequirementCount = 0;
-}
-
-void anv_GetImageSparseMemoryRequirements2(
- VkDevice device,
- const VkImageSparseMemoryRequirementsInfo2* pInfo,
- uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
-{
- *pSparseMemoryRequirementCount = 0;
-}
-
void anv_GetDeviceMemoryCommitment(
VkDevice device,
VkDeviceMemory memory,
@@ -3992,23 +4254,6 @@ anv_bind_buffer_memory(const VkBindBufferMemoryInfo *pBindInfo)
}
}
-VkResult anv_BindBufferMemory(
- VkDevice device,
- VkBuffer buffer,
- VkDeviceMemory memory,
- VkDeviceSize memoryOffset)
-{
- anv_bind_buffer_memory(
- &(VkBindBufferMemoryInfo) {
- .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
- .buffer = buffer,
- .memory = memory,
- .memoryOffset = memoryOffset,
- });
-
- return VK_SUCCESS;
-}
-
VkResult anv_BindBufferMemory2(
VkDevice device,
uint32_t bindInfoCount,
@@ -4042,22 +4287,18 @@ VkResult anv_CreateEvent(
VkEvent* pEvent)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_state state;
struct anv_event *event;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
- state = anv_state_pool_alloc(&device->dynamic_state_pool,
- sizeof(*event), 8);
- event = state.map;
- event->state = state;
- event->semaphore = VK_EVENT_RESET;
+ event = vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
+ VK_OBJECT_TYPE_EVENT);
+ if (event == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- if (!device->info.has_llc) {
- /* Make sure the writes we're flushing have landed. */
- __builtin_ia32_mfence();
- __builtin_ia32_clflush(event);
- }
+ event->state = anv_state_pool_alloc(&device->dynamic_state_pool,
+ sizeof(uint64_t), 8);
+ *(uint64_t *)event->state.map = VK_EVENT_RESET;
*pEvent = anv_event_to_handle(event);
@@ -4076,6 +4317,8 @@ void anv_DestroyEvent(
return;
anv_state_pool_free(&device->dynamic_state_pool, event->state);
+
+ vk_object_free(&device->vk, pAllocator, event);
}
VkResult anv_GetEventStatus(
@@ -4088,30 +4331,16 @@ VkResult anv_GetEventStatus(
if (anv_device_is_lost(device))
return VK_ERROR_DEVICE_LOST;
- if (!device->info.has_llc) {
- /* Invalidate read cache before reading event written by GPU. */
- __builtin_ia32_clflush(event);
- __builtin_ia32_mfence();
-
- }
-
- return event->semaphore;
+ return *(uint64_t *)event->state.map;
}
VkResult anv_SetEvent(
VkDevice _device,
VkEvent _event)
{
- ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_event, event, _event);
- event->semaphore = VK_EVENT_SET;
-
- if (!device->info.has_llc) {
- /* Make sure the writes we're flushing have landed. */
- __builtin_ia32_mfence();
- __builtin_ia32_clflush(event);
- }
+ *(uint64_t *)event->state.map = VK_EVENT_SET;
return VK_SUCCESS;
}
@@ -4120,16 +4349,9 @@ VkResult anv_ResetEvent(
VkDevice _device,
VkEvent _event)
{
- ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_event, event, _event);
- event->semaphore = VK_EVENT_RESET;
-
- if (!device->info.has_llc) {
- /* Make sure the writes we're flushing have landed. */
- __builtin_ia32_mfence();
- __builtin_ia32_clflush(event);
- }
+ *(uint64_t *)event->state.map = VK_EVENT_RESET;
return VK_SUCCESS;
}
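With events stored as a plain uint64_t in GPU-visible state memory, the host-side status query is a single load. A minimal caller sketch:

#include <stdbool.h>
#include <vulkan/vulkan.h>

static bool
sketch_event_is_set(VkDevice device, VkEvent event)
{
   /* vkGetEventStatus returns VK_EVENT_SET or VK_EVENT_RESET, read
    * straight from the event's state-pool slot.
    */
   return vkGetEventStatus(device, event) == VK_EVENT_SET;
}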
@@ -4155,11 +4377,12 @@ VkResult anv_CreateBuffer(
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
- buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
+ VK_OBJECT_TYPE_BUFFER);
if (buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ buffer->create_flags = pCreateInfo->flags;
buffer->size = pCreateInfo->size;
buffer->usage = pCreateInfo->usage;
buffer->address = ANV_NULL_ADDRESS;
@@ -4180,7 +4403,7 @@ void anv_DestroyBuffer(
if (!buffer)
return;
- vk_free2(&device->alloc, pAllocator, buffer);
+ vk_object_free(&device->vk, pAllocator, buffer);
}
VkDeviceAddress anv_GetBufferDeviceAddress(
@@ -4211,18 +4434,20 @@ uint64_t anv_GetDeviceMemoryOpaqueCaptureAddress(
assert(memory->bo->flags & EXEC_OBJECT_PINNED);
assert(memory->bo->has_client_visible_address);
- return gen_48b_address(memory->bo->offset);
+ return intel_48b_address(memory->bo->offset);
}
void
anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
enum isl_format format,
+ isl_surf_usage_flags_t usage,
struct anv_address address,
uint32_t range, uint32_t stride)
{
isl_buffer_fill_state(&device->isl_dev, state.map,
.address = anv_address_physical(address),
- .mocs = device->isl_dev.mocs.internal,
+ .mocs = isl_mocs(&device->isl_dev, usage,
+ address.bo && address.bo->is_external),
.size_B = range,
.format = format,
.swizzle = ISL_SWIZZLE_IDENTITY,
@@ -4245,7 +4470,12 @@ void anv_DestroySampler(
sampler->bindless_state);
}
- vk_free2(&device->alloc, pAllocator, sampler);
+ if (sampler->custom_border_color.map) {
+ anv_state_reserved_pool_free(&device->custom_border_colors,
+ sampler->custom_border_color);
+ }
+
+ vk_object_free(&device->vk, pAllocator, sampler);
}
VkResult anv_CreateFramebuffer(
@@ -4266,31 +4496,26 @@ VkResult anv_CreateFramebuffer(
* If flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR,
* parameter pAttachments is ignored.
*/
- if (!(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR)) {
+ if (!(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR))
size += sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
- framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (framebuffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_FRAMEBUFFER);
+ if (framebuffer == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ framebuffer->width = pCreateInfo->width;
+ framebuffer->height = pCreateInfo->height;
+ framebuffer->layers = pCreateInfo->layers;
+
+ if (!(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR)) {
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
ANV_FROM_HANDLE(anv_image_view, iview, pCreateInfo->pAttachments[i]);
framebuffer->attachments[i] = iview;
}
framebuffer->attachment_count = pCreateInfo->attachmentCount;
- } else {
- framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (framebuffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- framebuffer->attachment_count = 0;
}
- framebuffer->width = pCreateInfo->width;
- framebuffer->height = pCreateInfo->height;
- framebuffer->layers = pCreateInfo->layers;
-
*pFramebuffer = anv_framebuffer_to_handle(framebuffer);
return VK_SUCCESS;
@@ -4307,7 +4532,7 @@ void anv_DestroyFramebuffer(
if (!fb)
return;
- vk_free2(&device->alloc, pAllocator, fb);
+ vk_object_free(&device->vk, pAllocator, fb);
}
static const VkTimeDomainEXT anv_time_domains[] = {
@@ -4352,8 +4577,6 @@ anv_clock_gettime(clockid_t clock_id)
return (uint64_t) current.tv_sec * 1000000000ULL + current.tv_nsec;
}
-#define TIMESTAMP 0x2358
-
VkResult anv_GetCalibratedTimestampsEXT(
VkDevice _device,
uint32_t timestampCount,
@@ -4377,7 +4600,7 @@ VkResult anv_GetCalibratedTimestampsEXT(
for (d = 0; d < timestampCount; d++) {
switch (pTimestampInfos[d].timeDomain) {
case VK_TIME_DOMAIN_DEVICE_EXT:
- ret = anv_gem_reg_read(device, TIMESTAMP | 1,
+ ret = anv_gem_reg_read(device->fd, TIMESTAMP | I915_REG_READ_8B_WA,
&pTimestamps[d]);
if (ret != 0) {
@@ -4451,6 +4674,30 @@ VkResult anv_GetCalibratedTimestampsEXT(
return VK_SUCCESS;
}
+void anv_GetPhysicalDeviceMultisamplePropertiesEXT(
+ VkPhysicalDevice physicalDevice,
+ VkSampleCountFlagBits samples,
+ VkMultisamplePropertiesEXT* pMultisampleProperties)
+{
+ ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
+
+ assert(pMultisampleProperties->sType ==
+ VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT);
+
+ VkExtent2D grid_size;
+ if (samples & isl_device_get_sample_counts(&physical_device->isl_dev)) {
+ grid_size.width = 1;
+ grid_size.height = 1;
+ } else {
+ grid_size.width = 0;
+ grid_size.height = 0;
+ }
+ pMultisampleProperties->maxSampleLocationGridSize = grid_size;
+
+ vk_foreach_struct(ext, pMultisampleProperties->pNext)
+ anv_debug_ignored_stype(ext->sType);
+}
+
/* vk_icd.h does not declare this function, so we declare it here to
* suppress Wmissing-prototypes.
*/