author     Jonathan Gray <jsg@cvs.openbsd.org>  2015-11-22 02:46:45 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>  2015-11-22 02:46:45 +0000
commit     0784c49c0f8fcc8b3abd4c9286d9fd8bc089dd7d (patch)
tree       a6394e3e264a0f80b57f4ce0f5d9526aa543d4b0 /lib/mesa/src/gallium/winsys/svga
parent     d91d0007eecf589ea5699e34aa4d748fce2c57b2 (diff)
import Mesa 11.0.6
Diffstat (limited to 'lib/mesa/src/gallium/winsys/svga')
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/Makefile.am               |   39
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/Makefile.in               |  890
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/Makefile.sources          |   20
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/SConscript                |   26
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c |  844
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/svga_drm_public.h         |   41
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_buffer.c              |  371
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_buffer.h              |   97
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_context.c             |  687
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_context.h             |   73
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_fence.c               |  434
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_fence.h               |   61
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_screen.c              |  132
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_screen.h              |  230
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_dri.c          |  381
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_ioctl.c        |  995
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_pools.c        |  242
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_svga.c         |  411
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_shader.c              |   64
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_shader.h              |   67
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_surface.c             |  207
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmw_surface.h             |   98
-rw-r--r--  lib/mesa/src/gallium/winsys/svga/drm/vmwgfx_drm.h              | 1063
23 files changed, 7473 insertions, 0 deletions
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/Makefile.am b/lib/mesa/src/gallium/winsys/svga/drm/Makefile.am
new file mode 100644
index 000000000..dfa973676
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/Makefile.am
@@ -0,0 +1,39 @@
+# Copyright © 2012 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+include Makefile.sources
+include $(top_srcdir)/src/gallium/Automake.inc
+
+AM_CFLAGS = \
+ -I$(top_srcdir)/src/gallium/drivers/svga \
+ -I$(top_srcdir)/src/gallium/drivers/svga/include \
+ $(GALLIUM_WINSYS_CFLAGS) \
+ $(LIBDRM_CFLAGS)
+
+# On some systems, -std= must be appended to CFLAGS so that it is the last -std= flag.
+CFLAGS += -std=gnu99
+
+noinst_LTLIBRARIES = libsvgadrm.la
+
+libsvgadrm_la_SOURCES = $(C_SOURCES)
+
+EXTRA_DIST = SConscript
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/Makefile.in b/lib/mesa/src/gallium/winsys/svga/drm/Makefile.in
new file mode 100644
index 000000000..0d447e765
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/Makefile.in
@@ -0,0 +1,890 @@
+# Makefile.in generated by automake 1.15 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2014 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Copyright © 2012 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+@HAVE_DRISW_TRUE@am__append_1 = \
+@HAVE_DRISW_TRUE@ $(top_builddir)/src/gallium/winsys/sw/dri/libswdri.la
+
+subdir = src/gallium/winsys/svga/drm
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/ax_check_gnu_make.m4 \
+ $(top_srcdir)/m4/ax_check_python_mako_module.m4 \
+ $(top_srcdir)/m4/ax_gcc_builtin.m4 \
+ $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/m4/ax_prog_bison.m4 \
+ $(top_srcdir)/m4/ax_prog_flex.m4 \
+ $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/libtool.m4 \
+ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \
+ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \
+ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libsvgadrm_la_LIBADD =
+am__objects_1 = pb_buffer_simple_fenced.lo vmw_buffer.lo \
+ vmw_context.lo vmw_fence.lo vmw_screen.lo vmw_screen_dri.lo \
+ vmw_screen_ioctl.lo vmw_screen_pools.lo vmw_screen_svga.lo \
+ vmw_surface.lo vmw_shader.lo
+am_libsvgadrm_la_OBJECTS = $(am__objects_1)
+libsvgadrm_la_OBJECTS = $(am_libsvgadrm_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+DEFAULT_INCLUDES = -I.@am__isrc@
+depcomp = $(SHELL) $(top_srcdir)/bin/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+ $(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
+SOURCES = $(libsvgadrm_la_SOURCES)
+DIST_SOURCES = $(libsvgadrm_la_SOURCES)
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.sources \
+ $(top_srcdir)/bin/depcomp \
+ $(top_srcdir)/src/gallium/Automake.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMDGPU_CFLAGS = @AMDGPU_CFLAGS@
+AMDGPU_LIBS = @AMDGPU_LIBS@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BSYMBOLIC = @BSYMBOLIC@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+
+# On some systems, -std= must be appended to CFLAGS so that it is the last -std= flag.
+CFLAGS = @CFLAGS@ -std=gnu99
+CLANG_RESOURCE_DIR = @CLANG_RESOURCE_DIR@
+CLOCK_LIB = @CLOCK_LIB@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+D3D_DRIVER_INSTALL_DIR = @D3D_DRIVER_INSTALL_DIR@
+DEFINES = @DEFINES@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DLOPEN_LIBS = @DLOPEN_LIBS@
+DRI2PROTO_CFLAGS = @DRI2PROTO_CFLAGS@
+DRI2PROTO_LIBS = @DRI2PROTO_LIBS@
+DRI3PROTO_CFLAGS = @DRI3PROTO_CFLAGS@
+DRI3PROTO_LIBS = @DRI3PROTO_LIBS@
+DRIGL_CFLAGS = @DRIGL_CFLAGS@
+DRIGL_LIBS = @DRIGL_LIBS@
+DRI_DRIVER_INSTALL_DIR = @DRI_DRIVER_INSTALL_DIR@
+DRI_DRIVER_SEARCH_DIR = @DRI_DRIVER_SEARCH_DIR@
+DRI_LIB_DEPS = @DRI_LIB_DEPS@
+DRI_PC_REQ_PRIV = @DRI_PC_REQ_PRIV@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGL_CFLAGS = @EGL_CFLAGS@
+EGL_CLIENT_APIS = @EGL_CLIENT_APIS@
+EGL_LIB_DEPS = @EGL_LIB_DEPS@
+EGL_NATIVE_PLATFORM = @EGL_NATIVE_PLATFORM@
+EGREP = @EGREP@
+ELF_LIB = @ELF_LIB@
+EXEEXT = @EXEEXT@
+EXPAT_CFLAGS = @EXPAT_CFLAGS@
+EXPAT_LIBS = @EXPAT_LIBS@
+FGREP = @FGREP@
+FREEDRENO_CFLAGS = @FREEDRENO_CFLAGS@
+FREEDRENO_LIBS = @FREEDRENO_LIBS@
+GALLIUM_PIPE_LOADER_DEFINES = @GALLIUM_PIPE_LOADER_DEFINES@
+GBM_PC_LIB_PRIV = @GBM_PC_LIB_PRIV@
+GBM_PC_REQ_PRIV = @GBM_PC_REQ_PRIV@
+GC_SECTIONS = @GC_SECTIONS@
+GLESv1_CM_LIB_DEPS = @GLESv1_CM_LIB_DEPS@
+GLESv1_CM_PC_LIB_PRIV = @GLESv1_CM_PC_LIB_PRIV@
+GLESv2_LIB_DEPS = @GLESv2_LIB_DEPS@
+GLESv2_PC_LIB_PRIV = @GLESv2_PC_LIB_PRIV@
+GLPROTO_CFLAGS = @GLPROTO_CFLAGS@
+GLPROTO_LIBS = @GLPROTO_LIBS@
+GLX_TLS = @GLX_TLS@
+GL_LIB = @GL_LIB@
+GL_LIB_DEPS = @GL_LIB_DEPS@
+GL_PC_CFLAGS = @GL_PC_CFLAGS@
+GL_PC_LIB_PRIV = @GL_PC_LIB_PRIV@
+GL_PC_REQ_PRIV = @GL_PC_REQ_PRIV@
+GREP = @GREP@
+HAVE_XF86VIDMODE = @HAVE_XF86VIDMODE@
+INDENT = @INDENT@
+INDENT_FLAGS = @INDENT_FLAGS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTEL_CFLAGS = @INTEL_CFLAGS@
+INTEL_LIBS = @INTEL_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LD_NO_UNDEFINED = @LD_NO_UNDEFINED@
+LEX = @LEX@
+LEXLIB = @LEXLIB@
+LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
+LIBCLC_INCLUDEDIR = @LIBCLC_INCLUDEDIR@
+LIBCLC_LIBEXECDIR = @LIBCLC_LIBEXECDIR@
+LIBDRM_CFLAGS = @LIBDRM_CFLAGS@
+LIBDRM_LIBS = @LIBDRM_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIBUDEV_CFLAGS = @LIBUDEV_CFLAGS@
+LIBUDEV_LIBS = @LIBUDEV_LIBS@
+LIB_DIR = @LIB_DIR@
+LIB_EXT = @LIB_EXT@
+LIPO = @LIPO@
+LLVM_BINDIR = @LLVM_BINDIR@
+LLVM_CFLAGS = @LLVM_CFLAGS@
+LLVM_CONFIG = @LLVM_CONFIG@
+LLVM_CPPFLAGS = @LLVM_CPPFLAGS@
+LLVM_CXXFLAGS = @LLVM_CXXFLAGS@
+LLVM_INCLUDEDIR = @LLVM_INCLUDEDIR@
+LLVM_LDFLAGS = @LLVM_LDFLAGS@
+LLVM_LIBDIR = @LLVM_LIBDIR@
+LLVM_LIBS = @LLVM_LIBS@
+LLVM_VERSION = @LLVM_VERSION@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MESA_LLVM = @MESA_LLVM@
+MKDIR_P = @MKDIR_P@
+MSVC2008_COMPAT_CFLAGS = @MSVC2008_COMPAT_CFLAGS@
+MSVC2008_COMPAT_CXXFLAGS = @MSVC2008_COMPAT_CXXFLAGS@
+MSVC2013_COMPAT_CFLAGS = @MSVC2013_COMPAT_CFLAGS@
+MSVC2013_COMPAT_CXXFLAGS = @MSVC2013_COMPAT_CXXFLAGS@
+NINE_MAJOR = @NINE_MAJOR@
+NINE_MINOR = @NINE_MINOR@
+NINE_TINY = @NINE_TINY@
+NINE_VERSION = @NINE_VERSION@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOUVEAU_CFLAGS = @NOUVEAU_CFLAGS@
+NOUVEAU_LIBS = @NOUVEAU_LIBS@
+NVVIEUX_CFLAGS = @NVVIEUX_CFLAGS@
+NVVIEUX_LIBS = @NVVIEUX_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OMX_CFLAGS = @OMX_CFLAGS@
+OMX_LIBS = @OMX_LIBS@
+OMX_LIB_INSTALL_DIR = @OMX_LIB_INSTALL_DIR@
+OPENCL_LIBNAME = @OPENCL_LIBNAME@
+OPENCL_VERSION = @OPENCL_VERSION@
+OSMESA_LIB = @OSMESA_LIB@
+OSMESA_LIB_DEPS = @OSMESA_LIB_DEPS@
+OSMESA_PC_LIB_PRIV = @OSMESA_PC_LIB_PRIV@
+OSMESA_PC_REQ = @OSMESA_PC_REQ@
+OSMESA_VERSION = @OSMESA_VERSION@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+POSIX_SHELL = @POSIX_SHELL@
+PRESENTPROTO_CFLAGS = @PRESENTPROTO_CFLAGS@
+PRESENTPROTO_LIBS = @PRESENTPROTO_LIBS@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+PYTHON2 = @PYTHON2@
+RADEON_CFLAGS = @RADEON_CFLAGS@
+RADEON_LIBS = @RADEON_LIBS@
+RANLIB = @RANLIB@
+RM = @RM@
+SED = @SED@
+SELINUX_CFLAGS = @SELINUX_CFLAGS@
+SELINUX_LIBS = @SELINUX_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE41_CFLAGS = @SSE41_CFLAGS@
+STRIP = @STRIP@
+VA_CFLAGS = @VA_CFLAGS@
+VA_LIBS = @VA_LIBS@
+VA_LIB_INSTALL_DIR = @VA_LIB_INSTALL_DIR@
+VA_MAJOR = @VA_MAJOR@
+VA_MINOR = @VA_MINOR@
+VDPAU_CFLAGS = @VDPAU_CFLAGS@
+VDPAU_LIBS = @VDPAU_LIBS@
+VDPAU_LIB_INSTALL_DIR = @VDPAU_LIB_INSTALL_DIR@
+VDPAU_MAJOR = @VDPAU_MAJOR@
+VDPAU_MINOR = @VDPAU_MINOR@
+VERSION = @VERSION@
+VG_LIB_DEPS = @VG_LIB_DEPS@
+VISIBILITY_CFLAGS = @VISIBILITY_CFLAGS@
+VISIBILITY_CXXFLAGS = @VISIBILITY_CXXFLAGS@
+VL_CFLAGS = @VL_CFLAGS@
+VL_LIBS = @VL_LIBS@
+WAYLAND_CFLAGS = @WAYLAND_CFLAGS@
+WAYLAND_LIBS = @WAYLAND_LIBS@
+WAYLAND_SCANNER = @WAYLAND_SCANNER@
+WAYLAND_SCANNER_CFLAGS = @WAYLAND_SCANNER_CFLAGS@
+WAYLAND_SCANNER_LIBS = @WAYLAND_SCANNER_LIBS@
+X11_INCLUDES = @X11_INCLUDES@
+XA_MAJOR = @XA_MAJOR@
+XA_MINOR = @XA_MINOR@
+XA_TINY = @XA_TINY@
+XA_VERSION = @XA_VERSION@
+XCB_DRI2_CFLAGS = @XCB_DRI2_CFLAGS@
+XCB_DRI2_LIBS = @XCB_DRI2_LIBS@
+XF86VIDMODE_CFLAGS = @XF86VIDMODE_CFLAGS@
+XF86VIDMODE_LIBS = @XF86VIDMODE_LIBS@
+XLIBGL_CFLAGS = @XLIBGL_CFLAGS@
+XLIBGL_LIBS = @XLIBGL_LIBS@
+XVMC_CFLAGS = @XVMC_CFLAGS@
+XVMC_LIBS = @XVMC_LIBS@
+XVMC_LIB_INSTALL_DIR = @XVMC_LIB_INSTALL_DIR@
+XVMC_MAJOR = @XVMC_MAJOR@
+XVMC_MINOR = @XVMC_MINOR@
+YACC = @YACC@
+YFLAGS = @YFLAGS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acv_mako_found = @acv_mako_found@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+ifGNUmake = @ifGNUmake@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+C_SOURCES := \
+ pb_buffer_simple_fenced.c \
+ svga_drm_public.h \
+ vmw_buffer.c \
+ vmw_buffer.h \
+ vmw_context.c \
+ vmw_context.h \
+ vmw_fence.c \
+ vmw_fence.h \
+ vmwgfx_drm.h \
+ vmw_screen.c \
+ vmw_screen_dri.c \
+ vmw_screen.h \
+ vmw_screen_ioctl.c \
+ vmw_screen_pools.c \
+ vmw_screen_svga.c \
+ vmw_surface.c \
+ vmw_surface.h \
+ vmw_shader.c \
+ vmw_shader.h
+
+GALLIUM_CFLAGS = \
+ -I$(top_srcdir)/include \
+ -I$(top_srcdir)/src \
+ -I$(top_srcdir)/src/gallium/include \
+ -I$(top_srcdir)/src/gallium/auxiliary \
+ $(DEFINES)
+
+
+# src/gallium/auxiliary must appear before src/gallium/drivers
+# because there are stupidly two rbug_context.h files in
+# different directories, and which one is included by the
+# preprocessor is determined by the ordering of the -I flags.
+GALLIUM_DRIVER_CFLAGS = \
+ -I$(srcdir)/include \
+ -I$(top_srcdir)/src \
+ -I$(top_srcdir)/include \
+ -I$(top_srcdir)/src/gallium/include \
+ -I$(top_srcdir)/src/gallium/auxiliary \
+ -I$(top_srcdir)/src/gallium/drivers \
+ -I$(top_srcdir)/src/gallium/winsys \
+ $(DEFINES) \
+ $(VISIBILITY_CFLAGS)
+
+GALLIUM_DRIVER_CXXFLAGS = \
+ -I$(srcdir)/include \
+ -I$(top_srcdir)/src \
+ -I$(top_srcdir)/include \
+ -I$(top_srcdir)/src/gallium/include \
+ -I$(top_srcdir)/src/gallium/auxiliary \
+ -I$(top_srcdir)/src/gallium/drivers \
+ -I$(top_srcdir)/src/gallium/winsys \
+ $(DEFINES) \
+ $(VISIBILITY_CXXFLAGS)
+
+GALLIUM_TARGET_CFLAGS = \
+ -I$(top_srcdir)/src \
+ -I$(top_srcdir)/include \
+ -I$(top_srcdir)/src/loader \
+ -I$(top_srcdir)/src/gallium/include \
+ -I$(top_srcdir)/src/gallium/auxiliary \
+ -I$(top_srcdir)/src/gallium/drivers \
+ -I$(top_srcdir)/src/gallium/winsys \
+ $(DEFINES) \
+ $(PTHREAD_CFLAGS) \
+ $(LIBDRM_CFLAGS) \
+ $(VISIBILITY_CFLAGS)
+
+GALLIUM_COMMON_LIB_DEPS = \
+ -lm \
+ $(CLOCK_LIB) \
+ $(PTHREAD_LIBS) \
+ $(DLOPEN_LIBS)
+
+GALLIUM_WINSYS_CFLAGS = \
+ -I$(top_srcdir)/src \
+ -I$(top_srcdir)/include \
+ -I$(top_srcdir)/src/gallium/include \
+ -I$(top_srcdir)/src/gallium/auxiliary \
+ $(DEFINES) \
+ $(VISIBILITY_CFLAGS)
+
+GALLIUM_PIPE_LOADER_WINSYS_LIBS = \
+ $(top_builddir)/src/gallium/winsys/sw/null/libws_null.la \
+ $(top_builddir)/src/gallium/winsys/sw/wrapper/libwsw.la \
+ $(am__append_1)
+AM_CFLAGS = \
+ -I$(top_srcdir)/src/gallium/drivers/svga \
+ -I$(top_srcdir)/src/gallium/drivers/svga/include \
+ $(GALLIUM_WINSYS_CFLAGS) \
+ $(LIBDRM_CFLAGS)
+
+noinst_LTLIBRARIES = libsvgadrm.la
+libsvgadrm_la_SOURCES = $(C_SOURCES)
+EXTRA_DIST = SConscript
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/Makefile.sources $(top_srcdir)/src/gallium/Automake.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/gallium/winsys/svga/drm/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/gallium/winsys/svga/drm/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(srcdir)/Makefile.sources $(top_srcdir)/src/gallium/Automake.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+ -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+ @list='$(noinst_LTLIBRARIES)'; \
+ locs=`for p in $$list; do echo $$p; done | \
+ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+ sort -u`; \
+ test -z "$$locs" || { \
+ echo rm -f $${locs}; \
+ rm -f $${locs}; \
+ }
+
+libsvgadrm.la: $(libsvgadrm_la_OBJECTS) $(libsvgadrm_la_DEPENDENCIES) $(EXTRA_libsvgadrm_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(LINK) $(libsvgadrm_la_OBJECTS) $(libsvgadrm_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pb_buffer_simple_fenced.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_buffer.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_context.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_fence.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_screen.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_screen_dri.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_screen_ioctl.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_screen_pools.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_screen_svga.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_shader.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vmw_surface.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
+@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
+@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
+@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+ mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+ clean-libtool clean-noinstLTLIBRARIES cscopelist-am ctags \
+ ctags-am distclean distclean-compile distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/Makefile.sources b/lib/mesa/src/gallium/winsys/svga/drm/Makefile.sources
new file mode 100644
index 000000000..ab2b9321f
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/Makefile.sources
@@ -0,0 +1,20 @@
+C_SOURCES := \
+ pb_buffer_simple_fenced.c \
+ svga_drm_public.h \
+ vmw_buffer.c \
+ vmw_buffer.h \
+ vmw_context.c \
+ vmw_context.h \
+ vmw_fence.c \
+ vmw_fence.h \
+ vmwgfx_drm.h \
+ vmw_screen.c \
+ vmw_screen_dri.c \
+ vmw_screen.h \
+ vmw_screen_ioctl.c \
+ vmw_screen_pools.c \
+ vmw_screen_svga.c \
+ vmw_surface.c \
+ vmw_surface.h \
+ vmw_shader.c \
+ vmw_shader.h
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/SConscript b/lib/mesa/src/gallium/winsys/svga/drm/SConscript
new file mode 100644
index 000000000..25850531d
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/SConscript
@@ -0,0 +1,26 @@
+Import('*')
+
+env = env.Clone()
+
+env.PkgUseModules('DRM')
+
+if env['gcc'] or env['clang'] or env['icc']:
+ env.Append(CCFLAGS = ['-fvisibility=hidden'])
+ env.Append(CPPDEFINES = [
+ 'HAVE_STDINT_H',
+ '-D_FILE_OFFSET_BITS=64',
+ ])
+
+env.Prepend(CPPPATH = [
+ '#/src/gallium/drivers/svga',
+ '#/src/gallium/drivers/svga/include',
+])
+
+sources = env.ParseSourceList('Makefile.sources', 'C_SOURCES')
+
+svgadrm = env.ConvenienceLibrary(
+ target = 'svgadrm',
+ source = sources,
+)
+
+Export('svgadrm')
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c b/lib/mesa/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
new file mode 100644
index 000000000..5ef95f3d6
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
@@ -0,0 +1,844 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Implementation of fenced buffers.
+ *
+ * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
+ * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
+ */
+
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+#include <unistd.h>
+#include <sched.h>
+#endif
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_defines.h"
+#include "util/u_debug.h"
+#include "os/os_thread.h"
+#include "util/u_memory.h"
+#include "util/list.h"
+
+#include "pipebuffer/pb_buffer.h"
+#include "pipebuffer/pb_bufmgr.h"
+#include "pipebuffer/pb_buffer_fenced.h"
+#include "vmw_screen.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct fenced_manager
+{
+ struct pb_manager base;
+ struct pb_manager *provider;
+ struct pb_fence_ops *ops;
+
+ /**
+ * Following members are mutable and protected by this mutex.
+ */
+ pipe_mutex mutex;
+
+ /**
+ * Fenced buffer list.
+ *
+ * All fenced buffers are placed in this list, ordered from the oldest
+ * fence to the newest fence.
+ */
+ struct list_head fenced;
+ pb_size num_fenced;
+
+ struct list_head unfenced;
+ pb_size num_unfenced;
+
+};
+
+
+/**
+ * Fenced buffer.
+ *
+ * Wrapper around a pipe buffer which adds fencing and reference counting.
+ */
+struct fenced_buffer
+{
+ /*
+ * Immutable members.
+ */
+
+ struct pb_buffer base;
+ struct fenced_manager *mgr;
+
+ /*
+ * Following members are mutable and protected by fenced_manager::mutex.
+ */
+
+ struct list_head head;
+
+ /**
+ * Buffer with storage.
+ */
+ struct pb_buffer *buffer;
+ pb_size size;
+
+ /**
+ * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
+ * buffer usage.
+ */
+ unsigned flags;
+
+ unsigned mapcount;
+
+ struct pb_validate *vl;
+ unsigned validation_flags;
+
+ struct pipe_fence_handle *fence;
+};
+
+
+static inline struct fenced_manager *
+fenced_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct fenced_manager *)mgr;
+}
+
+
+static inline struct fenced_buffer *
+fenced_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct fenced_buffer *)buf;
+}
+
+
+static void
+fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);
+
+static enum pipe_error
+fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+ struct fenced_buffer *fenced_buf,
+ const struct pb_desc *desc,
+ boolean wait);
+/**
+ * Dump the fenced buffer list.
+ *
+ * Useful to understand failures to allocate buffers.
+ */
+static void
+fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
+{
+#ifdef DEBUG
+ struct pb_fence_ops *ops = fenced_mgr->ops;
+ struct list_head *curr, *next;
+ struct fenced_buffer *fenced_buf;
+
+ debug_printf("%10s %7s %8s %7s %10s %s\n",
+ "buffer", "size", "refcount", "storage", "fence", "signalled");
+
+ curr = fenced_mgr->unfenced.next;
+ next = curr->next;
+ while(curr != &fenced_mgr->unfenced) {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ assert(!fenced_buf->fence);
+ debug_printf("%10p %7u %8u %7s\n",
+ (void *) fenced_buf,
+ fenced_buf->base.size,
+ p_atomic_read(&fenced_buf->base.reference.count),
+ fenced_buf->buffer ? "gpu" : "none");
+ curr = next;
+ next = curr->next;
+ }
+
+ curr = fenced_mgr->fenced.next;
+ next = curr->next;
+ while(curr != &fenced_mgr->fenced) {
+ int signaled;
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ assert(fenced_buf->buffer);
+ signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+ debug_printf("%10p %7u %8u %7s %10p %s\n",
+ (void *) fenced_buf,
+ fenced_buf->base.size,
+ p_atomic_read(&fenced_buf->base.reference.count),
+ "gpu",
+ (void *) fenced_buf->fence,
+ signaled == 0 ? "y" : "n");
+ curr = next;
+ next = curr->next;
+ }
+#else
+ (void)fenced_mgr;
+#endif
+}
+
+
+static inline void
+fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
+ struct fenced_buffer *fenced_buf)
+{
+ assert(!pipe_is_referenced(&fenced_buf->base.reference));
+
+ assert(!fenced_buf->fence);
+ assert(fenced_buf->head.prev);
+ assert(fenced_buf->head.next);
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_mgr->num_unfenced);
+ --fenced_mgr->num_unfenced;
+
+ fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
+
+ FREE(fenced_buf);
+}
+
+
+/**
+ * Add the buffer to the fenced list.
+ *
+ * Reference count should be incremented before calling this function.
+ */
+static inline void
+fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
+ struct fenced_buffer *fenced_buf)
+{
+ assert(pipe_is_referenced(&fenced_buf->base.reference));
+ assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
+ assert(fenced_buf->fence);
+
+ p_atomic_inc(&fenced_buf->base.reference.count);
+
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_mgr->num_unfenced);
+ --fenced_mgr->num_unfenced;
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
+ ++fenced_mgr->num_fenced;
+}
+
+
+/**
+ * Remove the buffer from the fenced list, and potentially destroy the buffer
+ * if the reference count reaches zero.
+ *
+ * Returns TRUE if the buffer was destroyed.
+ */
+static inline boolean
+fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
+ struct fenced_buffer *fenced_buf)
+{
+ struct pb_fence_ops *ops = fenced_mgr->ops;
+
+ assert(fenced_buf->fence);
+ assert(fenced_buf->mgr == fenced_mgr);
+
+ ops->fence_reference(ops, &fenced_buf->fence, NULL);
+ fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;
+
+ assert(fenced_buf->head.prev);
+ assert(fenced_buf->head.next);
+
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_mgr->num_fenced);
+ --fenced_mgr->num_fenced;
+
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+ ++fenced_mgr->num_unfenced;
+
+ if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
+ fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+/**
+ * Wait for the fence to expire, and remove it from the fenced list.
+ *
+ * This function will release and re-acquire the mutex, so any copy of mutable
+ * state must be discarded after calling it.
+ */
+static inline enum pipe_error
+fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
+ struct fenced_buffer *fenced_buf)
+{
+ struct pb_fence_ops *ops = fenced_mgr->ops;
+ enum pipe_error ret = PIPE_ERROR;
+
+#if 0
+ debug_warning("waiting for GPU");
+#endif
+
+ assert(pipe_is_referenced(&fenced_buf->base.reference));
+ assert(fenced_buf->fence);
+
+ if(fenced_buf->fence) {
+ struct pipe_fence_handle *fence = NULL;
+ int finished;
+ boolean proceed;
+
+ ops->fence_reference(ops, &fence, fenced_buf->fence);
+
+ pipe_mutex_unlock(fenced_mgr->mutex);
+
+ finished = ops->fence_finish(ops, fenced_buf->fence, 0);
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ assert(pipe_is_referenced(&fenced_buf->base.reference));
+
+ /*
+ * Only proceed if the fence object didn't change in the meanwhile.
+ * Otherwise assume the work has been already carried out by another
+ * thread that re-acquired the lock before us.
+ */
+ proceed = fence == fenced_buf->fence ? TRUE : FALSE;
+
+ ops->fence_reference(ops, &fence, NULL);
+
+ if(proceed && finished == 0) {
+ /*
+ * Remove from the fenced list
+ */
+
+ boolean destroyed;
+
+ destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+
+ /* TODO: also remove subsequent buffers with the same fence? */
+
+ assert(!destroyed);
+
+ fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;
+
+ ret = PIPE_OK;
+ }
+ }
+
+ return ret;
+}
+
+
+/**
+ * Remove as many fenced buffers from the fenced list as possible.
+ *
+ * Returns TRUE if at least one buffer was removed.
+ */
+static boolean
+fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
+ boolean wait)
+{
+ struct pb_fence_ops *ops = fenced_mgr->ops;
+ struct list_head *curr, *next;
+ struct fenced_buffer *fenced_buf;
+ struct pipe_fence_handle *prev_fence = NULL;
+ boolean ret = FALSE;
+
+ curr = fenced_mgr->fenced.next;
+ next = curr->next;
+ while(curr != &fenced_mgr->fenced) {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+
+ if(fenced_buf->fence != prev_fence) {
+ int signaled;
+
+ if (wait) {
+ signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
+
+ /*
+ * Don't return just now. Instead preemptively check if the
+ * following buffers' fences already expired,
+ * without further waits.
+ */
+ wait = FALSE;
+ }
+ else {
+ signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+ }
+
+ if (signaled != 0) {
+ return ret;
+ }
+
+ prev_fence = fenced_buf->fence;
+ }
+ else {
+ /* This buffer's fence object is identical to the previous buffer's
+ * fence object, so no need to check the fence again.
+ */
+ assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
+ }
+
+ fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+
+ ret = TRUE;
+
+ curr = next;
+ next = curr->next;
+ }
+
+ return ret;
+}
+
+
+/**
+ * Destroy the GPU storage.
+ */
+static void
+fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
+{
+ if(fenced_buf->buffer) {
+ pb_reference(&fenced_buf->buffer, NULL);
+ }
+}
+
+
+/**
+ * Try to create GPU storage for this buffer.
+ *
+ * This function is a shorthand around pb_manager::create_buffer for
+ * fenced_buffer_create_gpu_storage_locked()'s benefit.
+ */
+static inline boolean
+fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+ struct fenced_buffer *fenced_buf,
+ const struct pb_desc *desc)
+{
+ struct pb_manager *provider = fenced_mgr->provider;
+
+ assert(!fenced_buf->buffer);
+
+ fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
+ fenced_buf->size, desc);
+ return fenced_buf->buffer ? TRUE : FALSE;
+}
+
+
+/**
+ * Create GPU storage for this buffer.
+ */
+static enum pipe_error
+fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+ struct fenced_buffer *fenced_buf,
+ const struct pb_desc *desc,
+ boolean wait)
+{
+ assert(!fenced_buf->buffer);
+
+ /*
+ * Check for signaled buffers before trying to allocate.
+ */
+ fenced_manager_check_signalled_locked(fenced_mgr, FALSE);
+
+ fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf, desc);
+
+ /*
+ * Keep trying while there is some sort of progress:
+ * - fences are expiring,
+ * - or buffers are being swapped out from GPU memory into CPU memory.
+ */
+ while(!fenced_buf->buffer &&
+ (fenced_manager_check_signalled_locked(fenced_mgr, FALSE))) {
+ fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf,
+ desc);
+ }
+
+ if(!fenced_buf->buffer && wait) {
+ /*
+ * Same as before, but this time around, wait to free buffers if
+ * necessary.
+ */
+ while(!fenced_buf->buffer &&
+ (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))) {
+ fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf,
+ desc);
+ }
+ }
+
+ if(!fenced_buf->buffer) {
+ if(0)
+ fenced_manager_dump_locked(fenced_mgr);
+
+ /* give up */
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ return PIPE_OK;
+}
+
+
+static void
+fenced_buffer_destroy(struct pb_buffer *buf)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+
+ assert(!pipe_is_referenced(&fenced_buf->base.reference));
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
+
+ pipe_mutex_unlock(fenced_mgr->mutex);
+}
+
+
+static void *
+fenced_buffer_map(struct pb_buffer *buf,
+ unsigned flags, void *flush_ctx)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+ struct pb_fence_ops *ops = fenced_mgr->ops;
+ void *map = NULL;
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ assert(!(flags & PB_USAGE_GPU_READ_WRITE));
+
+ /*
+ * Serialize writes.
+ */
+ while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
+ ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
+ (flags & PB_USAGE_CPU_WRITE))) {
+
+ /*
+ * Don't wait for the GPU to finish accessing it,
+ * if blocking is forbidden.
+ */
+ if((flags & PB_USAGE_DONTBLOCK) &&
+ ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
+ goto done;
+ }
+
+ if (flags & PB_USAGE_UNSYNCHRONIZED) {
+ break;
+ }
+
+ /*
+ * Wait for the GPU to finish accessing. This will release and re-acquire
+ * the mutex, so all copies of mutable state must be discarded.
+ */
+ fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
+ }
+
+ map = pb_map(fenced_buf->buffer, flags, flush_ctx);
+
+ if(map) {
+ ++fenced_buf->mapcount;
+ fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
+ }
+
+done:
+ pipe_mutex_unlock(fenced_mgr->mutex);
+
+ return map;
+}
+
+
+static void
+fenced_buffer_unmap(struct pb_buffer *buf)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ assert(fenced_buf->mapcount);
+ if(fenced_buf->mapcount) {
+ if (fenced_buf->buffer)
+ pb_unmap(fenced_buf->buffer);
+ --fenced_buf->mapcount;
+ if(!fenced_buf->mapcount)
+ fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
+ }
+
+ pipe_mutex_unlock(fenced_mgr->mutex);
+}
+
+
+static enum pipe_error
+fenced_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+ enum pipe_error ret;
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ if(!vl) {
+ /* invalidate */
+ fenced_buf->vl = NULL;
+ fenced_buf->validation_flags = 0;
+ ret = PIPE_OK;
+ goto done;
+ }
+
+ assert(flags & PB_USAGE_GPU_READ_WRITE);
+ assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
+ flags &= PB_USAGE_GPU_READ_WRITE;
+
+ /* Buffer cannot be validated in two different lists */
+ if(fenced_buf->vl && fenced_buf->vl != vl) {
+ ret = PIPE_ERROR_RETRY;
+ goto done;
+ }
+
+ if(fenced_buf->vl == vl &&
+ (fenced_buf->validation_flags & flags) == flags) {
+ /* Nothing to do -- buffer already validated */
+ ret = PIPE_OK;
+ goto done;
+ }
+
+ ret = pb_validate(fenced_buf->buffer, vl, flags);
+ if (ret != PIPE_OK)
+ goto done;
+
+ fenced_buf->vl = vl;
+ fenced_buf->validation_flags |= flags;
+
+done:
+ pipe_mutex_unlock(fenced_mgr->mutex);
+
+ return ret;
+}
+
+
+static void
+fenced_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+ struct pb_fence_ops *ops = fenced_mgr->ops;
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ assert(pipe_is_referenced(&fenced_buf->base.reference));
+ assert(fenced_buf->buffer);
+
+ if(fence != fenced_buf->fence) {
+ assert(fenced_buf->vl);
+ assert(fenced_buf->validation_flags);
+
+ if (fenced_buf->fence) {
+ boolean destroyed;
+ destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+ assert(!destroyed);
+ }
+ if (fence) {
+ ops->fence_reference(ops, &fenced_buf->fence, fence);
+ fenced_buf->flags |= fenced_buf->validation_flags;
+ fenced_buffer_add_locked(fenced_mgr, fenced_buf);
+ }
+
+ pb_fence(fenced_buf->buffer, fence);
+
+ fenced_buf->vl = NULL;
+ fenced_buf->validation_flags = 0;
+ }
+
+ pipe_mutex_unlock(fenced_mgr->mutex);
+}
+
+
+static void
+fenced_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ assert(fenced_buf->buffer);
+
+ if(fenced_buf->buffer)
+ pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
+ else {
+ *base_buf = buf;
+ *offset = 0;
+ }
+
+ pipe_mutex_unlock(fenced_mgr->mutex);
+}
+
+
+static const struct pb_vtbl
+fenced_buffer_vtbl = {
+ fenced_buffer_destroy,
+ fenced_buffer_map,
+ fenced_buffer_unmap,
+ fenced_buffer_validate,
+ fenced_buffer_fence,
+ fenced_buffer_get_base_buffer
+};
+
+
+/**
+ * Wrap a buffer in a fenced buffer.
+ */
+static struct pb_buffer *
+fenced_bufmgr_create_buffer(struct pb_manager *mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct fenced_manager *fenced_mgr = fenced_manager(mgr);
+ struct fenced_buffer *fenced_buf;
+ enum pipe_error ret;
+
+ fenced_buf = CALLOC_STRUCT(fenced_buffer);
+ if(!fenced_buf)
+ goto no_buffer;
+
+ pipe_reference_init(&fenced_buf->base.reference, 1);
+ fenced_buf->base.alignment = desc->alignment;
+ fenced_buf->base.usage = desc->usage;
+ fenced_buf->base.size = size;
+ fenced_buf->size = size;
+
+ fenced_buf->base.vtbl = &fenced_buffer_vtbl;
+ fenced_buf->mgr = fenced_mgr;
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ /*
+ * Try to create GPU storage, waiting for fences to expire if needed.
+ */
+ ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf,
+ desc, TRUE);
+
+ /*
+ * Give up.
+ */
+ if(ret != PIPE_OK) {
+ goto no_storage;
+ }
+
+ assert(fenced_buf->buffer);
+
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+ ++fenced_mgr->num_unfenced;
+ pipe_mutex_unlock(fenced_mgr->mutex);
+
+ return &fenced_buf->base;
+
+no_storage:
+ pipe_mutex_unlock(fenced_mgr->mutex);
+ FREE(fenced_buf);
+no_buffer:
+ return NULL;
+}
+
+
+static void
+fenced_bufmgr_flush(struct pb_manager *mgr)
+{
+ struct fenced_manager *fenced_mgr = fenced_manager(mgr);
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+ while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
+ ;
+ pipe_mutex_unlock(fenced_mgr->mutex);
+
+ assert(fenced_mgr->provider->flush);
+ if(fenced_mgr->provider->flush)
+ fenced_mgr->provider->flush(fenced_mgr->provider);
+}
+
+
+static void
+fenced_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct fenced_manager *fenced_mgr = fenced_manager(mgr);
+
+ pipe_mutex_lock(fenced_mgr->mutex);
+
+ /* Wait on outstanding fences */
+ while (fenced_mgr->num_fenced) {
+ pipe_mutex_unlock(fenced_mgr->mutex);
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+ sched_yield();
+#endif
+ pipe_mutex_lock(fenced_mgr->mutex);
+ while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
+ ;
+ }
+
+#ifdef DEBUG
+ /*assert(!fenced_mgr->num_unfenced);*/
+#endif
+
+ pipe_mutex_unlock(fenced_mgr->mutex);
+ pipe_mutex_destroy(fenced_mgr->mutex);
+
+ FREE(fenced_mgr);
+}
+
+
+struct pb_manager *
+simple_fenced_bufmgr_create(struct pb_manager *provider,
+ struct pb_fence_ops *ops)
+{
+ struct fenced_manager *fenced_mgr;
+
+ if(!provider)
+ return NULL;
+
+ fenced_mgr = CALLOC_STRUCT(fenced_manager);
+ if (!fenced_mgr)
+ return NULL;
+
+ fenced_mgr->base.destroy = fenced_bufmgr_destroy;
+ fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
+ fenced_mgr->base.flush = fenced_bufmgr_flush;
+
+ fenced_mgr->provider = provider;
+ fenced_mgr->ops = ops;
+
+ LIST_INITHEAD(&fenced_mgr->fenced);
+ fenced_mgr->num_fenced = 0;
+
+ LIST_INITHEAD(&fenced_mgr->unfenced);
+ fenced_mgr->num_unfenced = 0;
+
+ pipe_mutex_init(fenced_mgr->mutex);
+
+ return &fenced_mgr->base;
+}
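
The manager above is consumed through the generic pb_manager interface it fills in (destroy, create_buffer, flush). What follows is a minimal usage sketch, not part of this diff: "provider" stands for any pb_manager (for example the GMR manager from vmw_buffer.c below) and "ops" for the winsys fence ops; both names are placeholders.

/* Sketch (assumptions noted above): allocate one buffer through the
 * simple fenced manager. */
#include <string.h>
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"

static void
example_alloc(struct pb_manager *provider, struct pb_fence_ops *ops)
{
   struct pb_manager *fenced_mgr = simple_fenced_bufmgr_create(provider, ops);
   struct pb_desc desc;
   struct pb_buffer *buf;

   if (!fenced_mgr)
      return;

   memset(&desc, 0, sizeof desc);
   desc.alignment = 4096;
   desc.usage = PB_USAGE_GPU_READ_WRITE;

   /* May block: create_buffer retries while fences keep expiring, and
    * finally waits on the oldest fences (the wait == TRUE path above). */
   buf = fenced_mgr->create_buffer(fenced_mgr, 64 * 1024, &desc);
   if (buf)
      pb_reference(&buf, NULL);   /* drop the only reference */

   fenced_mgr->destroy(fenced_mgr);
}

The point of the wrapper is visible in the sketch: callers allocate as if from the provider, while stalling on fences and list bookkeeping happen behind the pb_manager interface.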
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/svga_drm_public.h b/lib/mesa/src/gallium/winsys/svga/drm/svga_drm_public.h
new file mode 100644
index 000000000..e98c89da1
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/svga_drm_public.h
@@ -0,0 +1,41 @@
+/**********************************************************
+ * Copyright 2010 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/**
+ * @file
+ * VMware SVGA DRM winsys public interface. Used by targets to create a stack.
+ *
+ * @author Jakob Bornecrantz <jakob@vmware.com>
+ */
+
+#ifndef SVGA_DRM_PUBLIC_H_
+#define SVGA_DRM_PUBLIC_H_
+
+struct svga_winsys_screen;
+
+struct svga_winsys_screen *
+svga_drm_winsys_screen_create(int fd);
+
+#endif /* SVGA_DRM_PUBLIC_H_ */
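
As the header comment says, targets use this entry point to assemble the driver stack. A hedged sketch of that call chain follows; svga_screen_create() is the svga driver's screen constructor from svga_public.h, and the actual target glue differs per target.

/* Sketch (assumptions noted above): build a pipe_screen on top of
 * this winsys from a DRM file descriptor. */
#include "svga_drm_public.h"
#include "svga/svga_public.h"

struct pipe_screen *
example_create_screen(int fd)
{
   struct svga_winsys_screen *sws = svga_drm_winsys_screen_create(fd);

   if (!sws)
      return NULL;

   /* The driver takes ownership of the winsys screen. */
   return svga_screen_create(sws);
}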
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_buffer.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_buffer.c
new file mode 100644
index 000000000..7eab3d050
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_buffer.c
@@ -0,0 +1,371 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/**
+ * @file
+ * SVGA buffer manager for Guest Memory Regions (GMRs).
+ *
+ * GMRs are used for pixel and vertex data upload/download to/from the virtual
+ * SVGA hardware. There is a limited number of GMRs available, and
+ * creating/destroying them is also a slow operation, so we must suballocate
+ * them.
+ *
+ * This file implements a pipebuffer library's buffer manager, so that we can
+ * use pipebuffer's suballocation, fencing, and debugging facilities with GMRs.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "svga_cmd.h"
+
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "pipebuffer/pb_buffer.h"
+#include "pipebuffer/pb_bufmgr.h"
+
+#include "svga_winsys.h"
+
+#include "vmw_screen.h"
+#include "vmw_buffer.h"
+
+struct vmw_gmr_bufmgr;
+
+
+struct vmw_gmr_buffer
+{
+ struct pb_buffer base;
+
+ struct vmw_gmr_bufmgr *mgr;
+
+ struct vmw_region *region;
+ void *map;
+ unsigned map_flags;
+};
+
+
+extern const struct pb_vtbl vmw_gmr_buffer_vtbl;
+
+
+static inline struct vmw_gmr_buffer *
+vmw_gmr_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ assert(buf->vtbl == &vmw_gmr_buffer_vtbl);
+ return (struct vmw_gmr_buffer *)buf;
+}
+
+
+struct vmw_gmr_bufmgr
+{
+ struct pb_manager base;
+
+ struct vmw_winsys_screen *vws;
+};
+
+
+static inline struct vmw_gmr_bufmgr *
+vmw_gmr_bufmgr(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct vmw_gmr_bufmgr *)mgr;
+}
+
+
+static void
+vmw_gmr_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
+
+ vmw_ioctl_region_unmap(buf->region);
+
+ vmw_ioctl_region_destroy(buf->region);
+
+ FREE(buf);
+}
+
+
+static void *
+vmw_gmr_buffer_map(struct pb_buffer *_buf,
+ unsigned flags,
+ void *flush_ctx)
+{
+ struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
+ int ret;
+
+ if (!buf->map)
+ buf->map = vmw_ioctl_region_map(buf->region);
+
+ if (!buf->map)
+ return NULL;
+
+ if ((_buf->usage & VMW_BUFFER_USAGE_SYNC) &&
+ !(flags & PB_USAGE_UNSYNCHRONIZED)) {
+ ret = vmw_ioctl_syncforcpu(buf->region,
+ !!(flags & PB_USAGE_DONTBLOCK),
+ !(flags & PB_USAGE_CPU_WRITE),
+ FALSE);
+ if (ret)
+ return NULL;
+ }
+
+ /* Remember the map flags so unmap can release the CPU sync correctly. */
+ buf->map_flags = flags;
+
+ return buf->map;
+}
+
+
+static void
+vmw_gmr_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
+ unsigned flags = buf->map_flags;
+
+ if ((_buf->usage & VMW_BUFFER_USAGE_SYNC) &&
+ !(flags & PB_USAGE_UNSYNCHRONIZED)) {
+ vmw_ioctl_releasefromcpu(buf->region,
+ !(flags & PB_USAGE_CPU_WRITE),
+ FALSE);
+ }
+}
+
+
+static void
+vmw_gmr_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ *base_buf = buf;
+ *offset = 0;
+}
+
+
+static enum pipe_error
+vmw_gmr_buffer_validate( struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags )
+{
+ /* Always pinned */
+ return PIPE_OK;
+}
+
+
+static void
+vmw_gmr_buffer_fence( struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence )
+{
+ /* We don't need to do anything, as the pipebuffer library
+ * will take care of delaying the destruction of fenced buffers */
+}
+
+
+const struct pb_vtbl vmw_gmr_buffer_vtbl = {
+ vmw_gmr_buffer_destroy,
+ vmw_gmr_buffer_map,
+ vmw_gmr_buffer_unmap,
+ vmw_gmr_buffer_validate,
+ vmw_gmr_buffer_fence,
+ vmw_gmr_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+vmw_gmr_bufmgr_create_buffer(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *pb_desc)
+{
+ struct vmw_gmr_bufmgr *mgr = vmw_gmr_bufmgr(_mgr);
+ struct vmw_winsys_screen *vws = mgr->vws;
+ struct vmw_gmr_buffer *buf;
+ const struct vmw_buffer_desc *desc =
+ (const struct vmw_buffer_desc *) pb_desc;
+
+ buf = CALLOC_STRUCT(vmw_gmr_buffer);
+ if(!buf)
+ goto error1;
+
+ pipe_reference_init(&buf->base.reference, 1);
+ buf->base.alignment = pb_desc->alignment;
+ buf->base.usage = pb_desc->usage & ~VMW_BUFFER_USAGE_SHARED;
+ buf->base.vtbl = &vmw_gmr_buffer_vtbl;
+ buf->mgr = mgr;
+ buf->base.size = size;
+ if ((pb_desc->usage & VMW_BUFFER_USAGE_SHARED) && desc->region) {
+ buf->region = desc->region;
+ } else {
+ buf->region = vmw_ioctl_region_create(vws, size);
+ if(!buf->region)
+ goto error2;
+ }
+
+ return &buf->base;
+error2:
+ FREE(buf);
+error1:
+ return NULL;
+}
+
+
+static void
+vmw_gmr_bufmgr_flush(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static void
+vmw_gmr_bufmgr_destroy(struct pb_manager *_mgr)
+{
+ struct vmw_gmr_bufmgr *mgr = vmw_gmr_bufmgr(_mgr);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+vmw_gmr_bufmgr_create(struct vmw_winsys_screen *vws)
+{
+ struct vmw_gmr_bufmgr *mgr;
+
+ mgr = CALLOC_STRUCT(vmw_gmr_bufmgr);
+ if(!mgr)
+ return NULL;
+
+ mgr->base.destroy = vmw_gmr_bufmgr_destroy;
+ mgr->base.create_buffer = vmw_gmr_bufmgr_create_buffer;
+ mgr->base.flush = vmw_gmr_bufmgr_flush;
+
+ mgr->vws = vws;
+
+ return &mgr->base;
+}
+
+
+boolean
+vmw_gmr_bufmgr_region_ptr(struct pb_buffer *buf,
+ struct SVGAGuestPtr *ptr)
+{
+ struct pb_buffer *base_buf;
+ unsigned offset = 0;
+ struct vmw_gmr_buffer *gmr_buf;
+
+ pb_get_base_buffer( buf, &base_buf, &offset );
+
+ gmr_buf = vmw_gmr_buffer(base_buf);
+ if(!gmr_buf)
+ return FALSE;
+
+ *ptr = vmw_ioctl_region_ptr(gmr_buf->region);
+
+ ptr->offset += offset;
+
+ return TRUE;
+}
+
+#ifdef DEBUG
+struct svga_winsys_buffer {
+ struct pb_buffer *pb_buf;
+ struct debug_flush_buf *fbuf;
+};
+
+struct pb_buffer *
+vmw_pb_buffer(struct svga_winsys_buffer *buffer)
+{
+ assert(buffer);
+ return buffer->pb_buf;
+}
+
+struct svga_winsys_buffer *
+vmw_svga_winsys_buffer_wrap(struct pb_buffer *buffer)
+{
+ struct svga_winsys_buffer *buf;
+
+ if (!buffer)
+ return NULL;
+
+ buf = CALLOC_STRUCT(svga_winsys_buffer);
+ if (!buf) {
+ pb_reference(&buffer, NULL);
+ return NULL;
+ }
+
+ buf->pb_buf = buffer;
+ buf->fbuf = debug_flush_buf_create(TRUE, VMW_DEBUG_FLUSH_STACK);
+ return buf;
+}
+
+struct debug_flush_buf *
+vmw_debug_flush_buf(struct svga_winsys_buffer *buffer)
+{
+ return buffer->fbuf;
+}
+
+#endif
+
+void
+vmw_svga_winsys_buffer_destroy(struct svga_winsys_screen *sws,
+ struct svga_winsys_buffer *buf)
+{
+ struct pb_buffer *pbuf = vmw_pb_buffer(buf);
+ (void)sws;
+ pb_reference(&pbuf, NULL);
+#ifdef DEBUG
+ debug_flush_buf_reference(&buf->fbuf, NULL);
+ FREE(buf);
+#endif
+}
+
+void *
+vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
+ struct svga_winsys_buffer *buf,
+ unsigned flags)
+{
+ void *map;
+
+ (void)sws;
+ if (flags & PIPE_TRANSFER_UNSYNCHRONIZED)
+ flags &= ~PIPE_TRANSFER_DONTBLOCK;
+
+ map = pb_map(vmw_pb_buffer(buf), flags, NULL);
+
+#ifdef DEBUG
+ if (map != NULL)
+ debug_flush_map(buf->fbuf, flags);
+#endif
+
+ return map;
+}
+
+
+void
+vmw_svga_winsys_buffer_unmap(struct svga_winsys_screen *sws,
+ struct svga_winsys_buffer *buf)
+{
+ (void)sws;
+
+#ifdef DEBUG
+ debug_flush_unmap(buf->fbuf);
+#endif
+
+ pb_unmap(vmw_pb_buffer(buf));
+}
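
To make the map/unmap contract above concrete, here is a minimal sketch of a CPU write through the winsys wrappers; 'sws' and 'buf' are assumed to have been created elsewhere, and the helper name is hypothetical:

    #include <string.h>

    static void
    clear_buffer_head(struct svga_winsys_screen *sws,
                      struct svga_winsys_buffer *buf)
    {
       void *data = vmw_svga_winsys_buffer_map(sws, buf, PIPE_TRANSFER_WRITE);
       if (data) {
          memset(data, 0, 64);  /* CPU write; synchronized for USAGE_SYNC buffers */
          vmw_svga_winsys_buffer_unmap(sws, buf);
       }
    }
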
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_buffer.h b/lib/mesa/src/gallium/winsys/svga/drm/vmw_buffer.h
new file mode 100644
index 000000000..b9cbb2554
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_buffer.h
@@ -0,0 +1,97 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+
+#ifndef VMW_BUFFER_H_
+#define VMW_BUFFER_H_
+
+#include <assert.h>
+#include "pipe/p_compiler.h"
+#include "pipebuffer/pb_bufmgr.h"
+#include "util/u_debug_flush.h"
+
+
+#define VMW_BUFFER_USAGE_SHARED (1 << 20)
+#define VMW_BUFFER_USAGE_SYNC (1 << 21)
+
+struct SVGAGuestPtr;
+struct pb_buffer;
+struct pb_manager;
+struct svga_winsys_buffer;
+struct svga_winsys_surface;
+struct vmw_winsys_screen;
+
+struct vmw_buffer_desc {
+ struct pb_desc pb_desc;
+ struct vmw_region *region;
+};
+
+
+#ifdef DEBUG
+
+struct pb_buffer *
+vmw_pb_buffer(struct svga_winsys_buffer *buffer);
+struct svga_winsys_buffer *
+vmw_svga_winsys_buffer_wrap(struct pb_buffer *buffer);
+struct debug_flush_buf *
+vmw_debug_flush_buf(struct svga_winsys_buffer *buffer);
+
+#else
+static inline struct pb_buffer *
+vmw_pb_buffer(struct svga_winsys_buffer *buffer)
+{
+ assert(buffer);
+ return (struct pb_buffer *)buffer;
+}
+
+
+static inline struct svga_winsys_buffer *
+vmw_svga_winsys_buffer_wrap(struct pb_buffer *buffer)
+{
+ return (struct svga_winsys_buffer *)buffer;
+}
+#endif
+
+void
+vmw_svga_winsys_buffer_destroy(struct svga_winsys_screen *sws,
+ struct svga_winsys_buffer *buf);
+void *
+vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
+ struct svga_winsys_buffer *buf,
+ unsigned flags);
+
+void
+vmw_svga_winsys_buffer_unmap(struct svga_winsys_screen *sws,
+ struct svga_winsys_buffer *buf);
+
+struct pb_manager *
+vmw_gmr_bufmgr_create(struct vmw_winsys_screen *vws);
+
+boolean
+vmw_gmr_bufmgr_region_ptr(struct pb_buffer *buf,
+ struct SVGAGuestPtr *ptr);
+
+
+#endif /* VMW_BUFFER_H_ */
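
The VMW_BUFFER_USAGE_SHARED path in vmw_gmr_bufmgr_create_buffer() consumes the region carried in struct vmw_buffer_desc instead of allocating a new one. A sketch of wrapping an existing kernel region that way (hypothetical helper; the region would come from e.g. vmw_ioctl_gb_surface_ref(), and the alignment value is assumed):

    #include <string.h>

    static struct pb_buffer *
    wrap_shared_region(struct pb_manager *mgr,
                       struct vmw_region *region, pb_size size)
    {
       struct vmw_buffer_desc desc;

       memset(&desc, 0, sizeof(desc));
       desc.pb_desc.alignment = 4096;                 /* assumed alignment */
       desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED;  /* reuse, don't allocate */
       desc.region = region;

       return mgr->create_buffer(mgr, size, &desc.pb_desc);
    }
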
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_context.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_context.c
new file mode 100644
index 000000000..31bedde7c
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_context.c
@@ -0,0 +1,687 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+
+#include "svga_cmd.h"
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "util/u_debug_stack.h"
+#include "util/u_debug_flush.h"
+#include "util/u_hash_table.h"
+#include "pipebuffer/pb_buffer.h"
+#include "pipebuffer/pb_validate.h"
+
+#include "svga_winsys.h"
+#include "vmw_context.h"
+#include "vmw_screen.h"
+#include "vmw_buffer.h"
+#include "vmw_surface.h"
+#include "vmw_fence.h"
+#include "vmw_shader.h"
+
+#define VMW_COMMAND_SIZE (64*1024)
+#define VMW_SURFACE_RELOCS (1024)
+#define VMW_SHADER_RELOCS (1024)
+#define VMW_REGION_RELOCS (512)
+
+#define VMW_MUST_FLUSH_STACK 8
+
+/*
+ * A factor applied to the maximum mob memory size to determine
+ * the optimal time to preemptively flush the command buffer.
+ * The constant is based on some performance trials with SpecViewperf.
+ */
+#define VMW_MAX_MOB_MEM_FACTOR 2
+
+/*
+ * A factor applied to the maximum surface memory size to determine
+ * the optimal time to preemptively flush the command buffer.
+ * The constant is based on some performance trials with SpecViewperf.
+ */
+#define VMW_MAX_SURF_MEM_FACTOR 2
+
+
+struct vmw_buffer_relocation
+{
+ struct pb_buffer *buffer;
+ boolean is_mob;
+ uint32 offset;
+
+ union {
+ struct {
+ struct SVGAGuestPtr *where;
+ } region;
+ struct {
+ SVGAMobId *id;
+ uint32 *offset_into_mob;
+ } mob;
+ };
+};
+
+struct vmw_ctx_validate_item {
+ union {
+ struct vmw_svga_winsys_surface *vsurf;
+ struct vmw_svga_winsys_shader *vshader;
+ };
+ boolean referenced;
+};
+
+struct vmw_svga_winsys_context
+{
+ struct svga_winsys_context base;
+
+ struct vmw_winsys_screen *vws;
+ struct util_hash_table *hash;
+
+#ifdef DEBUG
+ boolean must_flush;
+ struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];
+ struct debug_flush_ctx *fctx;
+#endif
+
+ struct {
+ uint8_t buffer[VMW_COMMAND_SIZE];
+ uint32_t size;
+ uint32_t used;
+ uint32_t reserved;
+ } command;
+
+ struct {
+ struct vmw_ctx_validate_item items[VMW_SURFACE_RELOCS];
+ uint32_t size;
+ uint32_t used;
+ uint32_t staged;
+ uint32_t reserved;
+ } surface;
+
+ struct {
+ struct vmw_buffer_relocation relocs[VMW_REGION_RELOCS];
+ uint32_t size;
+ uint32_t used;
+ uint32_t staged;
+ uint32_t reserved;
+ } region;
+
+ struct {
+ struct vmw_ctx_validate_item items[VMW_SHADER_RELOCS];
+ uint32_t size;
+ uint32_t used;
+ uint32_t staged;
+ uint32_t reserved;
+ } shader;
+
+ struct pb_validate *validate;
+
+ /**
+ * The amount of surface, GMR or MOB memory that is referenced by the commands
+ * currently batched in the context command buffer.
+ */
+ uint64_t seen_surfaces;
+ uint64_t seen_regions;
+ uint64_t seen_mobs;
+
+ /**
+ * Whether this context should fail to reserve more commands, not because it
+ * ran out of command space, but because a substantial amount of GMR or
+ * surface memory is already referenced.
+ */
+ boolean preemptive_flush;
+};
+
+
+static inline struct vmw_svga_winsys_context *
+vmw_svga_winsys_context(struct svga_winsys_context *swc)
+{
+ assert(swc);
+ return (struct vmw_svga_winsys_context *)swc;
+}
+
+
+static inline unsigned
+vmw_translate_to_pb_flags(unsigned flags)
+{
+ unsigned f = 0;
+ if (flags & SVGA_RELOC_READ)
+ f |= PB_USAGE_GPU_READ;
+
+ if (flags & SVGA_RELOC_WRITE)
+ f |= PB_USAGE_GPU_WRITE;
+
+ return f;
+}
+
+static enum pipe_error
+vmw_swc_flush(struct svga_winsys_context *swc,
+ struct pipe_fence_handle **pfence)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ struct pipe_fence_handle *fence = NULL;
+ unsigned i;
+ enum pipe_error ret;
+
+ ret = pb_validate_validate(vswc->validate);
+ assert(ret == PIPE_OK);
+ if(ret == PIPE_OK) {
+
+ /* Apply relocations */
+ for(i = 0; i < vswc->region.used; ++i) {
+ struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
+ struct SVGAGuestPtr ptr;
+
+ if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
+ assert(0);
+
+ ptr.offset += reloc->offset;
+
+ if (reloc->is_mob) {
+ if (reloc->mob.id)
+ *reloc->mob.id = ptr.gmrId;
+ if (reloc->mob.offset_into_mob)
+ *reloc->mob.offset_into_mob = ptr.offset;
+ else {
+ assert(ptr.offset == 0);
+ }
+ } else
+ *reloc->region.where = ptr;
+ }
+
+ if (vswc->command.used || pfence != NULL)
+ vmw_ioctl_command(vswc->vws,
+ vswc->base.cid,
+ 0,
+ vswc->command.buffer,
+ vswc->command.used,
+ &fence);
+
+ pb_validate_fence(vswc->validate, fence);
+ }
+
+ vswc->command.used = 0;
+ vswc->command.reserved = 0;
+
+ for(i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
+ struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
+ if (isurf->referenced)
+ p_atomic_dec(&isurf->vsurf->validated);
+ vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
+ }
+
+ util_hash_table_clear(vswc->hash);
+ vswc->surface.used = 0;
+ vswc->surface.reserved = 0;
+
+ for(i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
+ struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
+ if (ishader->referenced)
+ p_atomic_dec(&ishader->vshader->validated);
+ vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
+ }
+
+ vswc->shader.used = 0;
+ vswc->shader.reserved = 0;
+
+ vswc->region.used = 0;
+ vswc->region.reserved = 0;
+
+#ifdef DEBUG
+ vswc->must_flush = FALSE;
+ debug_flush_flush(vswc->fctx);
+#endif
+ vswc->preemptive_flush = FALSE;
+ vswc->seen_surfaces = 0;
+ vswc->seen_regions = 0;
+ vswc->seen_mobs = 0;
+
+ if(pfence)
+ vmw_fence_reference(vswc->vws, pfence, fence);
+
+ vmw_fence_reference(vswc->vws, &fence, NULL);
+
+ return ret;
+}
+
+
+static void *
+vmw_swc_reserve(struct svga_winsys_context *swc,
+ uint32_t nr_bytes, uint32_t nr_relocs )
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+
+#ifdef DEBUG
+ /* Check if somebody forgot to check the previous failure */
+ if(vswc->must_flush) {
+ debug_printf("Forgot to flush:\n");
+ debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
+ assert(!vswc->must_flush);
+ }
+ debug_flush_might_flush(vswc->fctx);
+#endif
+
+ assert(nr_bytes <= vswc->command.size);
+ if(nr_bytes > vswc->command.size)
+ return NULL;
+
+ if(vswc->preemptive_flush ||
+ vswc->command.used + nr_bytes > vswc->command.size ||
+ vswc->surface.used + nr_relocs > vswc->surface.size ||
+ vswc->shader.used + nr_relocs > vswc->shader.size ||
+ vswc->region.used + nr_relocs > vswc->region.size) {
+#ifdef DEBUG
+ vswc->must_flush = TRUE;
+ debug_backtrace_capture(vswc->must_flush_stack, 1,
+ VMW_MUST_FLUSH_STACK);
+#endif
+ return NULL;
+ }
+
+ assert(vswc->command.used + nr_bytes <= vswc->command.size);
+ assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
+ assert(vswc->shader.used + nr_relocs <= vswc->shader.size);
+ assert(vswc->region.used + nr_relocs <= vswc->region.size);
+
+ vswc->command.reserved = nr_bytes;
+ vswc->surface.reserved = nr_relocs;
+ vswc->surface.staged = 0;
+ vswc->shader.reserved = nr_relocs;
+ vswc->shader.staged = 0;
+ vswc->region.reserved = nr_relocs;
+ vswc->region.staged = 0;
+
+ return vswc->command.buffer + vswc->command.used;
+}
+
+static void
+vmw_swc_context_relocation(struct svga_winsys_context *swc,
+ uint32 *cid)
+{
+ *cid = swc->cid;
+}
+
+static boolean
+vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
+ struct pb_buffer *pb_buf,
+ unsigned flags)
+{
+ enum pipe_error ret;
+ unsigned translated_flags;
+
+ /*
+ * TODO: Update pb_validate to provide a similar functionality
+ * (Check buffer already present before adding)
+ */
+ if (util_hash_table_get(vswc->hash, pb_buf) != pb_buf) {
+ translated_flags = vmw_translate_to_pb_flags(flags);
+ ret = pb_validate_add_buffer(vswc->validate, pb_buf, translated_flags);
+ /* TODO: Update pipebuffer to reserve buffers and not fail here */
+ assert(ret == PIPE_OK);
+ (void)ret;
+ (void)util_hash_table_set(vswc->hash, pb_buf, pb_buf);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static void
+vmw_swc_region_relocation(struct svga_winsys_context *swc,
+ struct SVGAGuestPtr *where,
+ struct svga_winsys_buffer *buffer,
+ uint32 offset,
+ unsigned flags)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ struct vmw_buffer_relocation *reloc;
+
+ assert(vswc->region.staged < vswc->region.reserved);
+
+ reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
+ reloc->region.where = where;
+
+ /*
+ * pb_validate holds a refcount to the buffer, so no need to
+ * refcount it again in the relocation.
+ */
+ reloc->buffer = vmw_pb_buffer(buffer);
+ reloc->offset = offset;
+ reloc->is_mob = FALSE;
+ ++vswc->region.staged;
+
+ if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
+ vswc->seen_regions += reloc->buffer->size;
+ if(vswc->seen_regions >= VMW_GMR_POOL_SIZE/5)
+ vswc->preemptive_flush = TRUE;
+ }
+
+#ifdef DEBUG
+ if (!(flags & SVGA_RELOC_INTERNAL))
+ debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
+#endif
+}
+
+static void
+vmw_swc_mob_relocation(struct svga_winsys_context *swc,
+ SVGAMobId *id,
+ uint32 *offset_into_mob,
+ struct svga_winsys_buffer *buffer,
+ uint32 offset,
+ unsigned flags)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ struct vmw_buffer_relocation *reloc;
+
+ assert(vswc->region.staged < vswc->region.reserved);
+
+ reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
+ reloc->mob.id = id;
+ reloc->mob.offset_into_mob = offset_into_mob;
+
+ /*
+ * pb_validate holds a refcount to the buffer, so no need to
+ * refcount it again in the relocation.
+ */
+ reloc->buffer = vmw_pb_buffer(buffer);
+ reloc->offset = offset;
+ reloc->is_mob = TRUE;
+ ++vswc->region.staged;
+
+ if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
+ vswc->seen_mobs += reloc->buffer->size;
+ /* Flush preemptively once a sizable share of the max MOB memory is referenced. */
+ if (vswc->seen_mobs >= vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
+ vswc->preemptive_flush = TRUE;
+ }
+
+#ifdef DEBUG
+ if (!(flags & SVGA_RELOC_INTERNAL))
+ debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
+#endif
+}
+
+
+/**
+ * vmw_swc_surface_clear_reference - Clear referenced info for a surface
+ *
+ * @swc: Pointer to an svga_winsys_context
+ * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
+ * we want to clear
+ *
+ * This is primarily used by a discard surface map to indicate that the
+ * surface data is no longer referenced by a draw call, and mapping it
+ * should therefore no longer cause a flush.
+ */
+void
+vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
+ struct vmw_svga_winsys_surface *vsurf)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ struct vmw_ctx_validate_item *isrf =
+ util_hash_table_get(vswc->hash, vsurf);
+
+ if (isrf && isrf->referenced) {
+ isrf->referenced = FALSE;
+ p_atomic_dec(&vsurf->validated);
+ }
+}
+
+static void
+vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
+ uint32 *where,
+ struct vmw_svga_winsys_surface *vsurf,
+ unsigned flags)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ struct vmw_ctx_validate_item *isrf;
+
+ assert(vswc->surface.staged < vswc->surface.reserved);
+ isrf = util_hash_table_get(vswc->hash, vsurf);
+
+ if (isrf == NULL) {
+ isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
+ vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
+ isrf->referenced = FALSE;
+ /*
+ * Note that a failure here may just fall back to unhashed behavior
+ * and potentially cause unnecessary flushing, so ignore the
+ * return code.
+ */
+ (void) util_hash_table_set(vswc->hash, vsurf, isrf);
+ ++vswc->surface.staged;
+
+ vswc->seen_surfaces += vsurf->size;
+ /* Flush preemptively once a sizable share of the max surface memory is referenced. */
+ if (vswc->seen_surfaces >= vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
+ vswc->preemptive_flush = TRUE;
+ }
+
+ if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
+ isrf->referenced = TRUE;
+ p_atomic_inc(&vsurf->validated);
+ }
+
+ *where = vsurf->sid;
+}
+
+static void
+vmw_swc_surface_relocation(struct svga_winsys_context *swc,
+ uint32 *where,
+ uint32 *mobid,
+ struct svga_winsys_surface *surface,
+ unsigned flags)
+{
+ struct vmw_svga_winsys_surface *vsurf;
+
+ assert(swc->have_gb_objects || mobid == NULL);
+
+ if(!surface) {
+ *where = SVGA3D_INVALID_ID;
+ if (mobid)
+ *mobid = SVGA3D_INVALID_ID;
+ return;
+ }
+
+ vsurf = vmw_svga_winsys_surface(surface);
+ vmw_swc_surface_only_relocation(swc, where, vsurf, flags);
+
+ if (swc->have_gb_objects && vsurf->buf != NULL) {
+
+ /*
+ * Make sure backup buffer ends up fenced.
+ */
+
+ pipe_mutex_lock(vsurf->mutex);
+ assert(vsurf->buf != NULL);
+
+ vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
+ vsurf->buf, 0, flags);
+ pipe_mutex_unlock(vsurf->mutex);
+ }
+}
+
+static void
+vmw_swc_shader_relocation(struct svga_winsys_context *swc,
+ uint32 *shid,
+ uint32 *mobid,
+ uint32 *offset,
+ struct svga_winsys_gb_shader *shader)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ struct vmw_svga_winsys_shader *vshader;
+ struct vmw_ctx_validate_item *ishader;
+ if(!shader) {
+ *shid = SVGA3D_INVALID_ID;
+ return;
+ }
+
+ assert(vswc->shader.staged < vswc->shader.reserved);
+ vshader = vmw_svga_winsys_shader(shader);
+ ishader = util_hash_table_get(vswc->hash, vshader);
+
+ if (ishader == NULL) {
+ ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
+ vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
+ ishader->referenced = FALSE;
+ /*
+ * Note that a failure here may just fall back to unhashed behavior
+ * and potentially cause unnecessary flushing, so ignore the
+ * return code.
+ */
+ (void) util_hash_table_set(vswc->hash, vshader, ishader);
+ ++vswc->shader.staged;
+ }
+
+ if (!ishader->referenced) {
+ ishader->referenced = TRUE;
+ p_atomic_inc(&vshader->validated);
+ }
+
+ *shid = vshader->shid;
+
+ if (mobid != NULL && vshader->buf)
+ vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
+ 0, SVGA_RELOC_READ);
+}
+
+static void
+vmw_swc_commit(struct svga_winsys_context *swc)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+
+ assert(vswc->command.reserved);
+ assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
+ vswc->command.used += vswc->command.reserved;
+ vswc->command.reserved = 0;
+
+ assert(vswc->surface.staged <= vswc->surface.reserved);
+ assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
+ vswc->surface.used += vswc->surface.staged;
+ vswc->surface.staged = 0;
+ vswc->surface.reserved = 0;
+
+ assert(vswc->shader.staged <= vswc->shader.reserved);
+ assert(vswc->shader.used + vswc->shader.staged <= vswc->shader.size);
+ vswc->shader.used += vswc->shader.staged;
+ vswc->shader.staged = 0;
+ vswc->shader.reserved = 0;
+
+ assert(vswc->region.staged <= vswc->region.reserved);
+ assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
+ vswc->region.used += vswc->region.staged;
+ vswc->region.staged = 0;
+ vswc->region.reserved = 0;
+}
+
+
+static void
+vmw_swc_destroy(struct svga_winsys_context *swc)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ unsigned i;
+
+ for(i = 0; i < vswc->surface.used; ++i) {
+ struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
+ if (isurf->referenced)
+ p_atomic_dec(&isurf->vsurf->validated);
+ vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
+ }
+
+ for(i = 0; i < vswc->shader.used; ++i) {
+ struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
+ if (ishader->referenced)
+ p_atomic_dec(&ishader->vshader->validated);
+ vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
+ }
+
+ util_hash_table_destroy(vswc->hash);
+ pb_validate_destroy(vswc->validate);
+ vmw_ioctl_context_destroy(vswc->vws, swc->cid);
+#ifdef DEBUG
+ debug_flush_ctx_destroy(vswc->fctx);
+#endif
+ FREE(vswc);
+}
+
+static unsigned vmw_hash_ptr(void *p)
+{
+ return (unsigned)(unsigned long)p;
+}
+
+static int vmw_ptr_compare(void *key1, void *key2)
+{
+ return (key1 == key2) ? 0 : 1;
+}
+
+struct svga_winsys_context *
+vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+ struct vmw_svga_winsys_context *vswc;
+
+ vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
+ if(!vswc)
+ return NULL;
+
+ vswc->base.destroy = vmw_swc_destroy;
+ vswc->base.reserve = vmw_swc_reserve;
+ vswc->base.surface_relocation = vmw_swc_surface_relocation;
+ vswc->base.region_relocation = vmw_swc_region_relocation;
+ vswc->base.mob_relocation = vmw_swc_mob_relocation;
+ vswc->base.context_relocation = vmw_swc_context_relocation;
+ vswc->base.shader_relocation = vmw_swc_shader_relocation;
+ vswc->base.commit = vmw_swc_commit;
+ vswc->base.flush = vmw_swc_flush;
+ vswc->base.surface_map = vmw_svga_winsys_surface_map;
+ vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;
+
+ vswc->base.cid = vmw_ioctl_context_create(vws);
+ vswc->base.have_gb_objects = sws->have_gb_objects;
+
+ vswc->vws = vws;
+
+ vswc->command.size = VMW_COMMAND_SIZE;
+ vswc->surface.size = VMW_SURFACE_RELOCS;
+ vswc->shader.size = VMW_SHADER_RELOCS;
+ vswc->region.size = VMW_REGION_RELOCS;
+
+ vswc->validate = pb_validate_create();
+ if(!vswc->validate)
+ goto out_no_validate;
+
+ vswc->hash = util_hash_table_create(vmw_hash_ptr, vmw_ptr_compare);
+ if (!vswc->hash)
+ goto out_no_hash;
+
+#ifdef DEBUG
+ vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);
+#endif
+
+ return &vswc->base;
+
+out_no_hash:
+ pb_validate_destroy(vswc->validate);
+out_no_validate:
+ FREE(vswc);
+ return NULL;
+}
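
The context above enforces a strict reserve/relocate/commit protocol, with flush as the recovery path when reserve fails. A sketch of the caller's side, assuming a one-word command that references a single surface (the command encoding is deliberately simplified; real emitters go through the svga_cmd helpers):

    static enum pipe_error
    emit_one_surface_command(struct svga_winsys_context *swc,
                             struct svga_winsys_surface *surface)
    {
       uint32 *cmd = swc->reserve(swc, sizeof(uint32), 1);  /* 1 relocation */

       if (!cmd) {
          /* Out of space, or preemptive_flush was set: flush and retry. */
          if (swc->flush(swc, NULL) != PIPE_OK)
             return PIPE_ERROR_OUT_OF_MEMORY;
          cmd = swc->reserve(swc, sizeof(uint32), 1);
          if (!cmd)
             return PIPE_ERROR_OUT_OF_MEMORY;
       }

       /* Patches the surface id in place; staged counts move to used on commit. */
       swc->surface_relocation(swc, cmd, NULL, surface, SVGA_RELOC_READ);
       swc->commit(swc);
       return PIPE_OK;
    }
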
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_context.h b/lib/mesa/src/gallium/winsys/svga/drm/vmw_context.h
new file mode 100644
index 000000000..2c2fb4157
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_context.h
@@ -0,0 +1,73 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/**
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef VMW_CONTEXT_H_
+#define VMW_CONTEXT_H_
+
+#include <stdio.h>
+#include "pipe/p_compiler.h"
+
+struct svga_winsys_screen;
+struct svga_winsys_context;
+struct pipe_context;
+struct pipe_screen;
+
+
+/** Set to 1 to get extra debug info/output */
+#define VMW_DEBUG 0
+
+#if VMW_DEBUG
+#define vmw_printf debug_printf
+#define VMW_FUNC debug_printf("%s\n", __FUNCTION__)
+#else
+#define VMW_FUNC
+#define vmw_printf(...)
+#endif
+
+
+/**
+ * Called when an error/failure is encountered.
+ * We want these messages reported for all build types.
+ */
+#define vmw_error(...) fprintf(stderr, "VMware: " __VA_ARGS__)
+
+
+struct svga_winsys_context *
+vmw_svga_winsys_context_create(struct svga_winsys_screen *sws);
+
+struct vmw_svga_winsys_surface;
+
+
+void
+vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
+ struct vmw_svga_winsys_surface *vsurf);
+
+
+#endif /* VMW_CONTEXT_H_ */
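
For reference, a short sketch of how these macros are meant to be used (the function is hypothetical): vmw_error() always reaches stderr, while VMW_FUNC and vmw_printf() compile away unless VMW_DEBUG is 1:

    static boolean
    example_init(boolean fail)
    {
       VMW_FUNC;  /* logs "example_init" only in VMW_DEBUG builds */

       if (fail) {
          vmw_error("Failed to initialize example state.\n");
          return FALSE;
       }
       vmw_printf("example state ready\n");
       return TRUE;
    }
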
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_fence.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_fence.c
new file mode 100644
index 000000000..17822ce27
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_fence.c
@@ -0,0 +1,434 @@
+/**********************************************************
+ * Copyright 2009-2011 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#include "util/u_memory.h"
+#include "util/u_atomic.h"
+#include "util/list.h"
+#include "os/os_thread.h"
+
+#include "pipebuffer/pb_buffer_fenced.h"
+
+#include "vmw_screen.h"
+#include "vmw_fence.h"
+
+struct vmw_fence_ops
+{
+ /*
+ * Immutable members.
+ */
+ struct pb_fence_ops base;
+ struct vmw_winsys_screen *vws;
+
+ pipe_mutex mutex;
+
+ /*
+ * Protected by mutex:
+ */
+ struct list_head not_signaled;
+ uint32_t last_signaled;
+ uint32_t last_emitted;
+};
+
+struct vmw_fence
+{
+ struct list_head ops_list;
+ int32_t refcount;
+ uint32_t handle;
+ uint32_t mask;
+ int32_t signalled;
+ uint32_t seqno;
+};
+
+/**
+ * vmw_fence_seq_is_signaled - Check whether a fence seqno has
+ * signaled.
+ *
+ * @seq: Sequence number of the fence to check.
+ * @last: Last seqno known to have signaled.
+ * @cur: Last seqno emitted.
+ *
+ * Returns TRUE iff @seq lies at or before @last, using unsigned
+ * arithmetic relative to @cur so the test survives 32-bit wraparound.
+ */
+static inline boolean
+vmw_fence_seq_is_signaled(uint32_t seq, uint32_t last, uint32_t cur)
+{
+ return (cur - last <= cur - seq);
+}
+
+
+/**
+ * vmw_fence_ops - Return the vmw_fence_ops structure backing a
+ * struct pb_fence_ops pointer.
+ *
+ * @ops: Pointer to a struct pb_fence_ops.
+ *
+ */
+static inline struct vmw_fence_ops *
+vmw_fence_ops(struct pb_fence_ops *ops)
+{
+ assert(ops);
+ return (struct vmw_fence_ops *)ops;
+}
+
+
+/**
+ * vmw_fences_release - Release all fences from the not_signaled
+ * list.
+ *
+ * @ops: Pointer to a struct vmw_fence_ops.
+ *
+ */
+static void
+vmw_fences_release(struct vmw_fence_ops *ops)
+{
+ struct vmw_fence *fence, *n;
+
+ pipe_mutex_lock(ops->mutex);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
+ LIST_DELINIT(&fence->ops_list);
+ pipe_mutex_unlock(ops->mutex);
+}
+
+/**
+ * vmw_fences_signal - Traverse the not_signaled list and try to
+ * signal unsignaled fences.
+ *
+ * @fence_ops: Pointer to a struct pb_fence_ops.
+ * @signaled: Seqno that has signaled.
+ * @emitted: Last seqno emitted by the kernel.
+ * @has_emitted: Whether we provide the emitted value.
+ *
+ */
+void
+vmw_fences_signal(struct pb_fence_ops *fence_ops,
+ uint32_t signaled,
+ uint32_t emitted,
+ boolean has_emitted)
+{
+ struct vmw_fence_ops *ops = NULL;
+ struct vmw_fence *fence, *n;
+
+ if (fence_ops == NULL)
+ return;
+
+ ops = vmw_fence_ops(fence_ops);
+ pipe_mutex_lock(ops->mutex);
+
+ if (!has_emitted) {
+ emitted = ops->last_emitted;
+ if (emitted - signaled > (1 << 30))
+ emitted = signaled;
+ }
+
+ if (signaled == ops->last_signaled && emitted == ops->last_emitted)
+ goto out_unlock;
+
+ LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list) {
+ if (!vmw_fence_seq_is_signaled(fence->seqno, signaled, emitted))
+ break;
+
+ p_atomic_set(&fence->signalled, 1);
+ LIST_DELINIT(&fence->ops_list);
+ }
+ ops->last_signaled = signaled;
+ ops->last_emitted = emitted;
+
+out_unlock:
+ pipe_mutex_unlock(ops->mutex);
+}
+
+
+/**
+ * vmw_fence - return the vmw_fence object identified by a
+ * struct pipe_fence_handle *
+ *
+ * @fence: The opaque pipe fence handle.
+ */
+static inline struct vmw_fence *
+vmw_fence(struct pipe_fence_handle *fence)
+{
+ return (struct vmw_fence *) fence;
+}
+
+
+/**
+ * vmw_fence_create - Create a user-space fence object.
+ *
+ * @fence_ops: The fence_ops manager to register with.
+ * @handle: Handle identifying the kernel fence object.
+ * @seqno: Sequence number of the fence, used for signal bookkeeping.
+ * @mask: Mask of flags that this fence object may signal.
+ *
+ * Returns NULL on failure.
+ */
+struct pipe_fence_handle *
+vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
+ uint32_t seqno, uint32_t mask)
+{
+ struct vmw_fence *fence = CALLOC_STRUCT(vmw_fence);
+ struct vmw_fence_ops *ops = vmw_fence_ops(fence_ops);
+
+ if (!fence)
+ return NULL;
+
+ p_atomic_set(&fence->refcount, 1);
+ fence->handle = handle;
+ fence->mask = mask;
+ fence->seqno = seqno;
+ p_atomic_set(&fence->signalled, 0);
+ pipe_mutex_lock(ops->mutex);
+
+ if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
+ p_atomic_set(&fence->signalled, 1);
+ LIST_INITHEAD(&fence->ops_list);
+ } else {
+ p_atomic_set(&fence->signalled, 0);
+ LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
+ }
+
+ pipe_mutex_unlock(ops->mutex);
+
+ return (struct pipe_fence_handle *) fence;
+}
+
+
+/**
+ * vmw_fence_reference - Reference / unreference a vmw fence object.
+ *
+ * @vws: Pointer to the winsys screen.
+ * @ptr: Pointer to reference transfer destination.
+ * @fence: Pointer to object to reference. May be NULL.
+ */
+void
+vmw_fence_reference(struct vmw_winsys_screen *vws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ if (*ptr) {
+ struct vmw_fence *vfence = vmw_fence(*ptr);
+
+ if (p_atomic_dec_zero(&vfence->refcount)) {
+ struct vmw_fence_ops *ops = vmw_fence_ops(vws->fence_ops);
+
+ vmw_ioctl_fence_unref(vws, vfence->handle);
+
+ pipe_mutex_lock(ops->mutex);
+ LIST_DELINIT(&vfence->ops_list);
+ pipe_mutex_unlock(ops->mutex);
+
+ FREE(vfence);
+ }
+ }
+
+ if (fence) {
+ struct vmw_fence *vfence = vmw_fence(fence);
+
+ p_atomic_inc(&vfence->refcount);
+ }
+
+ *ptr = fence;
+}
+
+
+/**
+ * vmw_fence_signalled - Check whether a fence object is signalled.
+ *
+ * @vws: Pointer to the winsys screen.
+ * @fence: Handle to the fence object.
+ * @flag: Fence flags to check. If the fence object can't signal
+ * a flag, it is assumed to be already signaled.
+ *
+ * Returns 0 if the fence object was signaled, nonzero otherwise.
+ */
+int
+vmw_fence_signalled(struct vmw_winsys_screen *vws,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ struct vmw_fence *vfence;
+ int32_t vflags = SVGA_FENCE_FLAG_EXEC;
+ int ret;
+ uint32_t old;
+
+ if (!fence)
+ return 0;
+
+ vfence = vmw_fence(fence);
+ old = p_atomic_read(&vfence->signalled);
+
+ vflags &= ~vfence->mask;
+
+ if ((old & vflags) == vflags)
+ return 0;
+
+ /*
+ * Currently we update signaled fences on each execbuf call.
+ * That should really be sufficient, and we can avoid
+ * a lot of kernel calls this way.
+ */
+#if 1
+ ret = vmw_ioctl_fence_signalled(vws, vfence->handle, vflags);
+
+ if (ret == 0)
+ p_atomic_set(&vfence->signalled, 1);
+ return ret;
+#else
+ (void) ret;
+ return -1;
+#endif
+}
+
+/**
+ * vmw_fence_finish - Wait for a fence object to signal.
+ *
+ * @vws: Pointer to the winsys screen.
+ * @fence: Handle to the fence object.
+ * @flag: Fence flags to wait for. If the fence object can't signal
+ * a flag, it is assumed to be already signaled.
+ *
+ * Returns 0 if the wait succeeded. Nonzero otherwise.
+ */
+int
+vmw_fence_finish(struct vmw_winsys_screen *vws,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ struct vmw_fence *vfence;
+ int32_t vflags = SVGA_FENCE_FLAG_EXEC;
+ int ret;
+ uint32_t old;
+
+ if (!fence)
+ return 0;
+
+ vfence = vmw_fence(fence);
+ old = p_atomic_read(&vfence->signalled);
+ vflags &= ~vfence->mask;
+
+ if ((old & vflags) == vflags)
+ return 0;
+
+ ret = vmw_ioctl_fence_finish(vws, vfence->handle, vflags);
+
+ if (ret == 0) {
+ int32_t prev = old;
+
+ do {
+ old = prev;
+ prev = p_atomic_cmpxchg(&vfence->signalled, old, old | vflags);
+ } while (prev != old);
+ }
+
+ return ret;
+}
+
+
+/**
+ * vmw_fence_ops_fence_reference - wrapper for the pb_fence_ops api.
+ *
+ * wrapper around vmw_fence_reference.
+ */
+static void
+vmw_fence_ops_fence_reference(struct pb_fence_ops *ops,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;
+
+ vmw_fence_reference(vws, ptr, fence);
+}
+
+/**
+ * vmw_fence_ops_fence_signalled - wrapper for the pb_fence_ops api.
+ *
+ * wrapper around vmw_fence_signalled.
+ */
+static int
+vmw_fence_ops_fence_signalled(struct pb_fence_ops *ops,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;
+
+ return vmw_fence_signalled(vws, fence, flag);
+}
+
+
+/**
+ * vmw_fence_ops_fence_finish - wrapper for the pb_fence_ops api.
+ *
+ * wrapper around vmw_fence_finish.
+ */
+static int
+vmw_fence_ops_fence_finish(struct pb_fence_ops *ops,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;
+
+ return vmw_fence_finish(vws, fence, flag);
+}
+
+
+/**
+ * vmw_fence_ops_destroy - Destroy a pb_fence_ops function table.
+ *
+ * @ops: The function table to destroy.
+ *
+ * Part of the pb_fence_ops api.
+ */
+static void
+vmw_fence_ops_destroy(struct pb_fence_ops *ops)
+{
+ vmw_fences_release(vmw_fence_ops(ops));
+ FREE(ops);
+}
+
+
+/**
+ * vmw_fence_ops_create - Create a pb_fence_ops function table.
+ *
+ * @vws: Pointer to a struct vmw_winsys_screen.
+ *
+ * Returns a pointer to a pb_fence_ops function table to interface
+ * with pipe_buffer. This function is typically called on driver setup.
+ *
+ * Returns NULL on failure.
+ */
+struct pb_fence_ops *
+vmw_fence_ops_create(struct vmw_winsys_screen *vws)
+{
+ struct vmw_fence_ops *ops;
+
+ ops = CALLOC_STRUCT(vmw_fence_ops);
+ if(!ops)
+ return NULL;
+
+ pipe_mutex_init(ops->mutex);
+ LIST_INITHEAD(&ops->not_signaled);
+ ops->base.destroy = &vmw_fence_ops_destroy;
+ ops->base.fence_reference = &vmw_fence_ops_fence_reference;
+ ops->base.fence_signalled = &vmw_fence_ops_fence_signalled;
+ ops->base.fence_finish = &vmw_fence_ops_fence_finish;
+
+ ops->vws = vws;
+
+ return &ops->base;
+}
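
The seqno test at the heart of this file, vmw_fence_seq_is_signaled(), treats a fence as signaled when its seqno lies at or before the last signaled seqno within the unsigned window ending at the last emitted one, so the comparison stays correct across 32-bit wraparound. A small worked sketch (hypothetical test code):

    #include <assert.h>
    #include <stdint.h>

    static void
    seqno_examples(void)
    {
       /* Plain case: seq 10 counts as signaled once last reaches 10. */
       uint32_t seq = 10, last = 10, cur = 20;
       assert(cur - last <= cur - seq);   /* 10 <= 10: signaled */

       /* Wrapped case: seq was emitted just before the counter wrapped. */
       seq = 0xfffffff0u; last = 5u; cur = 8u;
       assert(cur - last <= cur - seq);   /* 3 <= 24: signaled */
    }
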
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_fence.h b/lib/mesa/src/gallium/winsys/svga/drm/vmw_fence.h
new file mode 100644
index 000000000..56f1a0ab0
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_fence.h
@@ -0,0 +1,61 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+
+#ifndef VMW_FENCE_H_
+#define VMW_FENCE_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipebuffer/pb_buffer_fenced.h"
+
+struct pipe_fence_handle;
+struct pb_fence_ops;
+struct vmw_winsys_screen;
+
+
+struct pipe_fence_handle *
+vmw_fence_create(struct pb_fence_ops *fence_ops,
+ uint32_t handle, uint32_t seqno, uint32_t mask);
+
+int
+vmw_fence_finish(struct vmw_winsys_screen *vws,
+ struct pipe_fence_handle *fence,
+ unsigned flag);
+int
+vmw_fence_signalled(struct vmw_winsys_screen *vws,
+ struct pipe_fence_handle *fence,
+ unsigned flag);
+void
+vmw_fence_reference(struct vmw_winsys_screen *vws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence);
+
+struct pb_fence_ops *
+vmw_fence_ops_create(struct vmw_winsys_screen *vws);
+
+
+
+#endif /* VMW_FENCE_H_ */
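
A sketch tying this fence API together with a context flush; 'vws' and 'swc' are assumed initialized as elsewhere in this import, and the helper name is hypothetical:

    static void
    flush_and_sync(struct vmw_winsys_screen *vws,
                   struct svga_winsys_context *swc)
    {
       struct pipe_fence_handle *fence = NULL;

       swc->flush(swc, &fence);
       if (fence) {
          if (vmw_fence_finish(vws, fence, SVGA_FENCE_FLAG_EXEC) != 0)
             vmw_error("Fence wait failed.\n");
          vmw_fence_reference(vws, &fence, NULL);  /* drop our reference */
       }
    }
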
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen.c
new file mode 100644
index 000000000..0c343cc7b
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen.c
@@ -0,0 +1,132 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+
+#include "vmw_screen.h"
+#include "vmw_fence.h"
+#include "vmw_context.h"
+
+#include "util/u_memory.h"
+#include "pipe/p_compiler.h"
+#include "util/u_hash_table.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+static struct util_hash_table *dev_hash = NULL;
+
+static int vmw_dev_compare(void *key1, void *key2)
+{
+ return (major(*(dev_t *)key1) == major(*(dev_t *)key2) &&
+ minor(*(dev_t *)key1) == minor(*(dev_t *)key2)) ? 0 : 1;
+}
+
+static unsigned vmw_dev_hash(void *key)
+{
+ return (major(*(dev_t *) key) << 16) | minor(*(dev_t *) key);
+}
+
+/* Called from svga_drm_winsys_screen_create(); creates and initializes the
+ * vmw_winsys_screen structure, which is the main entity in this
+ * module.
+ * First, check whether a vmw_winsys_screen object already exists for
+ * this device, and in that case return that one, making sure that we
+ * have our own file descriptor open to DRM.
+ */
+
+struct vmw_winsys_screen *
+vmw_winsys_create( int fd, boolean use_old_scanout_flag )
+{
+ struct vmw_winsys_screen *vws;
+ struct stat stat_buf;
+
+ if (dev_hash == NULL) {
+ dev_hash = util_hash_table_create(vmw_dev_hash, vmw_dev_compare);
+ if (dev_hash == NULL)
+ return NULL;
+ }
+
+ if (fstat(fd, &stat_buf))
+ return NULL;
+
+ vws = util_hash_table_get(dev_hash, &stat_buf.st_rdev);
+ if (vws) {
+ vws->open_count++;
+ return vws;
+ }
+
+ vws = CALLOC_STRUCT(vmw_winsys_screen);
+ if (!vws)
+ goto out_no_vws;
+
+ vws->device = stat_buf.st_rdev;
+ vws->open_count = 1;
+ vws->ioctl.drm_fd = dup(fd);
+ vws->use_old_scanout_flag = use_old_scanout_flag;
+ vws->base.have_gb_dma = TRUE;
+
+ if (!vmw_ioctl_init(vws))
+ goto out_no_ioctl;
+
+ vws->fence_ops = vmw_fence_ops_create(vws);
+ if (!vws->fence_ops)
+ goto out_no_fence_ops;
+
+ if(!vmw_pools_init(vws))
+ goto out_no_pools;
+
+ if (!vmw_winsys_screen_init_svga(vws))
+ goto out_no_svga;
+
+ if (util_hash_table_set(dev_hash, &vws->device, vws) != PIPE_OK)
+ goto out_no_hash_insert;
+
+ return vws;
+out_no_hash_insert:
+out_no_svga:
+ vmw_pools_cleanup(vws);
+out_no_pools:
+ vws->fence_ops->destroy(vws->fence_ops);
+out_no_fence_ops:
+ vmw_ioctl_cleanup(vws);
+out_no_ioctl:
+ close(vws->ioctl.drm_fd);
+ FREE(vws);
+out_no_vws:
+ return NULL;
+}
+
+void
+vmw_winsys_destroy(struct vmw_winsys_screen *vws)
+{
+ if (--vws->open_count == 0) {
+ util_hash_table_remove(dev_hash, &vws->device);
+ vmw_pools_cleanup(vws);
+ vws->fence_ops->destroy(vws->fence_ops);
+ vmw_ioctl_cleanup(vws);
+ close(vws->ioctl.drm_fd);
+ FREE(vws);
+ }
+}
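
The device hash above makes screens per-device singletons: a second create on any fd that stats to the same device returns the existing screen with open_count bumped, and destruction only tears the screen down on the last close. A sketch of the resulting semantics (hypothetical; fd1/fd2 are assumed open vmwgfx file descriptors, and assert.h is assumed included):

    static void
    sharing_example(int fd1, int fd2)
    {
       struct vmw_winsys_screen *a = vmw_winsys_create(fd1, FALSE);
       struct vmw_winsys_screen *b = vmw_winsys_create(fd2, FALSE);

       assert(a && a == b && a->open_count == 2);
       vmw_winsys_destroy(b);  /* screen stays alive for 'a' */
       vmw_winsys_destroy(a);  /* last close tears everything down */
    }
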
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen.h b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen.h
new file mode 100644
index 000000000..ce98db9b3
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen.h
@@ -0,0 +1,230 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/**
+ * @file
+ * Common definitions for the VMware SVGA winsys.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef VMW_SCREEN_H_
+#define VMW_SCREEN_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+
+#include "svga_winsys.h"
+#include "pipebuffer/pb_buffer_fenced.h"
+
+
+#define VMW_GMR_POOL_SIZE (16*1024*1024)
+#define VMW_QUERY_POOL_SIZE (8192)
+#define VMW_DEBUG_FLUSH_STACK 10
+
+/*
+ * Something big, but arbitrary. The kernel reports an error if it can't
+ * handle this, and the svga driver will resort to multiple partial
+ * uploads.
+ */
+#define VMW_MAX_BUFFER_SIZE (512*1024*1024)
+
+struct pb_manager;
+struct vmw_region;
+
+struct vmw_cap_3d {
+ boolean has_cap;
+ SVGA3dDevCapResult result;
+};
+
+struct vmw_winsys_screen
+{
+ struct svga_winsys_screen base;
+
+ boolean use_old_scanout_flag;
+
+ struct {
+ int drm_fd;
+ uint32_t hwversion;
+ uint32_t num_cap_3d;
+ struct vmw_cap_3d *cap_3d;
+ uint64_t max_mob_memory;
+ uint64_t max_surface_memory;
+ uint64_t max_texture_size;
+ boolean have_drm_2_6;
+ } ioctl;
+
+ struct {
+ struct pb_manager *gmr;
+ struct pb_manager *gmr_mm;
+ struct pb_manager *gmr_fenced;
+ struct pb_manager *gmr_slab;
+ struct pb_manager *gmr_slab_fenced;
+ struct pb_manager *query_mm;
+ struct pb_manager *query_fenced;
+ struct pb_manager *mob_fenced;
+ struct pb_manager *mob_cache;
+ struct pb_manager *mob_shader_slab;
+ struct pb_manager *mob_shader_slab_fenced;
+ } pools;
+
+ struct pb_fence_ops *fence_ops;
+
+ /*
+ * Screen instances
+ */
+ dev_t device;
+ int open_count;
+};
+
+
+static inline struct vmw_winsys_screen *
+vmw_winsys_screen(struct svga_winsys_screen *base)
+{
+ return (struct vmw_winsys_screen *)base;
+}
+
+uint32_t
+vmw_region_size(struct vmw_region *region);
+
+uint32
+vmw_ioctl_context_create(struct vmw_winsys_screen *vws);
+
+void
+vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws,
+ uint32 cid);
+
+uint32
+vmw_ioctl_surface_create(struct vmw_winsys_screen *vws,
+ SVGA3dSurfaceFlags flags,
+ SVGA3dSurfaceFormat format,
+ unsigned usage,
+ SVGA3dSize size,
+ uint32 numFaces,
+ uint32 numMipLevels);
+uint32
+vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
+ SVGA3dSurfaceFlags flags,
+ SVGA3dSurfaceFormat format,
+ unsigned usage,
+ SVGA3dSize size,
+ uint32 numFaces,
+ uint32 numMipLevels,
+ uint32 buffer_handle,
+ struct vmw_region **p_region);
+
+int
+vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
+ const struct winsys_handle *whandle,
+ SVGA3dSurfaceFlags *flags,
+ SVGA3dSurfaceFormat *format,
+ uint32_t *numMipLevels,
+ uint32_t *handle,
+ struct vmw_region **p_region);
+
+void
+vmw_ioctl_surface_destroy(struct vmw_winsys_screen *vws,
+ uint32 sid);
+
+void
+vmw_ioctl_command(struct vmw_winsys_screen *vws,
+ int32_t cid,
+ uint32_t throttle_us,
+ void *commands,
+ uint32_t size,
+ struct pipe_fence_handle **fence);
+
+struct vmw_region *
+vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size);
+
+void
+vmw_ioctl_region_destroy(struct vmw_region *region);
+
+struct SVGAGuestPtr
+vmw_ioctl_region_ptr(struct vmw_region *region);
+
+void *
+vmw_ioctl_region_map(struct vmw_region *region);
+void
+vmw_ioctl_region_unmap(struct vmw_region *region);
+
+
+int
+vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
+ uint32_t handle, uint32_t flags);
+
+int
+vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
+ uint32_t handle, uint32_t flags);
+
+void
+vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws,
+ uint32_t handle);
+
+uint32
+vmw_ioctl_shader_create(struct vmw_winsys_screen *vws,
+ SVGA3dShaderType type,
+ uint32 code_len);
+void
+vmw_ioctl_shader_destroy(struct vmw_winsys_screen *vws, uint32 shid);
+
+int
+vmw_ioctl_syncforcpu(struct vmw_region *region,
+ boolean dont_block,
+ boolean readonly,
+ boolean allow_cs);
+void
+vmw_ioctl_releasefromcpu(struct vmw_region *region,
+ boolean readonly,
+ boolean allow_cs);
+/* Initialize parts of vmw_winsys_screen at startup:
+ */
+boolean vmw_ioctl_init(struct vmw_winsys_screen *vws);
+boolean vmw_pools_init(struct vmw_winsys_screen *vws);
+boolean vmw_query_pools_init(struct vmw_winsys_screen *vws);
+boolean vmw_mob_pools_init(struct vmw_winsys_screen *vws);
+boolean vmw_winsys_screen_init_svga(struct vmw_winsys_screen *vws);
+
+void vmw_ioctl_cleanup(struct vmw_winsys_screen *vws);
+void vmw_pools_cleanup(struct vmw_winsys_screen *vws);
+
+struct vmw_winsys_screen *vmw_winsys_create(int fd, boolean use_old_scanout_flag);
+void vmw_winsys_destroy(struct vmw_winsys_screen *sws);
+void vmw_winsys_screen_set_throttling(struct pipe_screen *screen,
+ uint32_t throttle_us);
+
+struct pb_manager *
+simple_fenced_bufmgr_create(struct pb_manager *provider,
+ struct pb_fence_ops *ops);
+void
+vmw_fences_signal(struct pb_fence_ops *fence_ops,
+ uint32_t signaled,
+ uint32_t emitted,
+ boolean has_emitted);
+
+#endif /* VMW_SCREEN_H_ */
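
The ioctl block in struct vmw_winsys_screen caches the kernel's 3D capability table. A sketch of a bounds-checked lookup against that cache (hypothetical helper, assuming vmw_ioctl_init() has populated the fields):

    static boolean
    vmw_has_cap(const struct vmw_winsys_screen *vws, unsigned index)
    {
       if (index >= vws->ioctl.num_cap_3d)
          return FALSE;
       return vws->ioctl.cap_3d[index].has_cap;
    }
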
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_dri.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_dri.c
new file mode 100644
index 000000000..e70e0fec4
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_dri.c
@@ -0,0 +1,381 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_format.h"
+
+#include "vmw_context.h"
+#include "vmw_screen.h"
+#include "vmw_surface.h"
+#include "vmw_buffer.h"
+#include "svga_drm_public.h"
+#include "svga3d_surfacedefs.h"
+
+#include "state_tracker/drm_driver.h"
+
+#include "vmwgfx_drm.h"
+#include <xf86drm.h>
+
+#include <stdio.h>
+#include <fcntl.h>
+
+struct dri1_api_version {
+ int major;
+ int minor;
+ int patch_level;
+};
+
+static struct svga_winsys_surface *
+vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
+ struct winsys_handle *whandle,
+ SVGA3dSurfaceFormat *format);
+
+static struct svga_winsys_surface *
+vmw_drm_gb_surface_from_handle(struct svga_winsys_screen *sws,
+ struct winsys_handle *whandle,
+ SVGA3dSurfaceFormat *format);
+static boolean
+vmw_drm_surface_get_handle(struct svga_winsys_screen *sws,
+ struct svga_winsys_surface *surface,
+ unsigned stride,
+ struct winsys_handle *whandle);
+
+static struct dri1_api_version drm_required = { 2, 1, 0 };
+static struct dri1_api_version drm_compat = { 2, 0, 0 };
+
+static boolean
+vmw_dri1_check_version(const struct dri1_api_version *cur,
+ const struct dri1_api_version *required,
+ const struct dri1_api_version *compat,
+ const char component[])
+{
+ if (cur->major > required->major && cur->major <= compat->major)
+ return TRUE;
+ if (cur->major == required->major && cur->minor >= required->minor)
+ return TRUE;
+
+ vmw_error("%s version failure.\n", component);
+ vmw_error("%s version is %d.%d.%d and this driver can only work\n"
+ "with versions %d.%d.x through %d.x.x.\n",
+ component,
+ cur->major, cur->minor, cur->patch_level,
+ required->major, required->minor, compat->major);
+ return FALSE;
+}
+
+/* This is actually the entrypoint to the entire driver,
+ * called by the target bootstrap code.
+ */
+struct svga_winsys_screen *
+svga_drm_winsys_screen_create(int fd)
+{
+ struct vmw_winsys_screen *vws;
+ struct dri1_api_version drm_ver;
+ drmVersionPtr ver;
+
+ ver = drmGetVersion(fd);
+ if (ver == NULL)
+ return NULL;
+
+ drm_ver.major = ver->version_major;
+ drm_ver.minor = ver->version_minor;
+ drm_ver.patch_level = 0; /* ??? */
+
+ drmFreeVersion(ver);
+ if (!vmw_dri1_check_version(&drm_ver, &drm_required,
+ &drm_compat, "vmwgfx drm driver"))
+ return NULL;
+
+ vws = vmw_winsys_create( fd, FALSE );
+ if (!vws)
+ goto out_no_vws;
+
+ /* XXX do this properly */
+ vws->base.surface_from_handle = vws->base.have_gb_objects ?
+ vmw_drm_gb_surface_from_handle : vmw_drm_surface_from_handle;
+ vws->base.surface_get_handle = vmw_drm_surface_get_handle;
+
+ return &vws->base;
+
+out_no_vws:
+ return NULL;
+}
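+
+/*
+ * Illustrative usage (sketch, not part of the original import): a target
+ * bootstrap typically opens a render node and hands the fd to
+ * svga_drm_winsys_screen_create(); the device path below is an assumption.
+ *
+ *   int fd = open("/dev/dri/renderD128", O_RDWR);
+ *   struct svga_winsys_screen *sws = svga_drm_winsys_screen_create(fd);
+ *   if (!sws)
+ *      close(fd);
+ */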
+
+static inline boolean
+vmw_dri1_intersect_src_bbox(struct drm_clip_rect *dst,
+ int dst_x,
+ int dst_y,
+ const struct drm_clip_rect *src,
+ const struct drm_clip_rect *bbox)
+{
+ int xy1;
+ int xy2;
+
+ xy1 = ((int)src->x1 > (int)bbox->x1 + dst_x) ? src->x1 :
+ (int)bbox->x1 + dst_x;
+ xy2 = ((int)src->x2 < (int)bbox->x2 + dst_x) ? src->x2 :
+ (int)bbox->x2 + dst_x;
+ if (xy1 >= xy2 || xy1 < 0)
+ return FALSE;
+
+ dst->x1 = xy1;
+ dst->x2 = xy2;
+
+ xy1 = ((int)src->y1 > (int)bbox->y1 + dst_y) ? src->y1 :
+ (int)bbox->y1 + dst_y;
+ xy2 = ((int)src->y2 < (int)bbox->y2 + dst_y) ? src->y2 :
+ (int)bbox->y2 + dst_y;
+ if (xy1 >= xy2 || xy1 < 0)
+ return FALSE;
+
+ dst->y1 = xy1;
+ dst->y2 = xy2;
+ return TRUE;
+}
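+
+/*
+ * Worked example (illustrative): with src = {x1 = 10, x2 = 100},
+ * bbox = {x1 = 0, x2 = 50} and dst_x = 20, the horizontal overlap is
+ * [max(10, 0 + 20), min(100, 50 + 20)) = [20, 70), so dst->x1 = 20 and
+ * dst->x2 = 70. An empty or negative interval makes the function
+ * return FALSE.
+ */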
+
+/**
+ * vmw_drm_gb_surface_from_handle - Create a shared surface
+ *
+ * @sws: Screen to register the surface with.
+ * @whandle: struct winsys_handle identifying the kernel surface object
+ * @format: On successful return points to a value describing the
+ * surface format.
+ *
+ * Returns a refcounted pointer to a struct svga_winsys_surface
+ * embedded in a struct vmw_svga_winsys_surface on success or NULL
+ * on failure.
+ */
+static struct svga_winsys_surface *
+vmw_drm_gb_surface_from_handle(struct svga_winsys_screen *sws,
+ struct winsys_handle *whandle,
+ SVGA3dSurfaceFormat *format)
+{
+ struct vmw_svga_winsys_surface *vsrf;
+ struct svga_winsys_surface *ssrf;
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+ SVGA3dSurfaceFlags flags;
+ uint32_t mip_levels;
+ struct vmw_buffer_desc desc;
+ struct pb_manager *provider = vws->pools.gmr;
+ struct pb_buffer *pb_buf;
+ uint32_t handle;
+ int ret;
+
+ ret = vmw_ioctl_gb_surface_ref(vws, whandle, &flags, format,
+ &mip_levels, &handle, &desc.region);
+
+ if (ret) {
+ fprintf(stderr, "Failed referencing shared surface. SID %d.\n"
+ "Error %d (%s).\n",
+ whandle->handle, ret, strerror(-ret));
+ return NULL;
+ }
+
+ if (mip_levels != 1) {
+ fprintf(stderr, "Incorrect number of mipmap levels on shared surface."
+ " SID %d, levels %d\n",
+ whandle->handle, mip_levels);
+ goto out_mip;
+ }
+
+ vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
+ if (!vsrf)
+ goto out_mip;
+
+ pipe_reference_init(&vsrf->refcnt, 1);
+ p_atomic_set(&vsrf->validated, 0);
+ vsrf->screen = vws;
+ vsrf->sid = handle;
+ vsrf->size = vmw_region_size(desc.region);
+
+ /*
+ * Synchronize backing buffers of shared surfaces using the
+ * kernel, since we don't pass fence objects around between
+ * processes.
+ */
+ desc.pb_desc.alignment = 4096;
+ desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC;
+ pb_buf = provider->create_buffer(provider, vsrf->size, &desc.pb_desc);
+ vsrf->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
+ if (!vsrf->buf)
+ goto out_no_buf;
+ ssrf = svga_winsys_surface(vsrf);
+
+ return ssrf;
+
+out_no_buf:
+ FREE(vsrf);
+out_mip:
+ vmw_ioctl_region_destroy(desc.region);
+ vmw_ioctl_surface_destroy(vws, whandle->handle);
+ return NULL;
+}
+
+static struct svga_winsys_surface *
+vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
+ struct winsys_handle *whandle,
+ SVGA3dSurfaceFormat *format)
+{
+ struct vmw_svga_winsys_surface *vsrf;
+ struct svga_winsys_surface *ssrf;
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+ union drm_vmw_surface_reference_arg arg;
+ struct drm_vmw_surface_arg *req = &arg.req;
+ struct drm_vmw_surface_create_req *rep = &arg.rep;
+ uint32_t handle = 0;
+ struct drm_vmw_size size;
+ SVGA3dSize base_size;
+ int ret;
+ int i;
+
+ switch (whandle->type) {
+ case DRM_API_HANDLE_TYPE_SHARED:
+ case DRM_API_HANDLE_TYPE_KMS:
+ handle = whandle->handle;
+ break;
+ case DRM_API_HANDLE_TYPE_FD:
+ ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle,
+ &handle);
+ if (ret) {
+ vmw_error("Failed to get handle from prime fd %d.\n",
+ (int) whandle->handle);
+ return NULL;
+ }
+ break;
+ default:
+ vmw_error("Attempt to import unsupported handle type %d.\n",
+ whandle->type);
+ return NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ req->sid = handle;
+ rep->size_addr = (unsigned long)&size;
+
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
+ &arg, sizeof(arg));
+
+ /*
+ * Need to close the handle we got from prime.
+ */
+ if (whandle->type == DRM_API_HANDLE_TYPE_FD)
+ vmw_ioctl_surface_destroy(vws, handle);
+
+ if (ret) {
+ /*
+ * Any attempt to share something other than a surface, like a dumb
+ * kms buffer, should fail here.
+ */
+ vmw_error("Failed referencing shared surface. SID %d.\n"
+ "Error %d (%s).\n",
+ handle, ret, strerror(-ret));
+ return NULL;
+ }
+
+ if (rep->mip_levels[0] != 1) {
+ vmw_error("Incorrect number of mipmap levels on shared surface."
+ " SID %d, levels %d\n",
+ handle, rep->mip_levels[0]);
+ goto out_mip;
+ }
+
+ for (i=1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (rep->mip_levels[i] != 0) {
+ vmw_error("Incorrect number of faces levels on shared surface."
+ " SID %d, face %d present.\n",
+ handle, i);
+ goto out_mip;
+ }
+ }
+
+ vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
+ if (!vsrf)
+ goto out_mip;
+
+ pipe_reference_init(&vsrf->refcnt, 1);
+ p_atomic_set(&vsrf->validated, 0);
+ vsrf->screen = vws;
+ vsrf->sid = handle;
+ ssrf = svga_winsys_surface(vsrf);
+ *format = rep->format;
+
+ /* Estimate usage, for early flushing. */
+
+ base_size.width = size.width;
+ base_size.height = size.height;
+ base_size.depth = size.depth;
+ vsrf->size = svga3dsurface_get_serialized_size(rep->format, base_size,
+ rep->mip_levels[0],
+ FALSE);
+
+ return ssrf;
+
+out_mip:
+ vmw_ioctl_surface_destroy(vws, handle);
+
+ return NULL;
+}
+
+static boolean
+vmw_drm_surface_get_handle(struct svga_winsys_screen *sws,
+ struct svga_winsys_surface *surface,
+ unsigned stride,
+ struct winsys_handle *whandle)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+ struct vmw_svga_winsys_surface *vsrf;
+ int ret;
+
+ if (!surface)
+ return FALSE;
+
+ vsrf = vmw_svga_winsys_surface(surface);
+ whandle->handle = vsrf->sid;
+ whandle->stride = stride;
+
+ switch (whandle->type) {
+ case DRM_API_HANDLE_TYPE_SHARED:
+ case DRM_API_HANDLE_TYPE_KMS:
+ whandle->handle = vsrf->sid;
+ break;
+ case DRM_API_HANDLE_TYPE_FD:
+ ret = drmPrimeHandleToFD(vws->ioctl.drm_fd, vsrf->sid, DRM_CLOEXEC,
+ (int *)&whandle->handle);
+ if (ret) {
+ vmw_error("Failed to get file descriptor from prime.\n");
+ return FALSE;
+ }
+ break;
+ default:
+ vmw_error("Attempt to export unsupported handle type %d.\n",
+ whandle->type);
+ return FALSE;
+ }
+
+ return TRUE;
+}
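+
+/*
+ * Illustrative round trip (sketch, not part of the original import):
+ * exporting a surface as a prime fd and importing it again; the local
+ * variable names are assumptions.
+ *
+ *   struct winsys_handle wh;
+ *   memset(&wh, 0, sizeof(wh));
+ *   wh.type = DRM_API_HANDLE_TYPE_FD;
+ *   if (vmw_drm_surface_get_handle(sws, srf, stride, &wh)) {
+ *      SVGA3dSurfaceFormat fmt;
+ *      struct svga_winsys_surface *imported =
+ *         sws->surface_from_handle(sws, &wh, &fmt);
+ *   }
+ */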
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_ioctl.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_ioctl.c
new file mode 100644
index 000000000..e2f0da58b
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_ioctl.c
@@ -0,0 +1,995 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/**
+ * @file
+ *
+ * Wrappers for DRM ioctl functionality used by the rest of the vmw
+ * drm winsys.
+ *
+ * Based on svgaicd_escape.c
+ */
+
+
+#include "svga_cmd.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "svgadump/svga_dump.h"
+#include "state_tracker/drm_driver.h"
+#include "vmw_screen.h"
+#include "vmw_context.h"
+#include "vmw_fence.h"
+#include "xf86drm.h"
+#include "vmwgfx_drm.h"
+#include "svga3d_caps.h"
+#include "svga3d_reg.h"
+
+#include "os/os_mman.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#define VMW_MAX_DEFAULT_TEXTURE_SIZE (128 * 1024 * 1024)
+
+struct vmw_region
+{
+ uint32_t handle;
+ uint64_t map_handle;
+ void *data;
+ uint32_t map_count;
+ int drm_fd;
+ uint32_t size;
+};
+
+/* XXX: This isn't a real hardware flag, but just a hack for the kernel
+ * to know about primary surfaces. Newer versions of the kernel
+ * interface use a dedicated field instead.
+ */
+#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
+
+
+uint32_t
+vmw_region_size(struct vmw_region *region)
+{
+ return region->size;
+}
+
+uint32
+vmw_ioctl_context_create(struct vmw_winsys_screen *vws)
+{
+ struct drm_vmw_context_arg c_arg;
+ int ret;
+
+ VMW_FUNC;
+
+ ret = drmCommandRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_CONTEXT,
+ &c_arg, sizeof(c_arg));
+
+ if (ret)
+ return -1;
+
+ vmw_printf("Context id is %d\n", c_arg.cid);
+
+ return c_arg.cid;
+}
+
+void
+vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws, uint32 cid)
+{
+ struct drm_vmw_context_arg c_arg;
+
+ VMW_FUNC;
+
+ memset(&c_arg, 0, sizeof(c_arg));
+ c_arg.cid = cid;
+
+ (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_CONTEXT,
+ &c_arg, sizeof(c_arg));
+
+}
+
+uint32
+vmw_ioctl_surface_create(struct vmw_winsys_screen *vws,
+ SVGA3dSurfaceFlags flags,
+ SVGA3dSurfaceFormat format,
+ unsigned usage,
+ SVGA3dSize size,
+ uint32_t numFaces, uint32_t numMipLevels)
+{
+ union drm_vmw_surface_create_arg s_arg;
+ struct drm_vmw_surface_create_req *req = &s_arg.req;
+ struct drm_vmw_surface_arg *rep = &s_arg.rep;
+ struct drm_vmw_size sizes[DRM_VMW_MAX_SURFACE_FACES*
+ DRM_VMW_MAX_MIP_LEVELS];
+ struct drm_vmw_size *cur_size;
+ uint32_t iFace;
+ uint32_t iMipLevel;
+ int ret;
+
+ vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);
+
+ memset(&s_arg, 0, sizeof(s_arg));
+ if (vws->use_old_scanout_flag &&
+ (flags & SVGA3D_SURFACE_HINT_SCANOUT)) {
+ req->flags = (uint32_t) flags;
+ req->scanout = false;
+ } else if (flags & SVGA3D_SURFACE_HINT_SCANOUT) {
+ req->flags = (uint32_t) (flags & ~SVGA3D_SURFACE_HINT_SCANOUT);
+ req->scanout = true;
+ } else {
+ req->flags = (uint32_t) flags;
+ req->scanout = false;
+ }
+ req->format = (uint32_t) format;
+ req->shareable = !!(usage & SVGA_SURFACE_USAGE_SHARED);
+
+ assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
+ DRM_VMW_MAX_MIP_LEVELS);
+ cur_size = sizes;
+ for (iFace = 0; iFace < numFaces; ++iFace) {
+ SVGA3dSize mipSize = size;
+
+ req->mip_levels[iFace] = numMipLevels;
+ for (iMipLevel = 0; iMipLevel < numMipLevels; ++iMipLevel) {
+ cur_size->width = mipSize.width;
+ cur_size->height = mipSize.height;
+ cur_size->depth = mipSize.depth;
+ mipSize.width = MAX2(mipSize.width >> 1, 1);
+ mipSize.height = MAX2(mipSize.height >> 1, 1);
+ mipSize.depth = MAX2(mipSize.depth >> 1, 1);
+ cur_size++;
+ }
+ }
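+
+   /*
+    * Worked example (illustrative): with a 256x64x1 base size and
+    * numMipLevels == 3, the loop above records 256x64x1, 128x32x1 and
+    * 64x16x1 for each face; each dimension halves per level, clamping
+    * at 1.
+    */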
+ for (iFace = numFaces; iFace < SVGA3D_MAX_SURFACE_FACES; ++iFace) {
+ req->mip_levels[iFace] = 0;
+ }
+
+ req->size_addr = (unsigned long)&sizes;
+
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SURFACE,
+ &s_arg, sizeof(s_arg));
+
+ if (ret)
+ return -1;
+
+ vmw_printf("Surface id is %d\n", rep->sid);
+
+ return rep->sid;
+}
+
+
+uint32
+vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
+ SVGA3dSurfaceFlags flags,
+ SVGA3dSurfaceFormat format,
+ unsigned usage,
+ SVGA3dSize size,
+ uint32_t numFaces,
+ uint32_t numMipLevels,
+ uint32_t buffer_handle,
+ struct vmw_region **p_region)
+{
+ union drm_vmw_gb_surface_create_arg s_arg;
+ struct drm_vmw_gb_surface_create_req *req = &s_arg.req;
+ struct drm_vmw_gb_surface_create_rep *rep = &s_arg.rep;
+ struct vmw_region *region = NULL;
+ int ret;
+
+ vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);
+
+ if (p_region) {
+ region = CALLOC_STRUCT(vmw_region);
+ if (!region)
+ return SVGA3D_INVALID_ID;
+ }
+
+ memset(&s_arg, 0, sizeof(s_arg));
+ if (flags & SVGA3D_SURFACE_HINT_SCANOUT) {
+ req->svga3d_flags = (uint32_t) (flags & ~SVGA3D_SURFACE_HINT_SCANOUT);
+ req->drm_surface_flags = drm_vmw_surface_flag_scanout;
+ } else {
+ req->svga3d_flags = (uint32_t) flags;
+ }
+ req->format = (uint32_t) format;
+ if (usage & SVGA_SURFACE_USAGE_SHARED)
+ req->drm_surface_flags |= drm_vmw_surface_flag_shareable;
+ req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
+
+ assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
+ DRM_VMW_MAX_MIP_LEVELS);
+ req->base_size.width = size.width;
+ req->base_size.height = size.height;
+ req->base_size.depth = size.depth;
+ req->mip_levels = numMipLevels;
+ req->multisample_count = 0;
+ req->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ if (buffer_handle)
+ req->buffer_handle = buffer_handle;
+ else
+ req->buffer_handle = SVGA3D_INVALID_ID;
+
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
+ &s_arg, sizeof(s_arg));
+
+ if (ret)
+ goto out_fail_create;
+
+ if (p_region) {
+ region->handle = rep->buffer_handle;
+ region->map_handle = rep->buffer_map_handle;
+ region->drm_fd = vws->ioctl.drm_fd;
+ region->size = rep->backup_size;
+ *p_region = region;
+ }
+
+ vmw_printf("Surface id is %d\n", rep->sid);
+ return rep->handle;
+
+out_fail_create:
+ FREE(region);
+ return SVGA3D_INVALID_ID;
+}
+
+/**
+ * vmw_ioctl_surface_req - Fill in a struct surface_req
+ *
+ * @vws: Winsys screen
+ * @whandle: Surface handle
+ * @req: The struct surface req to fill in
+ * @needs_unref: Set to TRUE if this call took a kernel surface reference
+ * that the caller must later release.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ * Fills in the surface_req structure according to handle type and kernel
+ * capabilities.
+ */
+static int
+vmw_ioctl_surface_req(const struct vmw_winsys_screen *vws,
+ const struct winsys_handle *whandle,
+ struct drm_vmw_surface_arg *req,
+ boolean *needs_unref)
+{
+ int ret;
+
+ switch(whandle->type) {
+ case DRM_API_HANDLE_TYPE_SHARED:
+ case DRM_API_HANDLE_TYPE_KMS:
+ *needs_unref = FALSE;
+ req->handle_type = DRM_VMW_HANDLE_LEGACY;
+ req->sid = whandle->handle;
+ break;
+ case DRM_API_HANDLE_TYPE_FD:
+ if (!vws->ioctl.have_drm_2_6) {
+ uint32_t handle;
+
+ ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle, &handle);
+ if (ret) {
+ vmw_error("Failed to get handle from prime fd %d.\n",
+ (int) whandle->handle);
+ return -EINVAL;
+ }
+
+ *needs_unref = TRUE;
+ req->handle_type = DRM_VMW_HANDLE_LEGACY;
+ req->sid = handle;
+ } else {
+ *needs_unref = FALSE;
+ req->handle_type = DRM_VMW_HANDLE_PRIME;
+ req->sid = whandle->handle;
+ }
+ break;
+ default:
+ vmw_error("Attempt to import unsupported handle type %d.\n",
+ whandle->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
+ * get surface information
+ *
+ * @vws: Screen to register the reference on
+ * @whandle: struct winsys_handle identifying the kernel surface object
+ * @flags: On successful return, the flags used when the surface was created
+ * @format: On successful return, the format used when the surface was created
+ * @numMipLevels: On successful return, the number of mipmap levels of
+ * the surface
+ * @handle: On successful return, the kernel handle of the guest-backed surface
+ * @p_region: On successful return points to a newly allocated
+ * struct vmw_region holding a reference to the surface backup buffer.
+ *
+ * Returns 0 on success, a negative system error code on failure.
+ */
+int
+vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
+ const struct winsys_handle *whandle,
+ SVGA3dSurfaceFlags *flags,
+ SVGA3dSurfaceFormat *format,
+ uint32_t *numMipLevels,
+ uint32_t *handle,
+ struct vmw_region **p_region)
+{
+ union drm_vmw_gb_surface_reference_arg s_arg;
+ struct drm_vmw_surface_arg *req = &s_arg.req;
+ struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;
+ struct vmw_region *region = NULL;
+ boolean needs_unref = FALSE;
+ int ret;
+
+ vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);
+
+ assert(p_region != NULL);
+ region = CALLOC_STRUCT(vmw_region);
+ if (!region)
+ return -ENOMEM;
+
+ memset(&s_arg, 0, sizeof(s_arg));
+ ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
+ if (ret)
+ goto out_fail_req;
+
+ *handle = req->sid;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
+ &s_arg, sizeof(s_arg));
+
+ if (ret)
+ goto out_fail_ref;
+
+ region->handle = rep->crep.buffer_handle;
+ region->map_handle = rep->crep.buffer_map_handle;
+ region->drm_fd = vws->ioctl.drm_fd;
+ region->size = rep->crep.backup_size;
+ *p_region = region;
+
+ *handle = rep->crep.handle;
+ *flags = rep->creq.svga3d_flags;
+ *format = rep->creq.format;
+ *numMipLevels = rep->creq.mip_levels;
+
+ if (needs_unref)
+ vmw_ioctl_surface_destroy(vws, *handle);
+
+ return 0;
+out_fail_ref:
+ if (needs_unref)
+ vmw_ioctl_surface_destroy(vws, *handle);
+out_fail_req:
+ FREE(region);
+ return ret;
+}
+
+void
+vmw_ioctl_surface_destroy(struct vmw_winsys_screen *vws, uint32 sid)
+{
+ struct drm_vmw_surface_arg s_arg;
+
+ VMW_FUNC;
+
+ memset(&s_arg, 0, sizeof(s_arg));
+ s_arg.sid = sid;
+
+ (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SURFACE,
+ &s_arg, sizeof(s_arg));
+}
+
+void
+vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid,
+ uint32_t throttle_us, void *commands, uint32_t size,
+ struct pipe_fence_handle **pfence)
+{
+ struct drm_vmw_execbuf_arg arg;
+ struct drm_vmw_fence_rep rep;
+ int ret;
+
+#ifdef DEBUG
+ {
+ static boolean firsttime = TRUE;
+ static boolean debug = FALSE;
+ static boolean skip = FALSE;
+ if (firsttime) {
+ debug = debug_get_bool_option("SVGA_DUMP_CMD", FALSE);
+ skip = debug_get_bool_option("SVGA_SKIP_CMD", FALSE);
+ }
+ if (debug) {
+ VMW_FUNC;
+ svga_dump_commands(commands, size);
+ }
+ firsttime = FALSE;
+ if (skip) {
+ size = 0;
+ }
+ }
+#endif
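+
+   /*
+    * Note (illustrative): SVGA_DUMP_CMD and SVGA_SKIP_CMD above are
+    * boolean environment variables read once, on the first submission;
+    * e.g. running a debug build with SVGA_DUMP_CMD=true dumps every
+    * command batch.
+    */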
+
+ memset(&arg, 0, sizeof(arg));
+ memset(&rep, 0, sizeof(rep));
+
+ rep.error = -EFAULT;
+ if (pfence)
+ arg.fence_rep = (unsigned long)&rep;
+ arg.commands = (unsigned long)commands;
+ arg.command_size = size;
+ arg.throttle_us = throttle_us;
+ arg.version = DRM_VMW_EXECBUF_VERSION;
+
+ do {
+ ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
+ } while(ret == -ERESTART);
+ if (ret) {
+ vmw_error("%s error %s.\n", __FUNCTION__, strerror(-ret));
+ }
+
+ if (rep.error) {
+
+ /*
+ * Kernel has already synced, or caller requested no fence.
+ */
+ if (pfence)
+ *pfence = NULL;
+ } else {
+ if (pfence) {
+ vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno,
+ TRUE);
+
+ *pfence = vmw_fence_create(vws->fence_ops, rep.handle,
+ rep.seqno, rep.mask);
+ if (*pfence == NULL) {
+ /*
+ * Fence creation failed. Need to sync.
+ */
+ (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask);
+ vmw_ioctl_fence_unref(vws, rep.handle);
+ }
+ }
+ }
+}
+
+
+struct vmw_region *
+vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size)
+{
+ struct vmw_region *region;
+ union drm_vmw_alloc_dmabuf_arg arg;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
+ struct drm_vmw_dmabuf_rep *rep = &arg.rep;
+ int ret;
+
+ vmw_printf("%s: size = %u\n", __FUNCTION__, size);
+
+ region = CALLOC_STRUCT(vmw_region);
+ if (!region)
+ goto out_err1;
+
+ memset(&arg, 0, sizeof(arg));
+ req->size = size;
+ do {
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_ALLOC_DMABUF, &arg,
+ sizeof(arg));
+ } while (ret == -ERESTART);
+
+ if (ret) {
+ vmw_error("IOCTL failed %d: %s\n", ret, strerror(-ret));
+ goto out_err1;
+ }
+
+ region->data = NULL;
+ region->handle = rep->handle;
+ region->map_handle = rep->map_handle;
+ region->map_count = 0;
+ region->size = size;
+ region->drm_fd = vws->ioctl.drm_fd;
+
+ vmw_printf(" gmrId = %u, offset = %u\n",
+ region->ptr.gmrId, region->ptr.offset);
+
+ return region;
+
+ out_err1:
+ FREE(region);
+ return NULL;
+}
+
+void
+vmw_ioctl_region_destroy(struct vmw_region *region)
+{
+ struct drm_vmw_unref_dmabuf_arg arg;
+
+ vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
+ region->ptr.gmrId, region->ptr.offset);
+
+ if (region->data) {
+ os_munmap(region->data, region->size);
+ region->data = NULL;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = region->handle;
+ drmCommandWrite(region->drm_fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));
+
+ FREE(region);
+}
+
+SVGAGuestPtr
+vmw_ioctl_region_ptr(struct vmw_region *region)
+{
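+   /* The kernel buffer handle doubles as the GMR id here; the offset is
+    * always 0 for regions allocated through this winsys.
+    */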
+ SVGAGuestPtr ptr = {region->handle, 0};
+ return ptr;
+}
+
+void *
+vmw_ioctl_region_map(struct vmw_region *region)
+{
+ void *map;
+
+ vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
+ region->ptr.gmrId, region->ptr.offset);
+
+ if (region->data == NULL) {
+ map = os_mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ region->drm_fd, region->map_handle);
+ if (map == MAP_FAILED) {
+ vmw_error("%s: Map failed.\n", __FUNCTION__);
+ return NULL;
+ }
+
+ region->data = map;
+ }
+
+ ++region->map_count;
+
+ return region->data;
+}
+
+void
+vmw_ioctl_region_unmap(struct vmw_region *region)
+{
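+   /* The mapping itself is cached: map_count is decremented below, but
+    * the actual munmap is deferred until vmw_ioctl_region_destroy().
+    */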
+ vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
+ region->ptr.gmrId, region->ptr.offset);
+ --region->map_count;
+}
+
+/**
+ * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
+ *
+ * @region: Pointer to a struct vmw_region representing the buffer object.
+ * @dont_block: Don't wait for GPU idle, but rather return -EBUSY if the
+ * GPU is busy with the buffer object.
+ * @readonly: Hint that the CPU access is read-only.
+ * @allow_cs: Allow concurrent command submission while the buffer is
+ * synchronized for CPU. If FALSE command submissions referencing the
+ * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
+ *
+ * This function idles any GPU activities touching the buffer and blocks
+ * command submission of commands referencing the buffer, even from
+ * other processes.
+ */
+int
+vmw_ioctl_syncforcpu(struct vmw_region *region,
+ boolean dont_block,
+ boolean readonly,
+ boolean allow_cs)
+{
+ struct drm_vmw_synccpu_arg arg;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.op = drm_vmw_synccpu_grab;
+ arg.handle = region->handle;
+ arg.flags = drm_vmw_synccpu_read;
+ if (!readonly)
+ arg.flags |= drm_vmw_synccpu_write;
+ if (dont_block)
+ arg.flags |= drm_vmw_synccpu_dontblock;
+ if (allow_cs)
+ arg.flags |= drm_vmw_synccpu_allow_cs;
+
+ return drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
+}
+
+/**
+ * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
+ *
+ * @region: Pointer to a struct vmw_region representing the buffer object.
+ * @readonly: Should hold the same value as the matching syncforcpu call.
+ * @allow_cs: Should hold the same value as the matching syncforcpu call.
+ */
+void
+vmw_ioctl_releasefromcpu(struct vmw_region *region,
+ boolean readonly,
+ boolean allow_cs)
+{
+ struct drm_vmw_synccpu_arg arg;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.op = drm_vmw_synccpu_release;
+ arg.handle = region->handle;
+ arg.flags = drm_vmw_synccpu_read;
+ if (!readonly)
+ arg.flags |= drm_vmw_synccpu_write;
+ if (allow_cs)
+ arg.flags |= drm_vmw_synccpu_allow_cs;
+
+ (void) drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
+}
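+
+/*
+ * Illustrative pairing (sketch, not part of the original import): a
+ * read-only CPU access bracketed by the two calls above.
+ *
+ *   if (vmw_ioctl_syncforcpu(region, FALSE, TRUE, FALSE) == 0) {
+ *      void *data = vmw_ioctl_region_map(region);
+ *      if (data) {
+ *         ... read from data ...
+ *         vmw_ioctl_region_unmap(region);
+ *      }
+ *      vmw_ioctl_releasefromcpu(region, TRUE, FALSE);
+ *   }
+ */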
+
+void
+vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws,
+ uint32_t handle)
+{
+ struct drm_vmw_fence_arg arg;
+ int ret;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = handle;
+
+ ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_FENCE_UNREF,
+ &arg, sizeof(arg));
+ if (ret != 0)
+ vmw_error("%s Failed\n", __FUNCTION__);
+}
+
+static inline uint32_t
+vmw_drm_fence_flags(uint32_t flags)
+{
+ uint32_t dflags = 0;
+
+ if (flags & SVGA_FENCE_FLAG_EXEC)
+ dflags |= DRM_VMW_FENCE_FLAG_EXEC;
+ if (flags & SVGA_FENCE_FLAG_QUERY)
+ dflags |= DRM_VMW_FENCE_FLAG_QUERY;
+
+ return dflags;
+}
+
+
+int
+vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
+ uint32_t handle,
+ uint32_t flags)
+{
+ struct drm_vmw_fence_signaled_arg arg;
+ uint32_t vflags = vmw_drm_fence_flags(flags);
+ int ret;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = handle;
+ arg.flags = vflags;
+
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_SIGNALED,
+ &arg, sizeof(arg));
+
+ if (ret != 0)
+ return ret;
+
+ vmw_fences_signal(vws->fence_ops, arg.passed_seqno, 0, FALSE);
+
+ return (arg.signaled) ? 0 : -1;
+}
+
+
+
+int
+vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
+ uint32_t handle,
+ uint32_t flags)
+{
+ struct drm_vmw_fence_wait_arg arg;
+ uint32_t vflags = vmw_drm_fence_flags(flags);
+ int ret;
+
+ memset(&arg, 0, sizeof(arg));
+
+ arg.handle = handle;
+ arg.timeout_us = 10*1000000;
+ arg.lazy = 0;
+ arg.flags = vflags;
+
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_WAIT,
+ &arg, sizeof(arg));
+
+ if (ret != 0)
+ vmw_error("%s Failed\n", __FUNCTION__);
+
+ return 0;
+}
+
+uint32
+vmw_ioctl_shader_create(struct vmw_winsys_screen *vws,
+ SVGA3dShaderType type,
+ uint32 code_len)
+{
+ struct drm_vmw_shader_create_arg sh_arg;
+ int ret;
+
+ VMW_FUNC;
+
+ memset(&sh_arg, 0, sizeof(sh_arg));
+
+ sh_arg.size = code_len;
+ sh_arg.buffer_handle = SVGA3D_INVALID_ID;
+ sh_arg.shader_handle = SVGA3D_INVALID_ID;
+ switch (type) {
+ case SVGA3D_SHADERTYPE_VS:
+ sh_arg.shader_type = drm_vmw_shader_type_vs;
+ break;
+ case SVGA3D_SHADERTYPE_PS:
+ sh_arg.shader_type = drm_vmw_shader_type_ps;
+ break;
+ default:
+ assert(!"Invalid shader type.");
+ break;
+ }
+
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SHADER,
+ &sh_arg, sizeof(sh_arg));
+
+ if (ret)
+ return SVGA3D_INVALID_ID;
+
+ return sh_arg.shader_handle;
+}
+
+void
+vmw_ioctl_shader_destroy(struct vmw_winsys_screen *vws, uint32 shid)
+{
+ struct drm_vmw_shader_arg sh_arg;
+
+ VMW_FUNC;
+
+ memset(&sh_arg, 0, sizeof(sh_arg));
+ sh_arg.handle = shid;
+
+ (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SHADER,
+ &sh_arg, sizeof(sh_arg));
+
+}
+
+static int
+vmw_ioctl_parse_caps(struct vmw_winsys_screen *vws,
+ const uint32_t *cap_buffer)
+{
+ int i;
+
+ if (vws->base.have_gb_objects) {
+ for (i = 0; i < vws->ioctl.num_cap_3d; ++i) {
+ vws->ioctl.cap_3d[i].has_cap = TRUE;
+ vws->ioctl.cap_3d[i].result.u = cap_buffer[i];
+ }
+ return 0;
+ } else {
+ const uint32 *capsBlock;
+ const SVGA3dCapsRecord *capsRecord = NULL;
+ uint32 offset;
+ const SVGA3dCapPair *capArray;
+ int numCaps, index;
+
+ /*
+ * Search linearly through the caps block records for the specified type.
+ */
+ capsBlock = cap_buffer;
+ for (offset = 0; capsBlock[offset] != 0; offset += capsBlock[offset]) {
+ const SVGA3dCapsRecord *record;
+ assert(offset < SVGA_FIFO_3D_CAPS_SIZE);
+ record = (const SVGA3dCapsRecord *) (capsBlock + offset);
+ if ((record->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN) &&
+ (record->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) &&
+ (!capsRecord || (record->header.type > capsRecord->header.type))) {
+ capsRecord = record;
+ }
+ }
+
+ if(!capsRecord)
+ return -1;
+
+ /*
+ * Calculate the number of caps from the size of the record.
+ */
+ capArray = (const SVGA3dCapPair *) capsRecord->data;
+ numCaps = (int) ((capsRecord->header.length * sizeof(uint32) -
+ sizeof capsRecord->header) / (2 * sizeof(uint32)));
+
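+      /*
+       * Worked example (illustrative): a record whose header.length is 10
+       * spans 10 uint32s; subtracting the two-uint32 header leaves eight
+       * uint32s, i.e. numCaps == 4 (index, value) pairs.
+       */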
+ for (i = 0; i < numCaps; i++) {
+ index = capArray[i][0];
+ if (index < vws->ioctl.num_cap_3d) {
+ vws->ioctl.cap_3d[index].has_cap = TRUE;
+ vws->ioctl.cap_3d[index].result.u = capArray[i][1];
+ } else {
+ debug_printf("Unknown devcaps seen: %d\n", index);
+ }
+ }
+ }
+ return 0;
+}
+
+boolean
+vmw_ioctl_init(struct vmw_winsys_screen *vws)
+{
+ struct drm_vmw_getparam_arg gp_arg;
+ struct drm_vmw_get_3d_cap_arg cap_arg;
+ unsigned int size;
+ int ret;
+ uint32_t *cap_buffer;
+ drmVersionPtr version;
+ boolean have_drm_2_5;
+
+ VMW_FUNC;
+
+ version = drmGetVersion(vws->ioctl.drm_fd);
+ if (!version)
+ goto out_no_version;
+
+ have_drm_2_5 = version->version_major > 2 ||
+ (version->version_major == 2 && version->version_minor > 4);
+ vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
+ (version->version_major == 2 && version->version_minor > 5);
+
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_3D;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (ret || gp_arg.value == 0) {
+ vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
+ goto out_no_3d;
+ }
+
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (ret) {
+ vmw_error("Failed to get fifo hw version (%i, %s).\n",
+ ret, strerror(-ret));
+ goto out_no_3d;
+ }
+ vws->ioctl.hwversion = gp_arg.value;
+
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (ret)
+ vws->base.have_gb_objects = FALSE;
+ else
+ vws->base.have_gb_objects =
+ !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);
+
+ if (vws->base.have_gb_objects && !have_drm_2_5)
+ goto out_no_3d;
+
+ if (vws->base.have_gb_objects) {
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (ret)
+ size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
+ else
+ size = gp_arg.value;
+
+      /* have_gb_objects is known to be TRUE inside this branch. */
+      vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
+
+
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (ret) {
+ /* Just guess a large enough value. */
+ vws->ioctl.max_mob_memory = 256*1024*1024;
+ } else {
+ vws->ioctl.max_mob_memory = gp_arg.value;
+ }
+
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+
+ if (ret || gp_arg.value == 0) {
+ vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
+ } else {
+ vws->ioctl.max_texture_size = gp_arg.value;
+ }
+
+ /* Never early flush surfaces, mobs do accounting. */
+ vws->ioctl.max_surface_memory = -1;
+ } else {
+ vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;
+
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
+ if (have_drm_2_5)
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (!have_drm_2_5 || ret) {
+         /* Just guess a large enough value, around 800 MB. */
+ vws->ioctl.max_surface_memory = 0x30000000;
+ } else {
+ vws->ioctl.max_surface_memory = gp_arg.value;
+ }
+
+ vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
+
+ size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
+ }
+
+ cap_buffer = calloc(1, size);
+ if (!cap_buffer) {
+ debug_printf("Failed alloc fifo 3D caps buffer.\n");
+ goto out_no_3d;
+ }
+
+ vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d,
+ sizeof(*vws->ioctl.cap_3d));
+ if (!vws->ioctl.cap_3d) {
+ debug_printf("Failed alloc fifo 3D caps buffer.\n");
+ goto out_no_caparray;
+ }
+
+ memset(&cap_arg, 0, sizeof(cap_arg));
+ cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
+ cap_arg.max_size = size;
+
+ ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
+ &cap_arg, sizeof(cap_arg));
+
+ if (ret) {
+ debug_printf("Failed to get 3D capabilities"
+ " (%i, %s).\n", ret, strerror(-ret));
+ goto out_no_caps;
+ }
+
+ ret = vmw_ioctl_parse_caps(vws, cap_buffer);
+ if (ret) {
+ debug_printf("Failed to parse 3D capabilities"
+ " (%i, %s).\n", ret, strerror(-ret));
+ goto out_no_caps;
+ }
+ free(cap_buffer);
+ drmFreeVersion(version);
+ vmw_printf("%s OK\n", __FUNCTION__);
+ return TRUE;
+ out_no_caps:
+ free(vws->ioctl.cap_3d);
+ out_no_caparray:
+ free(cap_buffer);
+ out_no_3d:
+ drmFreeVersion(version);
+ out_no_version:
+ vws->ioctl.num_cap_3d = 0;
+ debug_printf("%s Failed\n", __FUNCTION__);
+ return FALSE;
+}
+
+
+
+void
+vmw_ioctl_cleanup(struct vmw_winsys_screen *vws)
+{
+ VMW_FUNC;
+}
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_pools.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_pools.c
new file mode 100644
index 000000000..1815bfa67
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_pools.c
@@ -0,0 +1,242 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+
+#include "vmw_screen.h"
+
+#include "vmw_buffer.h"
+#include "vmw_fence.h"
+
+#include "pipebuffer/pb_buffer.h"
+#include "pipebuffer/pb_bufmgr.h"
+
+/**
+ * vmw_pools_cleanup - Destroy the buffer pools.
+ *
+ * @vws: pointer to a struct vmw_winsys_screen.
+ */
+void
+vmw_pools_cleanup(struct vmw_winsys_screen *vws)
+{
+ if (vws->pools.mob_shader_slab_fenced)
+ vws->pools.mob_shader_slab_fenced->destroy
+ (vws->pools.mob_shader_slab_fenced);
+ if (vws->pools.mob_shader_slab)
+ vws->pools.mob_shader_slab->destroy(vws->pools.mob_shader_slab);
+ if (vws->pools.mob_fenced)
+ vws->pools.mob_fenced->destroy(vws->pools.mob_fenced);
+ if (vws->pools.mob_cache)
+ vws->pools.mob_cache->destroy(vws->pools.mob_cache);
+
+ if (vws->pools.query_fenced)
+ vws->pools.query_fenced->destroy(vws->pools.query_fenced);
+ if (vws->pools.query_mm)
+ vws->pools.query_mm->destroy(vws->pools.query_mm);
+
+ if(vws->pools.gmr_fenced)
+ vws->pools.gmr_fenced->destroy(vws->pools.gmr_fenced);
+ if (vws->pools.gmr_mm)
+ vws->pools.gmr_mm->destroy(vws->pools.gmr_mm);
+ if (vws->pools.gmr_slab_fenced)
+ vws->pools.gmr_slab_fenced->destroy(vws->pools.gmr_slab_fenced);
+ if (vws->pools.gmr_slab)
+ vws->pools.gmr_slab->destroy(vws->pools.gmr_slab);
+
+ if(vws->pools.gmr)
+ vws->pools.gmr->destroy(vws->pools.gmr);
+}
+
+
+/**
+ * vmw_query_pools_init - Create a pool of query buffers.
+ *
+ * @vws: Pointer to a struct vmw_winsys_screen.
+ *
+ * Typically this pool should be created on demand when we
+ * detect that the app will be using queries. There's nothing
+ * special with this pool other than the backing kernel buffer sizes,
+ * which are limited to 8192 bytes.
+ * If there is a performance issue with allocation and freeing of the
+ * query slabs, it should be easily fixable by allocating them out
+ * of a buffer cache.
+ */
+boolean
+vmw_query_pools_init(struct vmw_winsys_screen *vws)
+{
+ struct pb_desc desc;
+
+ desc.alignment = 16;
+ desc.usage = ~(VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC);
+
+ vws->pools.query_mm = pb_slab_range_manager_create(vws->pools.gmr, 16, 128,
+ VMW_QUERY_POOL_SIZE,
+ &desc);
+ if (!vws->pools.query_mm)
+ return FALSE;
+
+ vws->pools.query_fenced = simple_fenced_bufmgr_create(
+ vws->pools.query_mm, vws->fence_ops);
+
+ if(!vws->pools.query_fenced)
+ goto out_no_query_fenced;
+
+ return TRUE;
+
+ out_no_query_fenced:
+ vws->pools.query_mm->destroy(vws->pools.query_mm);
+ return FALSE;
+}
+
+/**
+ * vmw_mob_pools_init - Create a pool of fenced kernel buffers.
+ *
+ * @vws: Pointer to a struct vmw_winsys_screen.
+ *
+ * Typically this pool should be created on demand when we
+ * detect that the app will be using MOB buffers.
+ */
+boolean
+vmw_mob_pools_init(struct vmw_winsys_screen *vws)
+{
+ struct pb_desc desc;
+
+ vws->pools.mob_cache =
+ pb_cache_manager_create(vws->pools.gmr, 100000, 2.0f,
+ VMW_BUFFER_USAGE_SHARED,
+ 64 * 1024 * 1024);
+ if (!vws->pools.mob_cache)
+ return FALSE;
+
+ vws->pools.mob_fenced =
+ simple_fenced_bufmgr_create(vws->pools.mob_cache,
+ vws->fence_ops);
+ if(!vws->pools.mob_fenced)
+ goto out_no_mob_fenced;
+
+ desc.alignment = 64;
+ desc.usage = ~(SVGA_BUFFER_USAGE_PINNED | VMW_BUFFER_USAGE_SHARED |
+ VMW_BUFFER_USAGE_SYNC);
+ vws->pools.mob_shader_slab =
+ pb_slab_range_manager_create(vws->pools.mob_cache,
+ 64,
+ 8192,
+ 16384,
+ &desc);
+ if(!vws->pools.mob_shader_slab)
+ goto out_no_mob_shader_slab;
+
+ vws->pools.mob_shader_slab_fenced =
+ simple_fenced_bufmgr_create(vws->pools.mob_shader_slab,
+ vws->fence_ops);
+   if(!vws->pools.mob_shader_slab_fenced)
+ goto out_no_mob_shader_slab_fenced;
+
+ return TRUE;
+
+ out_no_mob_shader_slab_fenced:
+ vws->pools.mob_shader_slab->destroy(vws->pools.mob_shader_slab);
+ out_no_mob_shader_slab:
+ vws->pools.mob_fenced->destroy(vws->pools.mob_fenced);
+ out_no_mob_fenced:
+ vws->pools.mob_cache->destroy(vws->pools.mob_cache);
+ return FALSE;
+}
+
+/**
+ * vmw_pools_init - Create a pool of GMR buffers.
+ *
+ * @vws: Pointer to a struct vmw_winsys_screen.
+ */
+boolean
+vmw_pools_init(struct vmw_winsys_screen *vws)
+{
+ struct pb_desc desc;
+
+ vws->pools.gmr = vmw_gmr_bufmgr_create(vws);
+ if(!vws->pools.gmr)
+ goto error;
+
+ if ((vws->base.have_gb_objects && vws->base.have_gb_dma) ||
+ !vws->base.have_gb_objects) {
+ /*
+ * A managed pool for DMA buffers.
+ */
+ vws->pools.gmr_mm = mm_bufmgr_create(vws->pools.gmr,
+ VMW_GMR_POOL_SIZE,
+ 12 /* 4096 alignment */);
+ if(!vws->pools.gmr_mm)
+ goto error;
+
+ vws->pools.gmr_fenced = simple_fenced_bufmgr_create
+ (vws->pools.gmr_mm, vws->fence_ops);
+
+#ifdef DEBUG
+ vws->pools.gmr_fenced = pb_debug_manager_create(vws->pools.gmr_fenced,
+ 4096,
+ 4096);
+#endif
+ if(!vws->pools.gmr_fenced)
+ goto error;
+
+ /*
+       * The slab pool allocates buffers directly from the kernel, except
+       * for very small buffers, which are allocated from a slab so as
+       * not to waste memory, since a kernel buffer is at minimum 4096 bytes.
+       *
+       * Here we use it only as an emergency fallback for when the
+       * pre-allocated managed buffer pool runs out of memory.
+ */
+
+ desc.alignment = 64;
+ desc.usage = ~(SVGA_BUFFER_USAGE_PINNED | SVGA_BUFFER_USAGE_SHADER |
+ VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC);
+ vws->pools.gmr_slab = pb_slab_range_manager_create(vws->pools.gmr,
+ 64,
+ 8192,
+ 16384,
+ &desc);
+ if (!vws->pools.gmr_slab)
+ goto error;
+
+ vws->pools.gmr_slab_fenced =
+ simple_fenced_bufmgr_create(vws->pools.gmr_slab, vws->fence_ops);
+
+ if (!vws->pools.gmr_slab_fenced)
+ goto error;
+ }
+
+ vws->pools.query_fenced = NULL;
+ vws->pools.query_mm = NULL;
+ vws->pools.mob_cache = NULL;
+
+ if (vws->base.have_gb_objects && !vmw_mob_pools_init(vws))
+ goto error;
+
+ return TRUE;
+
+error:
+ vmw_pools_cleanup(vws);
+ return FALSE;
+}
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_svga.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_svga.c
new file mode 100644
index 000000000..32f16cd44
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_screen_svga.c
@@ -0,0 +1,411 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/**
+ * @file
+ * This file implements the SVGA interface into this winsys, defined
+ * in drivers/svga/svga_winsys.h.
+ *
+ * @author Keith Whitwell
+ * @author Jose Fonseca
+ */
+
+
+#include "svga_cmd.h"
+#include "svga3d_caps.h"
+
+#include "util/u_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipebuffer/pb_buffer.h"
+#include "pipebuffer/pb_bufmgr.h"
+#include "svga_winsys.h"
+#include "vmw_context.h"
+#include "vmw_screen.h"
+#include "vmw_surface.h"
+#include "vmw_buffer.h"
+#include "vmw_fence.h"
+#include "vmw_shader.h"
+#include "svga3d_surfacedefs.h"
+
+/**
+ * Try to get a surface backing buffer from the cache
+ * if it's this size or smaller.
+ */
+#define VMW_TRY_CACHED_SIZE (2*1024*1024)
+
+static struct svga_winsys_buffer *
+vmw_svga_winsys_buffer_create(struct svga_winsys_screen *sws,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+ struct vmw_buffer_desc desc;
+ struct pb_manager *provider;
+ struct pb_buffer *buffer;
+
+ memset(&desc, 0, sizeof desc);
+ desc.pb_desc.alignment = alignment;
+ desc.pb_desc.usage = usage;
+
+ if (usage == SVGA_BUFFER_USAGE_PINNED) {
+ if (vws->pools.query_fenced == NULL && !vmw_query_pools_init(vws))
+ return NULL;
+ provider = vws->pools.query_fenced;
+ } else if (usage == SVGA_BUFFER_USAGE_SHADER) {
+ provider = vws->pools.mob_shader_slab_fenced;
+ } else
+ provider = vws->pools.gmr_fenced;
+
+ assert(provider);
+ buffer = provider->create_buffer(provider, size, &desc.pb_desc);
+
+   if (!buffer && provider == vws->pools.gmr_fenced) {
+      /* Fall back to the emergency slab pool. */
+      provider = vws->pools.gmr_slab_fenced;
+ buffer = provider->create_buffer(provider, size, &desc.pb_desc);
+ }
+
+ if (!buffer)
+ return NULL;
+
+ return vmw_svga_winsys_buffer_wrap(buffer);
+}
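+
+/*
+ * Note (illustrative): placement above is keyed on the usage flag:
+ * SVGA_BUFFER_USAGE_PINNED buffers come from the fenced query pool,
+ * SVGA_BUFFER_USAGE_SHADER buffers from the MOB shader slab, and all
+ * other buffers from the fenced GMR pool, with the GMR slab pool as an
+ * emergency fallback.
+ */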
+
+
+static void
+vmw_svga_winsys_fence_reference(struct svga_winsys_screen *sws,
+ struct pipe_fence_handle **pdst,
+ struct pipe_fence_handle *src)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+
+ vmw_fence_reference(vws, pdst, src);
+}
+
+
+static int
+vmw_svga_winsys_fence_signalled(struct svga_winsys_screen *sws,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+
+ return vmw_fence_signalled(vws, fence, flag);
+}
+
+
+static int
+vmw_svga_winsys_fence_finish(struct svga_winsys_screen *sws,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+
+ return vmw_fence_finish(vws, fence, flag);
+}
+
+
+
+static struct svga_winsys_surface *
+vmw_svga_winsys_surface_create(struct svga_winsys_screen *sws,
+ SVGA3dSurfaceFlags flags,
+ SVGA3dSurfaceFormat format,
+ unsigned usage,
+ SVGA3dSize size,
+ uint32 numFaces,
+ uint32 numMipLevels)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+ struct vmw_svga_winsys_surface *surface;
+ struct vmw_buffer_desc desc;
+ struct pb_manager *provider;
+ uint32_t buffer_size;
+
+
+ memset(&desc, 0, sizeof(desc));
+ surface = CALLOC_STRUCT(vmw_svga_winsys_surface);
+ if(!surface)
+ goto no_surface;
+
+ pipe_reference_init(&surface->refcnt, 1);
+ p_atomic_set(&surface->validated, 0);
+ surface->screen = vws;
+ pipe_mutex_init(surface->mutex);
+ surface->shared = !!(usage & SVGA_SURFACE_USAGE_SHARED);
+ provider = (surface->shared) ? vws->pools.gmr : vws->pools.mob_fenced;
+
+ /*
+ * Used for the backing buffer GB surfaces, and to approximate
+ * when to flush on non-GB hosts.
+ */
+ buffer_size = svga3dsurface_get_serialized_size(format, size, numMipLevels, (numFaces == 6));
+ if (buffer_size > vws->ioctl.max_texture_size) {
+ goto no_sid;
+ }
+
+ if (sws->have_gb_objects) {
+ SVGAGuestPtr ptr = {0,0};
+
+ /*
+ * If the backing buffer size is small enough, try to allocate a
+ * buffer out of the buffer cache. Otherwise, let the kernel allocate
+ * a suitable buffer for us.
+ */
+ if (buffer_size < VMW_TRY_CACHED_SIZE && !surface->shared) {
+ struct pb_buffer *pb_buf;
+
+ surface->size = buffer_size;
+ desc.pb_desc.alignment = 4096;
+ desc.pb_desc.usage = 0;
+ pb_buf = provider->create_buffer(provider, buffer_size, &desc.pb_desc);
+ surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
+ if (surface->buf && !vmw_gmr_bufmgr_region_ptr(pb_buf, &ptr))
+ assert(0);
+ }
+
+ surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
+ size, numFaces,
+ numMipLevels, ptr.gmrId,
+ surface->buf ? NULL :
+ &desc.region);
+
+ if (surface->sid == SVGA3D_INVALID_ID && surface->buf) {
+
+ /*
+ * Kernel refused to allocate a surface for us.
+ * Perhaps something was wrong with our buffer?
+ * This is really a guard against future new size requirements
+ * on the backing buffers.
+ */
+ vmw_svga_winsys_buffer_destroy(sws, surface->buf);
+ surface->buf = NULL;
+ surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
+ size, numFaces,
+ numMipLevels, 0,
+ &desc.region);
+ if (surface->sid == SVGA3D_INVALID_ID)
+ goto no_sid;
+ }
+
+ /*
+ * If the kernel created the buffer for us, wrap it into a
+ * vmw_svga_winsys_buffer.
+ */
+ if (surface->buf == NULL) {
+ struct pb_buffer *pb_buf;
+
+ surface->size = vmw_region_size(desc.region);
+ desc.pb_desc.alignment = 4096;
+ desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED;
+ pb_buf = provider->create_buffer(provider, surface->size,
+ &desc.pb_desc);
+ surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
+ if (surface->buf == NULL) {
+ vmw_ioctl_region_destroy(desc.region);
+ vmw_ioctl_surface_destroy(vws, surface->sid);
+ goto no_sid;
+ }
+ }
+ } else {
+ surface->sid = vmw_ioctl_surface_create(vws, flags, format, usage,
+ size, numFaces, numMipLevels);
+ if(surface->sid == SVGA3D_INVALID_ID)
+ goto no_sid;
+
+ /* Best estimate for surface size, used for early flushing. */
+ surface->size = buffer_size;
+ surface->buf = NULL;
+ }
+
+ return svga_winsys_surface(surface);
+
+no_sid:
+ if (surface->buf)
+ vmw_svga_winsys_buffer_destroy(sws, surface->buf);
+
+ FREE(surface);
+no_surface:
+ return NULL;
+}
+
+static boolean
+vmw_svga_winsys_surface_can_create(struct svga_winsys_screen *sws,
+ SVGA3dSurfaceFormat format,
+ SVGA3dSize size,
+ uint32 numFaces,
+ uint32 numMipLevels)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+ uint32_t buffer_size;
+
+ buffer_size = svga3dsurface_get_serialized_size(format, size,
+ numMipLevels,
+ (numFaces == 6));
+ if (buffer_size > vws->ioctl.max_texture_size) {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+
+static boolean
+vmw_svga_winsys_surface_is_flushed(struct svga_winsys_screen *sws,
+ struct svga_winsys_surface *surface)
+{
+ struct vmw_svga_winsys_surface *vsurf = vmw_svga_winsys_surface(surface);
+ return (p_atomic_read(&vsurf->validated) == 0);
+}
+
+
+static void
+vmw_svga_winsys_surface_ref(struct svga_winsys_screen *sws,
+ struct svga_winsys_surface **pDst,
+ struct svga_winsys_surface *src)
+{
+ struct vmw_svga_winsys_surface *d_vsurf = vmw_svga_winsys_surface(*pDst);
+ struct vmw_svga_winsys_surface *s_vsurf = vmw_svga_winsys_surface(src);
+
+ vmw_svga_winsys_surface_reference(&d_vsurf, s_vsurf);
+ *pDst = svga_winsys_surface(d_vsurf);
+}
+
+
+static void
+vmw_svga_winsys_destroy(struct svga_winsys_screen *sws)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+
+ vmw_winsys_destroy(vws);
+}
+
+
+static SVGA3dHardwareVersion
+vmw_svga_winsys_get_hw_version(struct svga_winsys_screen *sws)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+
+ if (sws->have_gb_objects)
+ return SVGA3D_HWVERSION_WS8_B1;
+
+ return (SVGA3dHardwareVersion) vws->ioctl.hwversion;
+}
+
+
+static boolean
+vmw_svga_winsys_get_cap(struct svga_winsys_screen *sws,
+ SVGA3dDevCapIndex index,
+ SVGA3dDevCapResult *result)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+
+   if (index >= vws->ioctl.num_cap_3d || !vws->ioctl.cap_3d[index].has_cap)
+ return FALSE;
+
+ *result = vws->ioctl.cap_3d[index].result;
+ return TRUE;
+}
+
+static struct svga_winsys_gb_shader *
+vmw_svga_winsys_shader_create(struct svga_winsys_screen *sws,
+ SVGA3dShaderType type,
+ const uint32 *bytecode,
+ uint32 bytecodeLen)
+{
+ struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
+ struct vmw_svga_winsys_shader *shader;
+ void *code;
+
+ shader = CALLOC_STRUCT(vmw_svga_winsys_shader);
+ if(!shader)
+ goto out_no_shader;
+
+ pipe_reference_init(&shader->refcnt, 1);
+ p_atomic_set(&shader->validated, 0);
+ shader->screen = vws;
+ shader->buf = vmw_svga_winsys_buffer_create(sws, 64,
+ SVGA_BUFFER_USAGE_SHADER,
+ bytecodeLen);
+ if (!shader->buf)
+ goto out_no_buf;
+
+ code = vmw_svga_winsys_buffer_map(sws, shader->buf, PIPE_TRANSFER_WRITE);
+ if (!code)
+ goto out_no_buf;
+
+ memcpy(code, bytecode, bytecodeLen);
+ vmw_svga_winsys_buffer_unmap(sws, shader->buf);
+
+ shader->shid = vmw_ioctl_shader_create(vws, type, bytecodeLen);
+ if(shader->shid == SVGA3D_INVALID_ID)
+ goto out_no_shid;
+
+ return svga_winsys_shader(shader);
+
+out_no_shid:
+ vmw_svga_winsys_buffer_destroy(sws, shader->buf);
+out_no_buf:
+ FREE(shader);
+out_no_shader:
+ return NULL;
+}
+
+static void
+vmw_svga_winsys_shader_destroy(struct svga_winsys_screen *sws,
+ struct svga_winsys_gb_shader *shader)
+{
+ struct vmw_svga_winsys_shader *d_shader =
+ vmw_svga_winsys_shader(shader);
+
+ vmw_svga_winsys_shader_reference(&d_shader, NULL);
+}
+
+boolean
+vmw_winsys_screen_init_svga(struct vmw_winsys_screen *vws)
+{
+ vws->base.destroy = vmw_svga_winsys_destroy;
+ vws->base.get_hw_version = vmw_svga_winsys_get_hw_version;
+ vws->base.get_cap = vmw_svga_winsys_get_cap;
+ vws->base.context_create = vmw_svga_winsys_context_create;
+ vws->base.surface_create = vmw_svga_winsys_surface_create;
+ vws->base.surface_is_flushed = vmw_svga_winsys_surface_is_flushed;
+ vws->base.surface_reference = vmw_svga_winsys_surface_ref;
+ vws->base.surface_can_create = vmw_svga_winsys_surface_can_create;
+ vws->base.buffer_create = vmw_svga_winsys_buffer_create;
+ vws->base.buffer_map = vmw_svga_winsys_buffer_map;
+ vws->base.buffer_unmap = vmw_svga_winsys_buffer_unmap;
+ vws->base.buffer_destroy = vmw_svga_winsys_buffer_destroy;
+ vws->base.fence_reference = vmw_svga_winsys_fence_reference;
+ vws->base.fence_signalled = vmw_svga_winsys_fence_signalled;
+ vws->base.shader_create = vmw_svga_winsys_shader_create;
+ vws->base.shader_destroy = vmw_svga_winsys_shader_destroy;
+ vws->base.fence_finish = vmw_svga_winsys_fence_finish;
+
+ return TRUE;
+}
+
+
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_shader.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_shader.c
new file mode 100644
index 000000000..e82486ab6
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_shader.c
@@ -0,0 +1,64 @@
+/**********************************************************
+ * Copyright 2009-2012 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+
+#include "svga_cmd.h"
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
+#include "vmw_shader.h"
+#include "vmw_screen.h"
+
+void
+vmw_svga_winsys_shader_reference(struct vmw_svga_winsys_shader **pdst,
+ struct vmw_svga_winsys_shader *src)
+{
+ struct pipe_reference *src_ref;
+ struct pipe_reference *dst_ref;
+ struct vmw_svga_winsys_shader *dst;
+
+ if(pdst == NULL || *pdst == src)
+ return;
+
+ dst = *pdst;
+
+ src_ref = src ? &src->refcnt : NULL;
+ dst_ref = dst ? &dst->refcnt : NULL;
+
+ if (pipe_reference(dst_ref, src_ref)) {
+ struct svga_winsys_screen *sws = &dst->screen->base;
+
+ vmw_ioctl_shader_destroy(dst->screen, dst->shid);
+#ifdef DEBUG
+ /* to detect dangling pointers */
+ assert(p_atomic_read(&dst->validated) == 0);
+ dst->shid = SVGA3D_INVALID_ID;
+#endif
+ sws->buffer_destroy(sws, dst->buf);
+ FREE(dst);
+ }
+
+ *pdst = src;
+}
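
The helper above follows the usual Gallium pipe_reference idiom: passing NULL as src drops the caller's reference and destroys the shader on the last release. A minimal usage sketch, assuming `shader` holds a reference obtained from the winsys shader_create path:

struct vmw_svga_winsys_shader *local = NULL;

vmw_svga_winsys_shader_reference(&local, shader); /* take a reference */
/* ... use local->shid in the command stream ... */
vmw_svga_winsys_shader_reference(&local, NULL);   /* drop it; last ref frees */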
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_shader.h b/lib/mesa/src/gallium/winsys/svga/drm/vmw_shader.h
new file mode 100644
index 000000000..28f997173
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_shader.h
@@ -0,0 +1,67 @@
+/**********************************************************
+ * Copyright 2009-2012 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/**
+ * @file
+ * Shaders for VMware SVGA winsys.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ * @author Thomas Hellstrom <thellstrom@vmware.com>
+ */
+
+#ifndef VMW_SHADER_H_
+#define VMW_SHADER_H_
+
+#include "pipe/p_compiler.h"
+#include "util/u_atomic.h"
+#include "util/u_inlines.h"
+
+struct vmw_svga_winsys_shader
+{
+ int32_t validated;
+ struct pipe_reference refcnt;
+
+ struct vmw_winsys_screen *screen;
+ struct svga_winsys_buffer *buf;
+ uint32_t shid;
+};
+
+static inline struct svga_winsys_gb_shader *
+svga_winsys_shader(struct vmw_svga_winsys_shader *shader)
+{
+ assert(!shader || shader->shid != SVGA3D_INVALID_ID);
+ return (struct svga_winsys_gb_shader *)shader;
+}
+
+static inline struct vmw_svga_winsys_shader *
+vmw_svga_winsys_shader(struct svga_winsys_gb_shader *shader)
+{
+ return (struct vmw_svga_winsys_shader *)shader;
+}
+
+void
+vmw_svga_winsys_shader_reference(struct vmw_svga_winsys_shader **pdst,
+ struct vmw_svga_winsys_shader *src);
+
+#endif /* VMW_SHADER_H_ */
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_surface.c b/lib/mesa/src/gallium/winsys/svga/drm/vmw_surface.c
new file mode 100644
index 000000000..cf648b4dd
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_surface.c
@@ -0,0 +1,207 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+
+#include "svga_cmd.h"
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "pipe/p_defines.h"
+#include "vmw_surface.h"
+#include "vmw_screen.h"
+#include "vmw_buffer.h"
+#include "vmw_context.h"
+#include "pipebuffer/pb_bufmgr.h"
+
+
+void *
+vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
+ struct svga_winsys_surface *srf,
+ unsigned flags, boolean *retry)
+{
+ struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
+ void *data = NULL;
+ struct pb_buffer *pb_buf;
+ uint32_t pb_flags;
+ struct vmw_winsys_screen *vws = vsrf->screen;
+
+ *retry = FALSE;
+ assert((flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE)) != 0);
+ pipe_mutex_lock(vsrf->mutex);
+
+ if (vsrf->mapcount) {
+ /*
+ * Only allow multiple readers to map.
+ */
+ if ((flags & PIPE_TRANSFER_WRITE) ||
+ (vsrf->map_mode & PIPE_TRANSFER_WRITE))
+ goto out_unlock;
+
+ data = vsrf->data;
+ goto out_mapped;
+ }
+
+ vsrf->rebind = FALSE;
+
+ /*
+ * If we intend to read, there's no point discarding the
+ * data if busy.
+ */
+ if (flags & PIPE_TRANSFER_READ || vsrf->shared)
+ flags &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+
+ /*
+ * Discard is a hint to a synchronized map.
+ */
+ if (flags & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
+ flags &= ~PIPE_TRANSFER_UNSYNCHRONIZED;
+
+ /*
+ * The surface is allowed to be referenced on the command stream iff
+ * we're mapping unsynchronized or discard. This is an early check.
+ * We need to recheck after a failing discard map.
+ */
+ if (!(flags & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+ PIPE_TRANSFER_UNSYNCHRONIZED)) &&
+ p_atomic_read(&vsrf->validated)) {
+ *retry = TRUE;
+ goto out_unlock;
+ }
+
+ pb_flags = flags & (PIPE_TRANSFER_READ_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);
+
+ if (flags & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ struct pb_manager *provider;
+ struct pb_desc desc;
+
+ /*
+ * First, if possible, try to map existing storage with DONTBLOCK.
+ */
+ if (!p_atomic_read(&vsrf->validated)) {
+ data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf,
+ PIPE_TRANSFER_DONTBLOCK | pb_flags);
+ if (data)
+ goto out_mapped;
+ }
+
+ /*
+ * Attempt to get a new buffer.
+ */
+ provider = vws->pools.mob_fenced;
+ memset(&desc, 0, sizeof(desc));
+ desc.alignment = 4096;
+ pb_buf = provider->create_buffer(provider, vsrf->size, &desc);
+ if (pb_buf != NULL) {
+ struct svga_winsys_buffer *vbuf =
+ vmw_svga_winsys_buffer_wrap(pb_buf);
+
+ data = vmw_svga_winsys_buffer_map(&vws->base, vbuf, pb_flags);
+ if (data) {
+ vsrf->rebind = TRUE;
+ /*
+ * We've discarded data on this surface and thus
+ * its data is no longer considered referenced.
+ */
+ vmw_swc_surface_clear_reference(swc, vsrf);
+ if (vsrf->buf)
+ vmw_svga_winsys_buffer_destroy(&vws->base, vsrf->buf);
+ vsrf->buf = vbuf;
+ goto out_mapped;
+ } else
+ vmw_svga_winsys_buffer_destroy(&vws->base, vbuf);
+ }
+ /*
+ * We couldn't get and map a new buffer for some reason.
+ * Fall through to an ordinary map.
+ * But tell the pipe driver to flush now if the surface is already
+ * on the validate list; otherwise we'd overwrite previous contents.
+ */
+ if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ p_atomic_read(&vsrf->validated)) {
+ *retry = TRUE;
+ goto out_unlock;
+ }
+ }
+
+ pb_flags |= (flags & PIPE_TRANSFER_DONTBLOCK);
+ data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf, pb_flags);
+ if (data == NULL)
+ goto out_unlock;
+
+out_mapped:
+ ++vsrf->mapcount;
+ vsrf->data = data;
+ vsrf->map_mode = flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE);
+out_unlock:
+ pipe_mutex_unlock(vsrf->mutex);
+ return data;
+}
+
+
+void
+vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
+ struct svga_winsys_surface *srf,
+ boolean *rebind)
+{
+ struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
+ pipe_mutex_lock(vsrf->mutex);
+ if (--vsrf->mapcount == 0) {
+ *rebind = vsrf->rebind;
+ vsrf->rebind = FALSE;
+ vmw_svga_winsys_buffer_unmap(&vsrf->screen->base, vsrf->buf);
+ }
+ pipe_mutex_unlock(vsrf->mutex);
+}
+
+void
+vmw_svga_winsys_surface_reference(struct vmw_svga_winsys_surface **pdst,
+ struct vmw_svga_winsys_surface *src)
+{
+ struct pipe_reference *src_ref;
+ struct pipe_reference *dst_ref;
+ struct vmw_svga_winsys_surface *dst;
+
+ if (pdst == NULL || *pdst == src)
+ return;
+
+ dst = *pdst;
+
+ src_ref = src ? &src->refcnt : NULL;
+ dst_ref = dst ? &dst->refcnt : NULL;
+
+ if (pipe_reference(dst_ref, src_ref)) {
+ if (dst->buf)
+ vmw_svga_winsys_buffer_destroy(&dst->screen->base, dst->buf);
+ vmw_ioctl_surface_destroy(dst->screen, dst->sid);
+#ifdef DEBUG
+ /* to detect dangling pointers */
+ assert(p_atomic_read(&dst->validated) == 0);
+ dst->sid = SVGA3D_INVALID_ID;
+#endif
+ pipe_mutex_destroy(dst->mutex);
+ FREE(dst);
+ }
+
+ *pdst = src;
+}
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmw_surface.h b/lib/mesa/src/gallium/winsys/svga/drm/vmw_surface.h
new file mode 100644
index 000000000..1291f380a
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmw_surface.h
@@ -0,0 +1,98 @@
+/**********************************************************
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/**
+ * @file
+ * Surfaces for VMware SVGA winsys.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef VMW_SURFACE_H_
+#define VMW_SURFACE_H_
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_atomic.h"
+#include "util/u_inlines.h"
+#include "os/os_thread.h"
+#include "pipebuffer/pb_buffer.h"
+
+#define VMW_MAX_PRESENTS 3
+
+
+
+struct vmw_svga_winsys_surface
+{
+ int32_t validated; /* atomic */
+ struct pipe_reference refcnt;
+
+ struct vmw_winsys_screen *screen;
+ uint32_t sid;
+
+ /* FIXME: make this thread safe */
+ unsigned next_present_no;
+ uint32_t present_fences[VMW_MAX_PRESENTS];
+
+ pipe_mutex mutex;
+ struct svga_winsys_buffer *buf; /* Current backing guest buffer */
+ uint32_t mapcount; /* Number of mappers */
+ uint32_t map_mode; /* PIPE_TRANSFER_[READ|WRITE] */
+ void *data; /* Pointer to data if mapcount != 0 */
+ boolean shared; /* Shared surface. Never discard */
+ uint32_t size; /* Size of backing buffer */
+ boolean rebind; /* Surface needs a rebind after next unmap */
+};
+
+
+static inline struct svga_winsys_surface *
+svga_winsys_surface(struct vmw_svga_winsys_surface *surf)
+{
+ assert(!surf || surf->sid != SVGA3D_INVALID_ID);
+ return (struct svga_winsys_surface *)surf;
+}
+
+
+static inline struct vmw_svga_winsys_surface *
+vmw_svga_winsys_surface(struct svga_winsys_surface *surf)
+{
+ return (struct vmw_svga_winsys_surface *)surf;
+}
+
+
+void
+vmw_svga_winsys_surface_reference(struct vmw_svga_winsys_surface **pdst,
+ struct vmw_svga_winsys_surface *src);
+void *
+vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
+ struct svga_winsys_surface *srf,
+ unsigned flags, boolean *retry);
+void
+vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
+ struct svga_winsys_surface *srf,
+ boolean *rebind);
+
+#endif /* VMW_SURFACE_H_ */
diff --git a/lib/mesa/src/gallium/winsys/svga/drm/vmwgfx_drm.h b/lib/mesa/src/gallium/winsys/svga/drm/vmwgfx_drm.h
new file mode 100644
index 000000000..73ad20537
--- /dev/null
+++ b/lib/mesa/src/gallium/winsys/svga/drm/vmwgfx_drm.h
@@ -0,0 +1,1063 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef __VMWGFX_DRM_H__
+#define __VMWGFX_DRM_H__
+
+#ifndef __KERNEL__
+#include <drm.h>
+#endif
+
+#define DRM_VMW_MAX_SURFACE_FACES 6
+#define DRM_VMW_MAX_MIP_LEVELS 24
+
+
+#define DRM_VMW_GET_PARAM 0
+#define DRM_VMW_ALLOC_DMABUF 1
+#define DRM_VMW_UNREF_DMABUF 2
+#define DRM_VMW_CURSOR_BYPASS 3
+/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
+#define DRM_VMW_CONTROL_STREAM 4
+#define DRM_VMW_CLAIM_STREAM 5
+#define DRM_VMW_UNREF_STREAM 6
+/* guarded by DRM_VMW_PARAM_3D == 1 */
+#define DRM_VMW_CREATE_CONTEXT 7
+#define DRM_VMW_UNREF_CONTEXT 8
+#define DRM_VMW_CREATE_SURFACE 9
+#define DRM_VMW_UNREF_SURFACE 10
+#define DRM_VMW_REF_SURFACE 11
+#define DRM_VMW_EXECBUF 12
+#define DRM_VMW_GET_3D_CAP 13
+#define DRM_VMW_FENCE_WAIT 14
+#define DRM_VMW_FENCE_SIGNALED 15
+#define DRM_VMW_FENCE_UNREF 16
+#define DRM_VMW_FENCE_EVENT 17
+#define DRM_VMW_PRESENT 18
+#define DRM_VMW_PRESENT_READBACK 19
+#define DRM_VMW_UPDATE_LAYOUT 20
+#define DRM_VMW_CREATE_SHADER 21
+#define DRM_VMW_UNREF_SHADER 22
+#define DRM_VMW_GB_SURFACE_CREATE 23
+#define DRM_VMW_GB_SURFACE_REF 24
+#define DRM_VMW_SYNCCPU 25
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GET_PARAM - get device information.
+ *
+ * DRM_VMW_PARAM_FIFO_OFFSET:
+ * Offset to use to map the first page of the FIFO read-only.
+ * The fifo is mapped using the mmap() system call on the drm device.
+ *
+ * DRM_VMW_PARAM_OVERLAY_IOCTL:
+ * Whether the driver supports the overlay ioctl.
+ */
+
+#define DRM_VMW_PARAM_NUM_STREAMS 0
+#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
+#define DRM_VMW_PARAM_3D 2
+#define DRM_VMW_PARAM_HW_CAPS 3
+#define DRM_VMW_PARAM_FIFO_CAPS 4
+#define DRM_VMW_PARAM_MAX_FB_SIZE 5
+#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
+#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
+#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
+#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
+
+/**
+ * enum drm_vmw_handle_type - handle type for ref ioctls
+ *
+ */
+enum drm_vmw_handle_type {
+ DRM_VMW_HANDLE_LEGACY = 0,
+ DRM_VMW_HANDLE_PRIME = 1
+};
+
+/**
+ * struct drm_vmw_getparam_arg
+ *
+ * @value: Returned value. //Out
+ * @param: Parameter to query. //In.
+ *
+ * Argument to the DRM_VMW_GET_PARAM Ioctl.
+ */
+
+struct drm_vmw_getparam_arg {
+ uint64_t value;
+ uint32_t param;
+ uint32_t pad64;
+};
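
As a usage sketch, querying a parameter through libdrm could look like this; `drm_fd` is assumed to be an open vmwgfx DRM file descriptor, and the includes shown are what the snippet needs:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>        /* drmCommandWriteRead() */
#include "vmwgfx_drm.h"

/* Query a single device parameter; returns 0 on ioctl failure. */
static uint64_t
vmw_query_param(int drm_fd, uint32_t param)
{
   struct drm_vmw_getparam_arg gp_arg;

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = param;
   if (drmCommandWriteRead(drm_fd, DRM_VMW_GET_PARAM,
                           &gp_arg, sizeof(gp_arg)) != 0)
      return 0;
   return gp_arg.value;
}

For example, a non-zero vmw_query_param(drm_fd, DRM_VMW_PARAM_3D) indicates that the 3D ioctls guarded above are available.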
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+
+/**
+ * struct drm_vmw_context_arg
+ *
+ * @cid: Device unique context ID.
+ *
+ * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
+ * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
+ */
+
+struct drm_vmw_context_arg {
+ int32_t cid;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_CONTEXT - Free a host context.
+ *
+ * Frees a global context id, and queues a destroy host command for the host.
+ * Does not wait for host completion. The context ID can be used directly
+ * in the command stream and shows up as the same context ID on the host.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SURFACE - Create a host surface.
+ *
+ * Allocates a device unique surface id, and queues a create surface command
+ * for the host. Does not wait for host completion. The surface ID can be
+ * used directly in the command stream and shows up as the same surface
+ * ID on the host.
+ */
+
+/**
+ * struct drm_vmw_surface_create_req
+ *
+ * @flags: Surface flags as understood by the host.
+ * @format: Surface format as understood by the host.
+ * @mip_levels: Number of mip levels for each face.
+ * An unused face should have 0 encoded.
+ * @size_addr: Address of a user-space array of struct drm_vmw_size
+ * cast to an uint64_t for 32-64 bit compatibility.
+ * The size of the array should equal the total number of mipmap levels.
+ * @shareable: Boolean whether other clients (as identified by file descriptors)
+ * may reference this surface.
+ * @scanout: Boolean whether the surface is intended to be used as a
+ * scanout.
+ *
+ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
+ * Output data from the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+struct drm_vmw_surface_create_req {
+ uint32_t flags;
+ uint32_t format;
+ uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ uint64_t size_addr;
+ int32_t shareable;
+ int32_t scanout;
+};
+
+/**
+ * struct drm_vmw_surface_arg
+ *
+ * @sid: Surface id of created surface or surface to destroy or reference.
+ * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
+ *
+ * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
+ * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
+ * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+struct drm_vmw_surface_arg {
+ int32_t sid;
+ enum drm_vmw_handle_type handle_type;
+};
+
+/**
+ * struct drm_vmw_size
+ *
+ * @width: mip level width
+ * @height: mip level height
+ * @depth: mip level depth
+ *
+ * Description of a mip level.
+ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
+ */
+
+struct drm_vmw_size {
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+ uint32_t pad64;
+};
+
+/**
+ * union drm_vmw_surface_create_arg
+ *
+ * @rep: Output data as described above.
+ * @req: Input data as described above.
+ *
+ * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
+ */
+
+union drm_vmw_surface_create_arg {
+ struct drm_vmw_surface_arg rep;
+ struct drm_vmw_surface_create_req req;
+};
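
A sketch of a legacy surface creation call, under the same assumptions as the GET_PARAM sketch above (open `drm_fd`, libdrm, this header); error handling is elided:

/* Create a one-face, one-mip 2D surface; returns the sid or -1. */
static int32_t
vmw_surface_create_sketch(int drm_fd, uint32_t format,
                          uint32_t width, uint32_t height)
{
   union drm_vmw_surface_create_arg arg;
   struct drm_vmw_size size;

   memset(&arg, 0, sizeof(arg));
   memset(&size, 0, sizeof(size));
   size.width = width;
   size.height = height;
   size.depth = 1;

   arg.req.format = format;
   arg.req.mip_levels[0] = 1;                /* faces 1-5 stay 0 (unused) */
   arg.req.size_addr = (unsigned long)&size; /* user-space pointer as u64 */

   if (drmCommandWriteRead(drm_fd, DRM_VMW_CREATE_SURFACE,
                           &arg, sizeof(arg)) != 0)
      return -1;
   return arg.rep.sid;                       /* rep overlays req on return */
}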
+
+/*************************************************************************/
+/**
+ * DRM_VMW_REF_SURFACE - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given sid, as previously
+ * returned by the DRM_VMW_CREATE_SURFACE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface ID in the command
+ * stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * in the DRM_VMW_CREATE_SURFACE ioctl.
+ */
+
+/**
+ * union drm_vmw_surface_reference_arg
+ *
+ * @rep: Output data as described above.
+ * @req: Input data as described above.
+ *
+ * Argument to the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+union drm_vmw_surface_reference_arg {
+ struct drm_vmw_surface_create_req rep;
+ struct drm_vmw_surface_arg req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
+ *
+ * Clear a reference previously put on a host surface.
+ * When all references are gone, including the one implicitly placed
+ * on creation, a destroy surface command will be queued for the host.
+ * Does not wait for completion.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_EXECBUF
+ *
+ * Submit a command buffer for execution on the host, and return a
+ * fence seqno that when signaled, indicates that the command buffer has
+ * executed.
+ */
+
+/**
+ * struct drm_vmw_execbuf_arg
+ *
+ * @commands: User-space address of a command buffer cast to an uint64_t.
+ * @command_size: Size in bytes of the command buffer.
+ * @throttle_us: Sleep until software is less than @throttle_us
+ * microseconds ahead of hardware. The driver may round this value
+ * to the nearest kernel tick.
+ * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
+ * uint64_t.
+ * @version: Allows expanding the execbuf ioctl parameters without breaking
+ * backwards compatibility, since user-space will always tell the kernel
+ * which version it uses.
+ * @flags: Execbuf flags. None currently.
+ *
+ * Argument to the DRM_VMW_EXECBUF Ioctl.
+ */
+
+#define DRM_VMW_EXECBUF_VERSION 1
+
+struct drm_vmw_execbuf_arg {
+ uint64_t commands;
+ uint32_t command_size;
+ uint32_t throttle_us;
+ uint64_t fence_rep;
+ uint32_t version;
+ uint32_t flags;
+};
+
+/**
+ * struct drm_vmw_fence_rep
+ *
+ * @handle: Fence object handle for fence associated with a command submission.
+ * @mask: Fence flags relevant for this fence object.
+ * @seqno: Fence sequence number in fifo. A fence object with a lower
+ * seqno will signal the EXEC flag before a fence object with a higher
+ * seqno. This can be used by user-space to avoid kernel calls to determine
+ * whether a fence has signaled the EXEC flag. Note that @seqno will
+ * wrap at 32-bit.
+ * @passed_seqno: The highest seqno number processed by the hardware
+ * so far. This can be used to mark user-space fence objects as signaled, and
+ * to determine whether a fence seqno might be stale.
+ * @error: User-space should set this member to -EFAULT on submission.
+ * The following actions should be taken on completion:
+ * error == -EFAULT: Fence communication failed. The host is synchronized.
+ * Use the last fence id read from the FIFO fence register.
+ * error != 0 && error != -EFAULT:
+ * Fence submission failed. The host is synchronized. Use the fence_seq member.
+ * error == 0: All is OK. The host may not be synchronized.
+ * Use the fence_seq member.
+ *
+ * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
+ */
+
+struct drm_vmw_fence_rep {
+ uint32_t handle;
+ uint32_t mask;
+ uint32_t seqno;
+ uint32_t passed_seqno;
+ uint32_t pad64;
+ int32_t error;
+};
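
A submission sketch tying the two structures together; per the @error note above, user-space pre-sets rep.error to -EFAULT. Assumes an open `drm_fd` and a command buffer filled elsewhere:

#include <errno.h>   /* EFAULT */

/* Submit a command buffer; on success *pfence holds the fence handle. */
static int
vmw_execbuf_sketch(int drm_fd, void *commands, uint32_t size,
                   uint32_t *pfence)
{
   struct drm_vmw_execbuf_arg arg;
   struct drm_vmw_fence_rep rep;
   int ret;

   memset(&arg, 0, sizeof(arg));
   memset(&rep, 0, sizeof(rep));
   rep.error = -EFAULT;                  /* see the @error description */

   arg.commands = (unsigned long)commands;
   arg.command_size = size;
   arg.throttle_us = 0;                  /* no throttling */
   arg.fence_rep = (unsigned long)&rep;
   arg.version = DRM_VMW_EXECBUF_VERSION;

   ret = drmCommandWrite(drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
   if (ret == 0 && rep.error == 0)
      *pfence = rep.handle;
   return ret;
}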
+
+/*************************************************************************/
+/**
+ * DRM_VMW_ALLOC_DMABUF
+ *
+ * Allocate a DMA buffer that is also visible to the host.
+ * NOTE: The buffer is identified by a handle and an offset, which are
+ * private to the guest, but usable in the command stream. The guest
+ * kernel may translate these
+ * and patch up the command stream accordingly. In the future, the offset may
+ * be zero at all times, or it may disappear from the interface before it is
+ * fixed.
+ *
+ * The DMA buffer may stay user-space mapped in the guest at all times,
+ * and is thus suitable for sub-allocation.
+ *
+ * DMA buffers are mapped using the mmap() syscall on the drm device.
+ */
+
+/**
+ * struct drm_vmw_alloc_dmabuf_req
+ *
+ * @size: Required minimum size of the buffer.
+ *
+ * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+struct drm_vmw_alloc_dmabuf_req {
+ uint32_t size;
+ uint32_t pad64;
+};
+
+/**
+ * struct drm_vmw_dmabuf_rep
+ *
+ * @map_handle: Offset to use in the mmap() call used to map the buffer.
+ * @handle: Handle unique to this buffer. Used for unreferencing.
+ * @cur_gmr_id: GMR id to use in the command stream when this buffer is
+ * referenced. See note above.
+ * @cur_gmr_offset: Offset to use in the command stream when this buffer is
+ * referenced. See note above.
+ *
+ * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+struct drm_vmw_dmabuf_rep {
+ uint64_t map_handle;
+ uint32_t handle;
+ uint32_t cur_gmr_id;
+ uint32_t cur_gmr_offset;
+ uint32_t pad64;
+};
+
+/**
+ * union drm_vmw_dmabuf_arg
+ *
+ * @req: Input data as described above.
+ * @rep: Output data as described above.
+ *
+ * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+union drm_vmw_alloc_dmabuf_arg {
+ struct drm_vmw_alloc_dmabuf_req req;
+ struct drm_vmw_dmabuf_rep rep;
+};
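
A sketch of the full allocate/map/free cycle (open `drm_fd`, libdrm; <sys/mman.h> added for mmap(); error paths elided). The unref argument struct is documented just below:

#include <sys/mman.h>

/* Allocate a host-visible buffer, map it, then release both. */
static void
vmw_dmabuf_sketch(int drm_fd, uint32_t size)
{
   union drm_vmw_alloc_dmabuf_arg arg;
   struct drm_vmw_unref_dmabuf_arg unref;
   void *map;

   memset(&arg, 0, sizeof(arg));
   arg.req.size = size;
   if (drmCommandWriteRead(drm_fd, DRM_VMW_ALLOC_DMABUF,
                           &arg, sizeof(arg)) != 0)
      return;

   /* Map using rep.map_handle as the mmap() offset, as described above. */
   map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
              drm_fd, arg.rep.map_handle);
   if (map != MAP_FAILED)
      munmap(map, size);

   memset(&unref, 0, sizeof(unref));
   unref.handle = arg.rep.handle;
   drmCommandWrite(drm_fd, DRM_VMW_UNREF_DMABUF, &unref, sizeof(unref));
}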
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
+ *
+ */
+
+/**
+ * struct drm_vmw_unref_dmabuf_arg
+ *
+ * @handle: Handle indicating what buffer to free. Obtained from the
+ * DRM_VMW_ALLOC_DMABUF Ioctl.
+ *
+ * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
+ */
+
+struct drm_vmw_unref_dmabuf_arg {
+ uint32_t handle;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
+ *
+ * This IOCTL controls the overlay units of the svga device.
+ * The SVGA overlay units do not work like regular hardware units in
+ * that they do not automatically read back the contents of the given dma
+ * buffer. Instead they only read back on each call to this ioctl, and
+ * at any point between this call being made and a following call that
+ * either changes the buffer or disables the stream.
+ */
+
+/**
+ * struct drm_vmw_rect
+ *
+ * Defines a rectangle. Used in the overlay ioctl to define
+ * source and destination rectangle.
+ */
+
+struct drm_vmw_rect {
+ int32_t x;
+ int32_t y;
+ uint32_t w;
+ uint32_t h;
+};
+
+/**
+ * struct drm_vmw_control_stream_arg
+ *
+ * @stream_id: Stream to control
+ * @enabled: If false all following arguments are ignored.
+ * @handle: Handle to buffer for getting data from.
+ * @format: Format of the overlay as understood by the host.
+ * @width: Width of the overlay.
+ * @height: Height of the overlay.
+ * @size: Size of the overlay in bytes.
+ * @pitch: Array of pitches, the two last are only used for YUV12 formats.
+ * @offset: Offset from start of dma buffer to overlay.
+ * @src: Source rect, must be within the defined area above.
+ * @dst: Destination rect, x and y may be negative.
+ *
+ * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
+ */
+
+struct drm_vmw_control_stream_arg {
+ uint32_t stream_id;
+ uint32_t enabled;
+
+ uint32_t flags;
+ uint32_t color_key;
+
+ uint32_t handle;
+ uint32_t offset;
+ int32_t format;
+ uint32_t size;
+ uint32_t width;
+ uint32_t height;
+ uint32_t pitch[3];
+
+ uint32_t pad64;
+ struct drm_vmw_rect src;
+ struct drm_vmw_rect dst;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
+ *
+ */
+
+#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
+#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
+
+/**
+ * struct drm_vmw_cursor_bypass_arg
+ *
+ * @flags: Flags.
+ * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
+ * @xpos: X position of cursor.
+ * @ypos: Y position of cursor.
+ * @xhot: X hotspot.
+ * @yhot: Y hotspot.
+ *
+ * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
+ */
+
+struct drm_vmw_cursor_bypass_arg {
+ uint32_t flags;
+ uint32_t crtc_id;
+ int32_t xpos;
+ int32_t ypos;
+ int32_t xhot;
+ int32_t yhot;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CLAIM_STREAM - Claim a single stream.
+ */
+
+/**
+ * struct drm_vmw_stream_arg
+ *
+ * @stream_id: Device unique stream ID.
+ *
+ * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
+ * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
+ */
+
+struct drm_vmw_stream_arg {
+ uint32_t stream_id;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_STREAM - Unclaim a stream.
+ *
+ * Return a single stream that was claimed by this process. Also makes
+ * sure that the stream has been stopped.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GET_3D_CAP
+ *
+ * Read 3D capabilities from the FIFO
+ *
+ */
+
+/**
+ * struct drm_vmw_get_3d_cap_arg
+ *
+ * @buffer: Pointer to a buffer for capability data, cast to an uint64_t
+ * @max_size: Max size to copy
+ *
+ * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
+ */
+
+struct drm_vmw_get_3d_cap_arg {
+ uint64_t buffer;
+ uint32_t max_size;
+ uint32_t pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_WAIT
+ *
+ * Waits for a fence object to signal. The wait is interruptible, so that
+ * signals may be delivered during the interrupt. The wait may timeout,
+ * in which case the call returns -EBUSY. If the wait is restarted,
+ * that is restarting without resetting @cookie_valid to zero,
+ * the timeout is computed from the first call.
+ *
+ * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
+ * on:
+ * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the
+ * command stream have executed.
+ * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query
+ * finish commands in the buffer given to the EXECBUF ioctl returning
+ * the fence object handle are available to user-space.
+ *
+ * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
+ * fence wait ioctl returns 0, the fence object has been unreferenced after
+ * the wait.
+ */
+
+#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
+#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)
+
+#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
+
+/**
+ * struct drm_vmw_fence_wait_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
+ * @kernel_cookie: Set to 0 on first call. Left alone on restart.
+ * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
+ * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
+ * before returning.
+ * @flags: Fence flags to wait on.
+ * @wait_options: Options that control the behaviour of the wait ioctl.
+ *
+ * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
+ */
+
+struct drm_vmw_fence_wait_arg {
+ uint32_t handle;
+ int32_t cookie_valid;
+ uint64_t kernel_cookie;
+ uint64_t timeout_us;
+ int32_t lazy;
+ int32_t flags;
+ int32_t wait_options;
+ int32_t pad64;
+};
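
A wait sketch using the struct above (open `drm_fd`; `handle` from an earlier EXECBUF):

/* Wait up to ten seconds for commands ahead of the fence to execute. */
static int
vmw_fence_wait_sketch(int drm_fd, uint32_t handle)
{
   struct drm_vmw_fence_wait_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;
   arg.timeout_us = 10 * 1000000;
   arg.lazy = 1;                        /* timing not critical */
   arg.flags = DRM_VMW_FENCE_FLAG_EXEC;
   arg.cookie_valid = 0;                /* must be 0 on the first call */

   /* -EBUSY indicates a timeout; the cookie fields support restarting. */
   return drmCommandWriteRead(drm_fd, DRM_VMW_FENCE_WAIT,
                              &arg, sizeof(arg));
}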
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_SIGNALED
+ *
+ * Checks if a fence object is signaled.
+ */
+
+/**
+ * struct drm_vmw_fence_signaled_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
+ * @signaled: Out: Non-zero when the fence has signaled.
+ * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
+ * EXEC flag of user-space fence objects.
+ * @signaled_flags: Out: Fence flags that have signaled.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
+ */
+
+struct drm_vmw_fence_signaled_arg {
+ uint32_t handle;
+ uint32_t flags;
+ int32_t signaled;
+ uint32_t passed_seqno;
+ uint32_t signaled_flags;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_UNREF
+ *
+ * Unreferences a fence object, and causes it to be destroyed if there are no
+ * other references to it.
+ *
+ */
+
+/**
+ * struct drm_vmw_fence_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
+ */
+
+struct drm_vmw_fence_arg {
+ uint32_t handle;
+ uint32_t pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_EVENT
+ *
+ * Queues an event on a fence to be delivered on the drm character device
+ * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
+ * Optionally the approximate time when the fence signaled is
+ * given by the event.
+ */
+
+/*
+ * The event type
+ */
+#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
+
+struct drm_vmw_event_fence {
+ struct drm_event base;
+ uint64_t user_data;
+ uint32_t tv_sec;
+ uint32_t tv_usec;
+};
+
+/*
+ * Flags that may be given to the command.
+ */
+/* Request fence signaled time on the event. */
+#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
+
+/**
+ * struct drm_vmw_fence_event_arg
+ *
+ * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
+ * the fence is not supposed to be referenced by user-space.
+ * @user_info: Info to be delivered with the event.
+ * @handle: Attach the event to this fence only.
+ * @flags: A set of flags as defined above.
+ */
+struct drm_vmw_fence_event_arg {
+ uint64_t fence_rep;
+ uint64_t user_data;
+ uint32_t handle;
+ uint32_t flags;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT
+ *
+ * Executes an SVGA present on a given fb for a given surface. The surface
+ * is placed on the framebuffer. Cliprects are given relative to the given
+ * point (the point designated by dest_{x|y}).
+ *
+ */
+
+/**
+ * struct drm_vmw_present_arg
+ * @fb_id: framebuffer id to present / read back from.
+ * @sid: Surface id to present from.
+ * @dest_x: X placement coordinate for surface.
+ * @dest_y: Y placement coordinate for surface.
+ * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
+ * @num_clips: Number of cliprects given relative to the framebuffer origin,
+ * in the same coordinate space as the frame buffer.
+ * @pad64: Unused 64-bit padding.
+ *
+ * Input argument to the DRM_VMW_PRESENT ioctl.
+ */
+
+struct drm_vmw_present_arg {
+ uint32_t fb_id;
+ uint32_t sid;
+ int32_t dest_x;
+ int32_t dest_y;
+ uint64_t clips_ptr;
+ uint32_t num_clips;
+ uint32_t pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT_READBACK
+ *
+ * Executes an SVGA present readback from a given fb to the dma buffer
+ * currently bound as the fb. If there is no dma buffer bound to the fb,
+ * an error will be returned.
+ *
+ */
+
+/**
+ * struct drm_vmw_present_arg
+ * @fb_id: fb_id to present / read back from.
+ * @num_clips: Number of cliprects.
+ * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
+ * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
+ * If this member is NULL, then the ioctl should not return a fence.
+ */
+
+struct drm_vmw_present_readback_arg {
+ uint32_t fb_id;
+ uint32_t num_clips;
+ uint64_t clips_ptr;
+ uint64_t fence_rep;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UPDATE_LAYOUT - Update layout
+ *
+ * Updates the preferred modes and connection status for connectors. The
+ * command consists of one drm_vmw_update_layout_arg pointing to an array
+ * of num_outputs drm_vmw_rect's.
+ */
+
+/**
+ * struct drm_vmw_update_layout_arg
+ *
+ * @num_outputs: number of active connectors
+ * @rects: pointer to array of drm_vmw_rect cast to an uint64_t
+ *
+ * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
+ */
+struct drm_vmw_update_layout_arg {
+ uint32_t num_outputs;
+ uint32_t pad64;
+ uint64_t rects;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SHADER - Create shader
+ *
+ * Creates a shader and optionally binds it to a dma buffer containing
+ * the shader byte-code.
+ */
+
+/**
+ * enum drm_vmw_shader_type - Shader types
+ */
+enum drm_vmw_shader_type {
+ drm_vmw_shader_type_vs = 0,
+ drm_vmw_shader_type_ps,
+ drm_vmw_shader_type_gs
+};
+
+
+/**
+ * struct drm_vmw_shader_create_arg
+ *
+ * @shader_type: Shader type of the shader to create.
+ * @size: Size of the shader byte-code in bytes.
+ * @buffer_handle: Buffer handle identifying the buffer containing the
+ * shader byte-code.
+ * @shader_handle: On successful completion contains a handle that
+ * can be used to subsequently identify the shader.
+ * @offset: Offset in bytes into the buffer given by @buffer_handle,
+ * where the shader byte-code starts.
+ *
+ * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
+ */
+struct drm_vmw_shader_create_arg {
+ enum drm_vmw_shader_type shader_type;
+ uint32_t size;
+ uint32_t buffer_handle;
+ uint32_t shader_handle;
+ uint64_t offset;
+};
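
A creation sketch, assuming the byte-code was already copied into a DMA buffer whose handle came from DRM_VMW_ALLOC_DMABUF:

/* Create a vertex shader; returns the shader handle, or 0 on failure
 * (0 is used purely as this sketch's error marker). */
static uint32_t
vmw_shader_create_sketch(int drm_fd, uint32_t buffer_handle,
                         uint32_t code_size)
{
   struct drm_vmw_shader_create_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.shader_type = drm_vmw_shader_type_vs;
   arg.size = code_size;
   arg.buffer_handle = buffer_handle;
   arg.offset = 0;                  /* byte-code starts at buffer offset 0 */

   if (drmCommandWriteRead(drm_fd, DRM_VMW_CREATE_SHADER,
                           &arg, sizeof(arg)) != 0)
      return 0;
   return arg.shader_handle;
}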
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SHADER - Unreferences a shader
+ *
+ * Drops a user-space reference to a shader, destroying the shader
+ * itself once the last reference is gone.
+ */
+
+/**
+ * struct drm_vmw_shader_arg
+ *
+ * @handle: Handle identifying the shader to destroy.
+ *
+ * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
+ */
+struct drm_vmw_shader_arg {
+ uint32_t handle;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ */
+
+/**
+ * enum drm_vmw_surface_flags
+ *
+ * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
+ * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
+ * surface.
+ * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
+ * given.
+ */
+enum drm_vmw_surface_flags {
+ drm_vmw_surface_flag_shareable = (1 << 0),
+ drm_vmw_surface_flag_scanout = (1 << 1),
+ drm_vmw_surface_flag_create_buffer = (1 << 2)
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_req
+ *
+ * @svga3d_flags: SVGA3d surface flags for the device.
+ * @format: SVGA3d format.
+ * @mip_levels: Number of mip levels for all faces.
+ * @drm_surface_flags: Flags as described above.
+ * @multisample_count: Future use. Set to 0.
+ * @autogen_filter: Future use. Set to 0.
+ * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
+ * if none.
+ * @base_size: Size of the base mip level for all faces.
+ *
+ * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+struct drm_vmw_gb_surface_create_req {
+ uint32_t svga3d_flags;
+ uint32_t format;
+ uint32_t mip_levels;
+ enum drm_vmw_surface_flags drm_surface_flags;
+ uint32_t multisample_count;
+ uint32_t autogen_filter;
+ uint32_t buffer_handle;
+ uint32_t pad64;
+ struct drm_vmw_size base_size;
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_rep
+ *
+ * @handle: Surface handle.
+ * @backup_size: Size of backup buffers for this surface.
+ * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
+ * @buffer_size: Actual size of the buffer identified by
+ * @buffer_handle
+ * @buffer_map_handle: Offset into device address space for the buffer
+ * identified by @buffer_handle.
+ *
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
+ * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+struct drm_vmw_gb_surface_create_rep {
+ uint32_t handle;
+ uint32_t backup_size;
+ uint32_t buffer_handle;
+ uint32_t buffer_size;
+ uint64_t buffer_map_handle;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+union drm_vmw_gb_surface_create_arg {
+ struct drm_vmw_gb_surface_create_rep rep;
+ struct drm_vmw_gb_surface_create_req req;
+};
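
A creation sketch that lets the kernel allocate the backup buffer; the SVGA3d format value comes from the svga3d headers and is a placeholder parameter here, and ~0u stands in for SVGA3D_INVALID_ID:

/* Create a 2D guest-backed surface with a kernel-allocated backup buffer. */
static int
vmw_gb_surface_create_sketch(int drm_fd, uint32_t svga3d_format,
                             uint32_t width, uint32_t height,
                             uint32_t *phandle)
{
   union drm_vmw_gb_surface_create_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.req.format = svga3d_format;
   arg.req.mip_levels = 1;
   arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
   arg.req.buffer_handle = ~0u;       /* SVGA3D_INVALID_ID: no backup given */
   arg.req.base_size.width = width;
   arg.req.base_size.height = height;
   arg.req.base_size.depth = 1;

   if (drmCommandWriteRead(drm_fd, DRM_VMW_GB_SURFACE_CREATE,
                           &arg, sizeof(arg)) != 0)
      return -1;
   *phandle = arg.rep.handle;         /* rep overlays req on return */
   return 0;
}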
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+
+/**
+ * struct drm_vmw_gb_surface_ref_rep
+ *
+ * @creq: The data used as input when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_req"
+ * @crep: Additional data output when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
+ */
+struct drm_vmw_gb_surface_ref_rep {
+ struct drm_vmw_gb_surface_create_req creq;
+ struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+union drm_vmw_gb_surface_reference_arg {
+ struct drm_vmw_gb_surface_ref_rep rep;
+ struct drm_vmw_surface_arg req;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
+ *
+ * Idles any previously submitted GPU operations on the buffer and
+ * by default blocks command submissions that reference the buffer.
+ * If the file descriptor used to grab a blocking CPU sync is closed, the
+ * cpu sync is released.
+ * The flags argument indicates how the grab / release operation should be
+ * performed:
+ */
+
+/**
+ * enum drm_vmw_synccpu_flags - Synccpu flags:
+ *
+ * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
+ * hint to the kernel to allow command submissions that reference the buffer
+ * read-only.
+ * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
+ * referencing this buffer.
+ * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
+ * -EBUSY should the buffer be busy.
+ * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
+ * while the buffer is synced for CPU. This is similar to the GEM bo idle
+ * behavior.
+ */
+enum drm_vmw_synccpu_flags {
+ drm_vmw_synccpu_read = (1 << 0),
+ drm_vmw_synccpu_write = (1 << 1),
+ drm_vmw_synccpu_dontblock = (1 << 2),
+ drm_vmw_synccpu_allow_cs = (1 << 3)
+};
+
+/**
+ * enum drm_vmw_synccpu_op - Synccpu operations:
+ *
+ * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
+ * @drm_vmw_synccpu_release: Release a previous grab.
+ */
+enum drm_vmw_synccpu_op {
+ drm_vmw_synccpu_grab,
+ drm_vmw_synccpu_release
+};
+
+/**
+ * struct drm_vmw_synccpu_arg
+ *
+ * @op: The synccpu operation as described above.
+ * @handle: Handle identifying the buffer object.
+ * @flags: Flags as described above.
+ */
+struct drm_vmw_synccpu_arg {
+ enum drm_vmw_synccpu_op op;
+ enum drm_vmw_synccpu_flags flags;
+ uint32_t handle;
+ uint32_t pad64;
+};
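
A sketch bracketing CPU access with a grab/release pair (open `drm_fd`; `handle` identifies a buffer previously mapped with mmap()):

/* Idle the GPU on the buffer and block conflicting command submission
 * while the CPU touches it, then release the sync. */
static void
vmw_synccpu_sketch(int drm_fd, uint32_t handle)
{
   struct drm_vmw_synccpu_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.op = drm_vmw_synccpu_grab;
   arg.handle = handle;
   arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
   if (drmCommandWrite(drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg)) != 0)
      return;

   /* ... CPU reads/writes through the existing mapping ... */

   arg.op = drm_vmw_synccpu_release;
   drmCommandWrite(drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}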
+
+#endif