author     Jonathan Gray <jsg@cvs.openbsd.org>  2022-09-02 05:18:14 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>  2022-09-02 05:18:14 +0000
commit     5f66494d31f735486b8222ecfa0a0c9046e92543 (patch)
tree       f699ac8d7f6d510c30bded04f96a1209344f6a47 /lib/mesa/.gitlab-ci
parent     17a5b543e3479aaa779cc68345c46d437edd05d8 (diff)
Import Mesa 22.1.7
Diffstat (limited to 'lib/mesa/.gitlab-ci')
-rw-r--r--  lib/mesa/.gitlab-ci/all-skips.txt | 6
-rwxr-xr-x  lib/mesa/.gitlab-ci/bare-metal/cisco-2960-poe-off.sh | 17
-rwxr-xr-x  lib/mesa/.gitlab-ci/bare-metal/cisco-2960-poe-on.sh | 21
-rwxr-xr-x  lib/mesa/.gitlab-ci/bare-metal/cros_servo_run.py | 14
-rwxr-xr-x  lib/mesa/.gitlab-ci/bare-metal/fastboot_run.py | 22
-rwxr-xr-x  lib/mesa/.gitlab-ci/bare-metal/poe-powered.sh | 34
-rwxr-xr-x  lib/mesa/.gitlab-ci/bare-metal/poe_run.py | 4
-rw-r--r--  lib/mesa/.gitlab-ci/bare-metal/rootfs-setup.sh | 8
-rwxr-xr-x  lib/mesa/.gitlab-ci/bare-metal/serial_buffer.py | 27
-rw-r--r--  lib/mesa/.gitlab-ci/build/gitlab-ci.yml | 525
-rwxr-xr-x  lib/mesa/.gitlab-ci/common/generate-env.sh | 39
-rwxr-xr-x  lib/mesa/.gitlab-ci/common/init-stage1.sh | 1
-rwxr-xr-x  lib/mesa/.gitlab-ci/common/init-stage2.sh | 54
-rwxr-xr-x  lib/mesa/.gitlab-ci/common/intel-gpu-freq.sh | 567
-rw-r--r--  lib/mesa/.gitlab-ci/container/arm64.config | 15
-rw-r--r--  lib/mesa/.gitlab-ci/container/baremetal_build.sh | 5
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-crosvm.sh | 56
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-crosvm_no-syslog.patch | 43
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-deqp-runner.sh | 27
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-deqp.sh | 10
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-fossilize.sh | 2
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-kernel.sh | 2
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-libdrm.sh | 2
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-piglit.sh | 2
-rwxr-xr-x  lib/mesa/.gitlab-ci/container/build-skqp.sh | 97
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-skqp_BUILD.gn.patch | 13
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-skqp_base.gn | 47
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-skqp_fetch_gn.patch | 68
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-skqp_git-sync-deps.patch | 142
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-skqp_is_clang.py.patch | 13
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-vkd3d-proton.sh | 4
-rw-r--r--  lib/mesa/.gitlab-ci/container/build-wayland.sh | 22
-rwxr-xr-x  lib/mesa/.gitlab-ci/container/container_pre_build.sh | 2
-rw-r--r--  lib/mesa/.gitlab-ci/container/create-rootfs.sh | 32
-rw-r--r--  lib/mesa/.gitlab-ci/container/debian/android_build.sh | 50
-rw-r--r--  lib/mesa/.gitlab-ci/container/debian/arm_build.sh | 3
-rw-r--r--  lib/mesa/.gitlab-ci/container/debian/arm_test.sh | 6
-rw-r--r--  lib/mesa/.gitlab-ci/container/debian/x86_build-base.sh | 3
-rw-r--r--  lib/mesa/.gitlab-ci/container/debian/x86_build.sh | 15
-rw-r--r--  lib/mesa/.gitlab-ci/container/debian/x86_test-base.sh | 2
-rw-r--r--  lib/mesa/.gitlab-ci/container/debian/x86_test-gl.sh | 39
-rw-r--r--  lib/mesa/.gitlab-ci/container/debian/x86_test-vk.sh | 6
-rw-r--r--  lib/mesa/.gitlab-ci/container/fedora/x86_build.sh | 14
-rw-r--r--  lib/mesa/.gitlab-ci/container/gitlab-ci.yml | 420
-rwxr-xr-x  lib/mesa/.gitlab-ci/container/lava_build.sh | 41
-rw-r--r--  lib/mesa/.gitlab-ci/container/x86_64.config | 5
-rw-r--r--  lib/mesa/.gitlab-ci/cross-xfail-s390x | 1
-rwxr-xr-x  lib/mesa/.gitlab-ci/crosvm-init.sh | 39
-rwxr-xr-x  lib/mesa/.gitlab-ci/crosvm-runner.sh | 142
-rwxr-xr-x  lib/mesa/.gitlab-ci/deqp-runner.sh | 38
-rw-r--r--  lib/mesa/.gitlab-ci/download-git-cache.sh | 8
-rw-r--r--  lib/mesa/.gitlab-ci/image-tags.yml | 21
-rwxr-xr-x  lib/mesa/.gitlab-ci/lava/lava-gitlab-ci.yml | 9
-rwxr-xr-x  lib/mesa/.gitlab-ci/lava/lava-pytest.sh | 34
-rwxr-xr-x  lib/mesa/.gitlab-ci/lava/lava-submit.sh | 19
-rwxr-xr-x  lib/mesa/.gitlab-ci/lava/lava_job_submitter.py | 131
-rwxr-xr-x  lib/mesa/.gitlab-ci/meson/build.sh | 1
-rwxr-xr-x  lib/mesa/.gitlab-ci/piglit/piglit-runner.sh | 25
-rwxr-xr-x  lib/mesa/.gitlab-ci/piglit/piglit-traces.sh | 241
-rwxr-xr-x  lib/mesa/.gitlab-ci/prepare-artifacts.sh | 3
-rwxr-xr-x  lib/mesa/.gitlab-ci/skqp-runner.sh | 153
-rw-r--r--  lib/mesa/.gitlab-ci/test-source-dep.yml | 104
-rw-r--r--  lib/mesa/.gitlab-ci/test/gitlab-ci.yml | 314
-rw-r--r--  lib/mesa/.gitlab-ci/tests/__init__.py | 0
-rw-r--r--  lib/mesa/.gitlab-ci/tests/test_lava_job_submitter.py | 250
-rw-r--r--  lib/mesa/.gitlab-ci/valve/b2c.yml.jinja2.jinja2 | 63
-rwxr-xr-x  lib/mesa/.gitlab-ci/valve/generate_b2c.py | 101
-rw-r--r--  lib/mesa/.gitlab-ci/windows/Dockerfile_build | 13
-rw-r--r--  lib/mesa/.gitlab-ci/windows/Dockerfile_test | 7
-rw-r--r--  lib/mesa/.gitlab-ci/windows/deqp_runner_run.ps1 | 31
-rw-r--r--  lib/mesa/.gitlab-ci/windows/mesa_build.ps1 | 54
-rw-r--r--  lib/mesa/.gitlab-ci/windows/mesa_container.ps1 | 4
-rw-r--r--  lib/mesa/.gitlab-ci/windows/mesa_deps_build.ps1 | 146
-rw-r--r--  lib/mesa/.gitlab-ci/windows/mesa_deps_test.ps1 | 124
-rw-r--r--  lib/mesa/.gitlab-ci/windows/piglit_run.ps1 | 2
-rw-r--r--  lib/mesa/.gitlab-ci/windows/spirv2dxil_check.ps1 | 54
-rw-r--r--  lib/mesa/.gitlab-ci/windows/spirv2dxil_run.ps1 | 16
77 files changed, 4399 insertions, 296 deletions
diff --git a/lib/mesa/.gitlab-ci/all-skips.txt b/lib/mesa/.gitlab-ci/all-skips.txt
index 52a8e87f3..526debd4e 100644
--- a/lib/mesa/.gitlab-ci/all-skips.txt
+++ b/lib/mesa/.gitlab-ci/all-skips.txt
@@ -6,12 +6,12 @@
# reliable to be run in parallel with other tests due to CPU-side timing.
dEQP-GLES[0-9]*.functional.flush_finish.*
-# https://gitlab.freedesktop.org/mesa/mesa/-/issues/4575
-dEQP-VK.wsi.display.get_display_plane_capabilities
-
# piglit: WGL is Windows-only
wgl@.*
# These are sensitive to CPU timing, and would need to be run in isolation
# on the system rather than in parallel with other tests.
glx@glx_arb_sync_control@timing.*
+
+# This test is not built with waffle, while we do build tests with waffle
+spec@!opengl 1.1@windowoverlap
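
Note: the entries in all-skips.txt are regular expressions that deqp-runner matches against test case names; matching cases are never executed. A minimal sketch of how such a skip list is typically passed to the runner — the paths and flags below are illustrative assumptions, not taken from this commit:

    # Hypothetical invocation: "wgl@.*" drops the whole WGL group, etc.
    deqp-runner run \
        --deqp /deqp/modules/gles2/deqp-gles2 \
        --caselist /deqp/mustpass/gles2-master.txt \
        --skips install/all-skips.txt \
        --output results \
        -- --deqp-surface-width=256 --deqp-surface-height=256
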
diff --git a/lib/mesa/.gitlab-ci/bare-metal/cisco-2960-poe-off.sh b/lib/mesa/.gitlab-ci/bare-metal/cisco-2960-poe-off.sh
new file mode 100755
index 000000000..2234ee796
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/bare-metal/cisco-2960-poe-off.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+if [ -z "$BM_POE_INTERFACE" ]; then
+ echo "Must supply the PoE Interface to power down"
+ exit 1
+fi
+
+if [ -z "$BM_POE_ADDRESS" ]; then
+ echo "Must supply the PoE Switch host"
+ exit 1
+fi
+
+SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
+SNMP_ON="i 1"
+SNMP_OFF="i 4"
+
+snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF
diff --git a/lib/mesa/.gitlab-ci/bare-metal/cisco-2960-poe-on.sh b/lib/mesa/.gitlab-ci/bare-metal/cisco-2960-poe-on.sh
new file mode 100755
index 000000000..60d7d726a
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/bare-metal/cisco-2960-poe-on.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+if [ -z "$BM_POE_INTERFACE" ]; then
+ echo "Must supply the PoE Interface to power up"
+ exit 1
+fi
+
+if [ -z "$BM_POE_ADDRESS" ]; then
+ echo "Must supply the PoE Switch host"
+ exit 1
+fi
+
+set -ex
+
+SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
+SNMP_ON="i 1"
+SNMP_OFF="i 4"
+
+snmpset -v2c -r 3 -t 10 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF
+sleep 3s
+snmpset -v2c -r 3 -t 10 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON
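
Note: both scripts write the same per-interface SNMP object; integer 4 powers the port off and integer 1 powers it on, so the power-on script bounces the port with a 3-second gap. A hedged companion sketch for reading the current state before toggling (standard snmpget usage; the value meanings are as used by the scripts above):

    # Read back the PoE control object for the port
    # (1 = on, 4 = off, matching SNMP_ON/SNMP_OFF above).
    snmpget -v2c -c mesaci "$BM_POE_ADDRESS" \
        "1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
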
diff --git a/lib/mesa/.gitlab-ci/bare-metal/cros_servo_run.py b/lib/mesa/.gitlab-ci/bare-metal/cros_servo_run.py
index 995e11210..042465e98 100755
--- a/lib/mesa/.gitlab-ci/bare-metal/cros_servo_run.py
+++ b/lib/mesa/.gitlab-ci/bare-metal/cros_servo_run.py
@@ -50,12 +50,18 @@ class CrosServoRun:
target=self.iter_feed_queue, daemon=True, args=(self.cpu_ser.lines(),))
self.iter_feed_cpu.start()
+ def close(self):
+ self.ec_ser.close()
+ self.cpu_ser.close()
+ self.iter_feed_ec.join()
+ self.iter_feed_cpu.join()
+
# Feed lines from our serial queues into the merged queue, marking when our
# input is done.
def iter_feed_queue(self, it):
for i in it:
self.serial_queue.put(i)
- self.serial_queue.put(sentinel)
+ self.serial_queue.put(self.sentinel)
# Return the next line from the queue, counting how many threads have
# terminated and joining when done
@@ -150,6 +156,10 @@ class CrosServoRun:
"Detected spontaneous reboot, restarting run...")
return 2
+ if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line):
+ self.print_error("Detected cheza MMU fail, restarting run...")
+ return 2
+
result = re.search("hwci: mesa: (\S*)", line)
if result:
if result.group(1) == "pass":
@@ -179,6 +189,8 @@ def main():
# power down the CPU on the device
servo.ec_write("power off\n")
+ servo.close()
+
sys.exit(retval)
diff --git a/lib/mesa/.gitlab-ci/bare-metal/fastboot_run.py b/lib/mesa/.gitlab-ci/bare-metal/fastboot_run.py
index 3654c7c80..0521a387d 100755
--- a/lib/mesa/.gitlab-ci/bare-metal/fastboot_run.py
+++ b/lib/mesa/.gitlab-ci/bare-metal/fastboot_run.py
@@ -36,6 +36,9 @@ class FastbootRun:
self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "R SERIAL> ", timeout=600)
self.fastboot="fastboot boot -s {ser} artifacts/fastboot.img".format(ser=args.fbserial)
+ def close(self):
+ self.ser.close()
+
def print_error(self, message):
RED = '\033[0;31m'
NO_COLOR = '\033[0m'
@@ -67,7 +70,13 @@ class FastbootRun:
if self.logged_system(self.fastboot) != 0:
return 1
+ print_more_lines = -1
for line in self.ser.lines():
+ if print_more_lines == 0:
+ return 2
+ if print_more_lines > 0:
+ print_more_lines -= 1
+
if re.search("---. end Kernel panic", line):
return 1
@@ -89,6 +98,18 @@ class FastbootRun:
"Detected network device failure, restarting run...")
return 2
+ # A3xx recovery doesn't quite work. Sometimes the GPU will get
+ # wedged and recovery will fail (because power can't be reset?)
+ # This assumes that the jobs are sufficiently well-tested that GPU
+ # hangs aren't always triggered, so just try again. But print some
+ # more lines first so that we get better information on the cause
+ # of the hang. Once a hang happens, it's pretty chatty.
+ if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line:
+ self.print_error(
+ "Detected GPU hang, restarting run...")
+ if print_more_lines == -1:
+ print_more_lines = 30
+
result = re.search("hwci: mesa: (\S*)", line)
if result:
if result.group(1) == "pass":
@@ -111,6 +132,7 @@ def main():
while True:
retval = fastboot.run()
+ fastboot.close()
if retval != 2:
break
diff --git a/lib/mesa/.gitlab-ci/bare-metal/poe-powered.sh b/lib/mesa/.gitlab-ci/bare-metal/poe-powered.sh
index 3dd7330cd..4bd005745 100755
--- a/lib/mesa/.gitlab-ci/bare-metal/poe-powered.sh
+++ b/lib/mesa/.gitlab-ci/bare-metal/poe-powered.sh
@@ -20,18 +20,6 @@ if [ -z "$BM_POE_ADDRESS" ]; then
exit 1
fi
-if [ -z "$BM_POE_USERNAME" ]; then
- echo "Must set BM_POE_USERNAME in your gitlab-runner config.toml [[runners]] environment"
- echo "This is the PoE switch username."
- exit 1
-fi
-
-if [ -z "$BM_POE_PASSWORD" ]; then
- echo "Must set BM_POE_PASSWORD in your gitlab-runner config.toml [[runners]] environment"
- echo "This is the PoE switch password."
- exit 1
-fi
-
if [ -z "$BM_POE_INTERFACE" ]; then
echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch interface where the device is connected."
@@ -107,11 +95,25 @@ fi
# Install kernel modules (it could be either in /lib/modules or
# /usr/lib/modules, but we want to install in the latter)
-[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a --delete $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
-[ -d $BM_BOOTFS/lib/modules ] && rsync -a --delete $BM_BOOTFS/lib/modules/ /nfs/usr/lib/modules/
+[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
+[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
# Install kernel image + bootloader files
-rsync -a --delete $BM_BOOTFS/boot/ /tftp/
+rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
+
+# Set up the pxelinux config for Jetson Nano
+mkdir -p /tftp/pxelinux.cfg
+cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra210-p3450-0000
+PROMPT 0
+TIMEOUT 30
+DEFAULT primary
+MENU TITLE jetson nano boot options
+LABEL primary
+ MENU LABEL CI kernel on TFTP
+ LINUX Image
+ FDT tegra210-p3450-0000.dtb
+ APPEND \${cbootargs} $BM_CMDLINE
+EOF
# Create the rootfs in the NFS directory
mkdir -p /nfs/results
@@ -123,7 +125,7 @@ echo "$BM_CMDLINE" > /tftp/cmdline.txt
printf "$BM_BOOTCONFIG" >> /tftp/config.txt
set +e
-ATTEMPTS=2
+ATTEMPTS=10
while [ $((ATTEMPTS--)) -gt 0 ]; do
python3 $BM/poe_run.py \
--dev="$BM_SERIAL" \
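
Note: raising ATTEMPTS from 2 to 10 pairs with the runners' convention, visible in the Python changes above, of returning exit code 2 for known transient failures (spontaneous reboot, SMMU deadlock, nouveau boot bug) so the whole boot-and-test cycle is retried. A standalone sketch of the idiom; the break-on-non-2 step mirrors the fastboot runner's main loop, and run_flaky_job is a hypothetical stand-in for poe_run.py:

    # Exit code 2 means "retry the run"; anything else ends the loop.
    ATTEMPTS=10
    while [ $((ATTEMPTS--)) -gt 0 ]; do
        run_flaky_job          # hypothetical stand-in for "python3 $BM/poe_run.py ..."
        ret=$?
        [ $ret -ne 2 ] && break
    done
    exit $ret
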
diff --git a/lib/mesa/.gitlab-ci/bare-metal/poe_run.py b/lib/mesa/.gitlab-ci/bare-metal/poe_run.py
index f22b90d5e..f53c1478f 100755
--- a/lib/mesa/.gitlab-ci/bare-metal/poe_run.py
+++ b/lib/mesa/.gitlab-ci/bare-metal/poe_run.py
@@ -66,6 +66,10 @@ class PoERun:
self.print_error("Memory overflow in the binner; GPU hang")
return 1
+ if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line):
+ self.print_error("nouveau jetson boot bug, retrying.")
+ return 2
+
result = re.search("hwci: mesa: (\S*)", line)
if result:
if result.group(1) == "pass":
diff --git a/lib/mesa/.gitlab-ci/bare-metal/rootfs-setup.sh b/lib/mesa/.gitlab-ci/bare-metal/rootfs-setup.sh
index 0b017454c..7a706d0a0 100644
--- a/lib/mesa/.gitlab-ci/bare-metal/rootfs-setup.sh
+++ b/lib/mesa/.gitlab-ci/bare-metal/rootfs-setup.sh
@@ -8,15 +8,21 @@ mkdir -p $rootfs_dst/results
cp $BM/bm-init.sh $rootfs_dst/init
cp $CI_COMMON/init*.sh $rootfs_dst/
+# Make JWT token available as file in the bare-metal storage to enable access
+# to MinIO
+cp "${CI_JOB_JWT_FILE}" "${rootfs_dst}${CI_JOB_JWT_FILE}"
+
cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
+cp $CI_COMMON/intel-gpu-freq.sh $rootfs_dst/
set +x
+
# Pass through relevant env vars from the gitlab job to the baremetal init script
"$CI_COMMON"/generate-env.sh > $rootfs_dst/set-job-env-vars.sh
chmod +x $rootfs_dst/set-job-env-vars.sh
echo "Variables passed through:"
cat $rootfs_dst/set-job-env-vars.sh
-echo "export CI_JOB_JWT=${CI_JOB_JWT@Q}" >> $rootfs_dst/set-job-env-vars.sh
+
set -x
# Add the Mesa drivers we built, and make a consistent symlink to them.
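
Note: the JWT copy added above deliberately reuses the absolute path of CI_JOB_JWT_FILE inside the rootfs, so tools on the booted device (e.g. ci-fairy minio login --token-file, as used in init-stage2.sh) see the token at the same path the GitLab job used. A small sketch of the idiom with a hypothetical token path:

    CI_JOB_JWT_FILE=/secrets/ci_job_jwt        # hypothetical value
    rootfs_dst=/lava-files/rootfs
    mkdir -p "${rootfs_dst}$(dirname "$CI_JOB_JWT_FILE")"    # ensure the parent dir exists in the rootfs
    cp "$CI_JOB_JWT_FILE" "${rootfs_dst}${CI_JOB_JWT_FILE}"
    # the token is now readable at /secrets/ci_job_jwt on the target as well
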
diff --git a/lib/mesa/.gitlab-ci/bare-metal/serial_buffer.py b/lib/mesa/.gitlab-ci/bare-metal/serial_buffer.py
index 70f377097..710e78677 100755
--- a/lib/mesa/.gitlab-ci/bare-metal/serial_buffer.py
+++ b/lib/mesa/.gitlab-ci/bare-metal/serial_buffer.py
@@ -28,7 +28,6 @@ import serial
import threading
import time
-
class SerialBuffer:
def __init__(self, dev, filename, prefix, timeout = None):
self.filename = filename
@@ -36,15 +35,17 @@ class SerialBuffer:
if dev:
self.f = open(filename, "wb+")
- self.serial = serial.Serial(dev, 115200, timeout=timeout if timeout else 10)
+ self.serial = serial.Serial(dev, 115200, timeout=timeout)
else:
self.f = open(filename, "rb")
+ self.serial = None
self.byte_queue = queue.Queue()
self.line_queue = queue.Queue()
self.prefix = prefix
self.timeout = timeout
self.sentinel = object()
+ self.closing = False
if self.dev:
self.read_thread = threading.Thread(
@@ -58,24 +59,31 @@ class SerialBuffer:
target=self.serial_lines_thread_loop, daemon=True)
self.lines_thread.start()
+ def close(self):
+ self.closing = True
+ if self.serial:
+ self.serial.cancel_read()
+ self.read_thread.join()
+ self.lines_thread.join()
+ if self.serial:
+ self.serial.close()
+
# Thread that just reads the bytes from the serial device to try to keep from
# buffer overflowing it. If nothing is received in 1 minute, it finalizes.
def serial_read_thread_loop(self):
greet = "Serial thread reading from %s\n" % self.dev
self.byte_queue.put(greet.encode())
- while True:
+ while not self.closing:
try:
b = self.serial.read()
- if len(b) > 0:
- self.byte_queue.put(b)
- elif self.timeout:
- self.byte_queue.put(self.sentinel)
+ if len(b) == 0:
break
+ self.byte_queue.put(b)
except Exception as err:
print(self.prefix + str(err))
- self.byte_queue.put(self.sentinel)
break
+ self.byte_queue.put(self.sentinel)
# Thread that just reads the bytes from the file of serial output that some
# other process is appending to.
@@ -83,12 +91,13 @@ class SerialBuffer:
greet = "Serial thread reading from %s\n" % self.filename
self.byte_queue.put(greet.encode())
- while True:
+ while not self.closing:
line = self.f.readline()
if line:
self.byte_queue.put(line)
else:
time.sleep(0.1)
+ self.byte_queue.put(self.sentinel)
# Thread that processes the stream of bytes to 1) log to stdout, 2) log to
# file, 3) add to the queue of lines to be read by program logic
diff --git a/lib/mesa/.gitlab-ci/build/gitlab-ci.yml b/lib/mesa/.gitlab-ci/build/gitlab-ci.yml
new file mode 100644
index 000000000..e49f62e11
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/build/gitlab-ci.yml
@@ -0,0 +1,525 @@
+# Shared between windows and Linux
+.build-common:
+ extends: .ci-run-policy
+ # Cancel job if a newer commit is pushed to the same branch
+ interruptible: true
+ artifacts:
+ name: "mesa_${CI_JOB_NAME}"
+ when: always
+ paths:
+ - _build/meson-logs/*.txt
+ - _build/meson-logs/strace
+ - shader-db
+
+# Just Linux
+.build-linux:
+ extends: .build-common
+ variables:
+ CCACHE_COMPILERCHECK: "content"
+ CCACHE_COMPRESS: "true"
+ CCACHE_DIR: /cache/mesa/ccache
+ # Use ccache transparently, and print stats before/after
+ before_script:
+ - !reference [default, before_script]
+ - export PATH="/usr/lib/ccache:$PATH"
+ - export CCACHE_BASEDIR="$PWD"
+ - echo -e "\e[0Ksection_start:$(date +%s):ccache_before[collapsed=true]\r\e[0Kccache stats before build"
+ - ccache --show-stats
+ - echo -e "\e[0Ksection_end:$(date +%s):ccache_before\r\e[0K"
+ after_script:
+ - echo -e "\e[0Ksection_start:$(date +%s):ccache_after[collapsed=true]\r\e[0Kccache stats after build"
+ - ccache --show-stats
+ - echo -e "\e[0Ksection_end:$(date +%s):ccache_after\r\e[0K"
+ - !reference [default, after_script]
+
+.build-windows:
+ extends: .build-common
+ tags:
+ - windows
+ - docker
+ - "1809"
+ - mesa
+ cache:
+ key: ${CI_JOB_NAME}
+ paths:
+ - subprojects/packagecache
+
+.meson-build:
+ extends:
+ - .build-linux
+ - .use-debian/x86_build
+ stage: build-x86_64
+ variables:
+ LLVM_VERSION: 11
+ script:
+ - .gitlab-ci/meson/build.sh
+
+debian-testing:
+ extends:
+ - .meson-build
+ - .ci-deqp-artifacts
+ variables:
+ UNWIND: "enabled"
+ DRI_LOADERS: >
+ -D glx=dri
+ -D gbm=enabled
+ -D egl=enabled
+ -D platforms=x11
+ GALLIUM_ST: >
+ -D dri3=enabled
+ -D gallium-va=enabled
+ GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915"
+ VULKAN_DRIVERS: "swrast,amd,intel"
+ BUILDTYPE: "debugoptimized"
+ EXTRA_OPTION: >
+ -D valgrind=false
+ MINIO_ARTIFACT_NAME: mesa-amd64
+ script:
+ - .gitlab-ci/lava/lava-pytest.sh
+ - .gitlab-ci/meson/build.sh
+ - .gitlab-ci/prepare-artifacts.sh
+ artifacts:
+ reports:
+ junit: artifacts/ci_scripts_report.xml
+
+debian-testing-asan:
+ extends:
+ - debian-testing
+ variables:
+ C_ARGS: >
+ -Wno-error=stringop-truncation
+ EXTRA_OPTION: >
+ -D b_sanitize=address
+ -D valgrind=false
+ -D tools=dlclose-skip
+ MINIO_ARTIFACT_NAME: ""
+ ARTIFACTS_DEBUG_SYMBOLS: 1
+
+debian-testing-msan:
+ extends:
+ - debian-clang
+ variables:
+ # l_undef is incompatible with msan
+ EXTRA_OPTION:
+ -D b_sanitize=memory
+ -D b_lundef=false
+ MINIO_ARTIFACT_NAME: ""
+ ARTIFACTS_DEBUG_SYMBOLS: 1
+ # Don't run all the tests yet:
+ # GLSL has some issues in sexpression reading.
+ # gtest has issues in its test initialization.
+ MESON_TEST_ARGS: "--suite glcpp --suite gallium --suite format"
+ # Freedreno dropped because freedreno tools fail at msan.
+ GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus"
+ VULKAN_DRIVERS: intel,amd,broadcom,virtio-experimental
+
+debian-clover-testing:
+ extends:
+ - .meson-build
+ - .ci-deqp-artifacts
+ variables:
+ UNWIND: "enabled"
+ DRI_LOADERS: >
+ -D glx=disabled
+ -D egl=disabled
+ -D gbm=disabled
+ GALLIUM_ST: >
+ -D gallium-opencl=icd
+ -D opencl-spirv=true
+ GALLIUM_DRIVERS: "swrast"
+ BUILDTYPE: "debugoptimized"
+ EXTRA_OPTION: >
+ -D valgrind=false
+ script:
+ - .gitlab-ci/meson/build.sh
+ - .gitlab-ci/prepare-artifacts.sh
+
+debian-gallium:
+ extends: .meson-build
+ variables:
+ UNWIND: "enabled"
+ DRI_LOADERS: >
+ -D glx=dri
+ -D gbm=enabled
+ -D egl=enabled
+ -D platforms=x11,wayland
+ GALLIUM_ST: >
+ -D dri3=enabled
+ -D gallium-extra-hud=true
+ -D gallium-vdpau=enabled
+ -D gallium-xvmc=enabled
+ -D gallium-omx=bellagio
+ -D gallium-va=enabled
+ -D gallium-xa=enabled
+ -D gallium-nine=true
+ -D gallium-opencl=disabled
+ GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus"
+ VULKAN_DRIVERS: swrast
+ EXTRA_OPTION: >
+ -D osmesa=true
+ -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,xvmc,lima,panfrost,asahi
+ script:
+ - .gitlab-ci/meson/build.sh
+ - .gitlab-ci/run-shader-db.sh
+
+# Test a release build with -Werror so new warnings don't sneak in.
+debian-release:
+ extends: .meson-build
+ variables:
+ UNWIND: "enabled"
+ DRI_LOADERS: >
+ -D glx=dri
+ -D gbm=enabled
+ -D egl=enabled
+ -D platforms=x11,wayland
+ GALLIUM_ST: >
+ -D dri3=enabled
+ -D gallium-extra-hud=true
+ -D gallium-vdpau=enabled
+ -D gallium-xvmc=disabled
+ -D gallium-omx=disabled
+ -D gallium-va=enabled
+ -D gallium-xa=enabled
+ -D gallium-nine=false
+ -D gallium-opencl=disabled
+ -D llvm=enabled
+ GALLIUM_DRIVERS: "i915,iris,nouveau,kmsro,freedreno,r300,svga,swrast,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus"
+ VULKAN_DRIVERS: "amd,imagination-experimental"
+ BUILDTYPE: "release"
+ EXTRA_OPTION: >
+ -D osmesa=true
+ -D tools=all
+ -D intel-clc=enabled
+ -D imagination-srv=true
+ script:
+ - .gitlab-ci/meson/build.sh
+
+fedora-release:
+ extends:
+ - .meson-build
+ - .use-fedora/x86_build
+ variables:
+ BUILDTYPE: "release"
+ C_ARGS: >
+ -Wno-error=array-bounds
+ -Wno-error=maybe-uninitialized
+ -Wno-error=stringop-overread
+ -Wno-error=uninitialized
+ CPP_ARGS: >
+ -Wno-error=array-bounds
+ DRI_LOADERS: >
+ -D glx=dri
+ -D gbm=enabled
+ -D egl=enabled
+ -D glvnd=true
+ -D platforms=x11,wayland
+ EXTRA_OPTION: >
+ -D osmesa=true
+ -D selinux=true
+ -D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination
+ -D intel-clc=enabled
+ -D imagination-srv=true
+ GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
+ GALLIUM_ST: >
+ -D dri3=enabled
+ -D gallium-extra-hud=true
+ -D gallium-vdpau=enabled
+ -D gallium-xvmc=disabled
+ -D gallium-omx=disabled
+ -D gallium-va=enabled
+ -D gallium-xa=enabled
+ -D gallium-nine=false
+ -D gallium-opencl=icd
+ -D gles1=disabled
+ -D gles2=enabled
+ -D llvm=enabled
+ -D microsoft-clc=disabled
+ -D shared-llvm=enabled
+ -D vulkan-device-select-layer=true
+ LLVM_VERSION: ""
+ UNWIND: "disabled"
+ VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental"
+ script:
+ - .gitlab-ci/meson/build.sh
+
+debian-android:
+ extends:
+ - .meson-cross
+ - .use-debian/android_build
+ variables:
+ UNWIND: "disabled"
+ C_ARGS: >
+ -Wno-error=asm-operand-widths
+ -Wno-error=constant-conversion
+ -Wno-error=enum-conversion
+ -Wno-error=initializer-overrides
+ -Wno-error=missing-braces
+ -Wno-error=sometimes-uninitialized
+ -Wno-error=unused-function
+ CPP_ARGS: >
+ -Wno-error=deprecated-declarations
+ DRI_LOADERS: >
+ -D glx=disabled
+ -D gbm=disabled
+ -D egl=enabled
+ -D platforms=android
+ EXTRA_OPTION: >
+ -D android-stub=true
+ -D llvm=disabled
+ -D platform-sdk-version=29
+ -D valgrind=false
+ GALLIUM_ST: >
+ -D dri3=disabled
+ -D gallium-vdpau=disabled
+ -D gallium-xvmc=disabled
+ -D gallium-omx=disabled
+ -D gallium-va=disabled
+ -D gallium-xa=disabled
+ -D gallium-nine=false
+ -D gallium-opencl=disabled
+ LLVM_VERSION: ""
+ PKG_CONFIG_LIBDIR: "/disable/non/android/system/pc/files"
+ script:
+ - PKG_CONFIG_PATH=/usr/local/lib/aarch64-linux-android/pkgconfig/:/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android/pkgconfig/ CROSS=aarch64-linux-android GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d VULKAN_DRIVERS=freedreno,broadcom,virtio-experimental .gitlab-ci/meson/build.sh
+ # x86_64 build:
+ # Can't do Intel because gen_decoder.c currently requires libexpat, which
+ # is not a dependency that AOSP wants to accept. Can't do Radeon Gallium
+ # drivers because they requires LLVM, which we don't have an Android build
+ # of.
+ - PKG_CONFIG_PATH=/usr/local/lib/x86_64-linux-android/pkgconfig/:/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/x86_64-linux-android/pkgconfig/ CROSS=x86_64-linux-android GALLIUM_DRIVERS=iris VULKAN_DRIVERS=amd,intel .gitlab-ci/meson/build.sh
+
+.meson-cross:
+ extends:
+ - .meson-build
+ stage: build-misc
+ variables:
+ UNWIND: "disabled"
+ DRI_LOADERS: >
+ -D glx=dri
+ -D gbm=enabled
+ -D egl=enabled
+ -D platforms=x11
+ -D osmesa=false
+ GALLIUM_ST: >
+ -D dri3=enabled
+ -D gallium-vdpau=disabled
+ -D gallium-xvmc=disabled
+ -D gallium-omx=disabled
+ -D gallium-va=disabled
+ -D gallium-xa=disabled
+ -D gallium-nine=false
+
+.meson-arm:
+ extends:
+ - .meson-cross
+ - .use-debian/arm_build
+ needs:
+ - debian/arm_build
+ variables:
+ VULKAN_DRIVERS: freedreno,broadcom
+ GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4"
+ BUILDTYPE: "debugoptimized"
+ tags:
+ - aarch64
+
+debian-armhf:
+ extends:
+ - .meson-arm
+ - .ci-deqp-artifacts
+ variables:
+ CROSS: armhf
+ EXTRA_OPTION: >
+ -D llvm=disabled
+ -D valgrind=false
+ MINIO_ARTIFACT_NAME: mesa-armhf
+ script:
+ - .gitlab-ci/meson/build.sh
+ - .gitlab-ci/prepare-artifacts.sh
+
+debian-arm64:
+ extends:
+ - .meson-arm
+ - .ci-deqp-artifacts
+ variables:
+ VULKAN_DRIVERS: "freedreno,broadcom,panfrost,imagination-experimental"
+ EXTRA_OPTION: >
+ -D llvm=disabled
+ -D valgrind=false
+ -D imagination-srv=true
+ MINIO_ARTIFACT_NAME: mesa-arm64
+ script:
+ - .gitlab-ci/meson/build.sh
+ - .gitlab-ci/prepare-artifacts.sh
+
+debian-arm64-asan:
+ extends:
+ - debian-arm64
+ variables:
+ C_ARGS: >
+ -Wno-error=stringop-truncation
+ EXTRA_OPTION: >
+ -D llvm=disabled
+ -D b_sanitize=address
+ -D valgrind=false
+ -D tools=dlclose-skip
+ ARTIFACTS_DEBUG_SYMBOLS: 1
+ MINIO_ARTIFACT_NAME: mesa-arm64-asan
+ MESON_TEST_ARGS: "--no-suite mesa:compiler"
+
+debian-arm64-build-test:
+ extends:
+ - .meson-arm
+ - .ci-deqp-artifacts
+ variables:
+ VULKAN_DRIVERS: "amd"
+ EXTRA_OPTION: >
+ -Dtools=panfrost,imagination
+ script:
+ - .gitlab-ci/meson/build.sh
+
+debian-clang:
+ extends: .meson-build
+ variables:
+ UNWIND: "enabled"
+ C_ARGS: >
+ -Wno-error=constant-conversion
+ -Wno-error=enum-conversion
+ -Wno-error=implicit-const-int-float-conversion
+ -Wno-error=initializer-overrides
+ -Wno-error=sometimes-uninitialized
+ -Wno-error=unused-function
+ CPP_ARGS: >
+ -Wno-error=c99-designator
+ -Wno-error=deprecated-declarations
+ -Wno-error=implicit-const-int-float-conversion
+ -Wno-error=missing-braces
+ -Wno-error=overloaded-virtual
+ -Wno-error=tautological-constant-out-of-range-compare
+ -Wno-error=unused-const-variable
+ -Wno-error=unused-private-field
+ DRI_LOADERS: >
+ -D glvnd=true
+ GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi"
+ VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,swrast,panfrost,imagination-experimental
+ EXTRA_OPTIONS:
+ -D imagination-srv=true
+ CC: clang
+ CXX: clang++
+
+windows-vs2019:
+ extends:
+ - .build-windows
+ - .use-windows_build_vs2019
+ - .windows-build-rules
+ stage: build-misc
+ script:
+ - . .\.gitlab-ci\windows\mesa_build.ps1
+ artifacts:
+ paths:
+ - _build/meson-logs/*.txt
+ - _install/
+
+debian-clover:
+ extends: .meson-build
+ variables:
+ UNWIND: "enabled"
+ DRI_LOADERS: >
+ -D glx=disabled
+ -D egl=disabled
+ -D gbm=disabled
+ GALLIUM_DRIVERS: "r600,radeonsi"
+ GALLIUM_ST: >
+ -D dri3=disabled
+ -D gallium-vdpau=disabled
+ -D gallium-xvmc=disabled
+ -D gallium-omx=disabled
+ -D gallium-va=disabled
+ -D gallium-xa=disabled
+ -D gallium-nine=false
+ -D gallium-opencl=icd
+ EXTRA_OPTION: >
+ -D valgrind=false
+ script:
+ - LLVM_VERSION=9 GALLIUM_DRIVERS=r600,swrast .gitlab-ci/meson/build.sh
+ - .gitlab-ci/meson/build.sh
+
+debian-vulkan:
+ extends: .meson-build
+ variables:
+ UNWIND: "disabled"
+ DRI_LOADERS: >
+ -D glx=disabled
+ -D gbm=disabled
+ -D egl=disabled
+ -D platforms=x11,wayland
+ -D osmesa=false
+ GALLIUM_ST: >
+ -D dri3=enabled
+ -D gallium-vdpau=disabled
+ -D gallium-xvmc=disabled
+ -D gallium-omx=disabled
+ -D gallium-va=disabled
+ -D gallium-xa=disabled
+ -D gallium-nine=false
+ -D gallium-opencl=disabled
+ -D b_sanitize=undefined
+ -D c_args=-fno-sanitize-recover=all
+ -D cpp_args=-fno-sanitize-recover=all
+ UBSAN_OPTIONS: "print_stacktrace=1"
+ VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,imagination-experimental
+ EXTRA_OPTION: >
+ -D vulkan-layers=device-select,overlay
+ -D build-aco-tests=true
+ -D intel-clc=enabled
+ -D imagination-srv=true
+
+debian-i386:
+ extends:
+ - .meson-cross
+ - .use-debian/i386_build
+ variables:
+ CROSS: i386
+ VULKAN_DRIVERS: intel,amd,swrast,virtio-experimental
+ GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus"
+ EXTRA_OPTION: >
+ -D vulkan-layers=device-select,overlay
+
+debian-s390x:
+ extends:
+ - debian-ppc64el
+ - .use-debian/s390x_build
+ - .s390x-rules
+ tags:
+ - kvm
+ variables:
+ CROSS: s390x
+ GALLIUM_DRIVERS: "swrast,zink"
+ # The lp_test_blend test times out with LLVM 11
+ LLVM_VERSION: 9
+ VULKAN_DRIVERS: "swrast"
+
+debian-ppc64el:
+ extends:
+ - .meson-cross
+ - .use-debian/ppc64el_build
+ - .ppc64el-rules
+ variables:
+ CROSS: ppc64el
+ GALLIUM_DRIVERS: "nouveau,radeonsi,swrast,virgl,zink"
+ VULKAN_DRIVERS: "amd,swrast"
+
+debian-mingw32-x86_64:
+ extends: .meson-build
+ stage: build-misc
+ variables:
+ UNWIND: "disabled"
+ C_ARGS: >
+ -Wno-error=format
+ -Wno-error=format-extra-args
+ CPP_ARGS: $C_ARGS
+ GALLIUM_DRIVERS: "swrast"
+ EXTRA_OPTION: >
+ -Dllvm=disabled
+ -Dzlib=disabled
+ -Dosmesa=true
+ --cross-file=.gitlab-ci/x86_64-w64-mingw32
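
Note: the echo -e "\e[0Ksection_start..." / "section_end" lines in .build-linux above are GitLab's collapsible log sections, used here to fold the ccache statistics. The same pattern as a small reusable sketch (the helper names are illustrative):

    # GitLab collapsible log sections, as used around the ccache stats.
    section_start() {   # usage: section_start <id> <title>
        echo -e "\e[0Ksection_start:$(date +%s):$1[collapsed=true]\r\e[0K$2"
    }
    section_end() {     # usage: section_end <id>
        echo -e "\e[0Ksection_end:$(date +%s):$1\r\e[0K"
    }
    section_start ccache_before "ccache stats before build"
    ccache --show-stats
    section_end ccache_before
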
diff --git a/lib/mesa/.gitlab-ci/common/generate-env.sh b/lib/mesa/.gitlab-ci/common/generate-env.sh
index 05376d4a7..885826b2e 100755
--- a/lib/mesa/.gitlab-ci/common/generate-env.sh
+++ b/lib/mesa/.gitlab-ci/common/generate-env.sh
@@ -5,8 +5,11 @@ for var in \
BASE_SYSTEM_FORK_HOST_PREFIX \
BASE_SYSTEM_MAINLINE_HOST_PREFIX \
CI_COMMIT_BRANCH \
+ CI_COMMIT_REF_NAME \
CI_COMMIT_TITLE \
CI_JOB_ID \
+ CI_JOB_JWT_FILE \
+ CI_JOB_NAME \
CI_JOB_URL \
CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \
CI_MERGE_REQUEST_TITLE \
@@ -14,12 +17,16 @@ for var in \
CI_NODE_TOTAL \
CI_PAGES_DOMAIN \
CI_PIPELINE_ID \
+ CI_PIPELINE_URL \
CI_PROJECT_DIR \
CI_PROJECT_NAME \
CI_PROJECT_PATH \
CI_PROJECT_ROOT_NAMESPACE \
CI_RUNNER_DESCRIPTION \
CI_SERVER_URL \
+ CROSVM_GALLIUM_DRIVER \
+ CROSVM_GPU_ARGS \
+ DEQP_BIN_DIR \
DEQP_CASELIST_FILTER \
DEQP_CASELIST_INV_FILTER \
DEQP_CONFIG \
@@ -29,6 +36,7 @@ for var in \
DEQP_RESULTS_DIR \
DEQP_RUNNER_OPTIONS \
DEQP_SUITE \
+ DEQP_TEMP_DIR \
DEQP_VARIANT \
DEQP_VER \
DEQP_WIDTH \
@@ -40,6 +48,9 @@ for var in \
FDO_UPSTREAM_REPO \
FD_MESA_DEBUG \
FLAKES_CHANNEL \
+ FREEDRENO_HANGCHECK_MS \
+ GALLIUM_DRIVER \
+ GALLIVM_PERF \
GPU_VERSION \
GTEST \
GTEST_FAILS \
@@ -49,40 +60,56 @@ for var in \
GTEST_SKIPS \
HWCI_FREQ_MAX \
HWCI_KERNEL_MODULES \
+ HWCI_KVM \
HWCI_START_XORG \
HWCI_TEST_SCRIPT \
IR3_SHADER_DEBUG \
JOB_ARTIFACTS_BASE \
JOB_RESULTS_PATH \
JOB_ROOTFS_OVERLAY_PATH \
+ KERNEL_IMAGE_BASE_URL \
+ KERNEL_IMAGE_NAME \
+ LD_LIBRARY_PATH \
+ LP_NUM_THREADS \
+ MESA_BASE_TAG \
MESA_BUILD_PATH \
- MESA_GL_VERSION_OVERRIDE \
- MESA_GLSL_VERSION_OVERRIDE \
+ MESA_DEBUG \
MESA_GLES_VERSION_OVERRIDE \
+ MESA_GLSL_VERSION_OVERRIDE \
+ MESA_GL_VERSION_OVERRIDE \
+ MESA_IMAGE \
+ MESA_IMAGE_PATH \
+ MESA_IMAGE_TAG \
+ MESA_TEMPLATES_COMMIT \
MESA_VK_IGNORE_CONFORMANCE_WARNING \
+ MESA_SPIRV_LOG_LEVEL \
MINIO_HOST \
- NIR_VALIDATE \
+ MINIO_RESULTS_UPLOAD \
+ NIR_DEBUG \
PAN_I_WANT_A_BROKEN_VULKAN_DRIVER \
PAN_MESA_DEBUG \
PIGLIT_FRACTION \
- PIGLIT_JUNIT_RESULTS \
PIGLIT_NO_WINDOW \
PIGLIT_OPTIONS \
PIGLIT_PLATFORM \
PIGLIT_PROFILES \
PIGLIT_REPLAY_ARTIFACTS_BASE_URL \
- PIGLIT_REPLAY_SUBCOMMAND \
PIGLIT_REPLAY_DESCRIPTION_FILE \
PIGLIT_REPLAY_DEVICE_NAME \
PIGLIT_REPLAY_EXTRA_ARGS \
+ PIGLIT_REPLAY_LOOP_TIMES \
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE \
- PIGLIT_REPLAY_UPLOAD_TO_MINIO \
+ PIGLIT_REPLAY_SUBCOMMAND \
PIGLIT_RESULTS \
PIGLIT_TESTS \
PIPELINE_ARTIFACTS_BASE \
+ SKQP_ASSETS_DIR \
+ SKQP_BACKENDS \
TU_DEBUG \
+ VIRGL_HOST_API \
VK_CPU \
VK_DRIVER \
+ VK_ICD_FILENAMES \
; do
if [ -n "${!var+x}" ]; then
echo "export $var=${!var@Q}"
diff --git a/lib/mesa/.gitlab-ci/common/init-stage1.sh b/lib/mesa/.gitlab-ci/common/init-stage1.sh
index 648c37a2f..3b3317a2c 100755
--- a/lib/mesa/.gitlab-ci/common/init-stage1.sh
+++ b/lib/mesa/.gitlab-ci/common/init-stage1.sh
@@ -9,6 +9,7 @@ cd /
mount -t proc none /proc
mount -t sysfs none /sys
+mount -t debugfs none /sys/kernel/debug
mount -t devtmpfs none /dev || echo possibly already mounted
mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
diff --git a/lib/mesa/.gitlab-ci/common/init-stage2.sh b/lib/mesa/.gitlab-ci/common/init-stage2.sh
index 53b904156..6aebc2231 100755
--- a/lib/mesa/.gitlab-ci/common/init-stage2.sh
+++ b/lib/mesa/.gitlab-ci/common/init-stage2.sh
@@ -8,7 +8,31 @@
set -ex
# Set up any devices required by the jobs
-[ -z "$HWCI_KERNEL_MODULES" ] || (echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe)
+[ -z "$HWCI_KERNEL_MODULES" ] || {
+ echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
+}
+
+#
+# Load the KVM module specific to the detected CPU virtualization extensions:
+# - vmx for Intel VT
+# - svm for AMD-V
+#
+# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
+#
+if [ "$HWCI_KVM" = "true" ]; then
+ unset KVM_KERNEL_MODULE
+ grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel || {
+ grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
+ }
+
+ [ -z "${KVM_KERNEL_MODULE}" ] && \
+ echo "WARNING: Failed to detect CPU virtualization extensions" || \
+ modprobe ${KVM_KERNEL_MODULE}
+
+ mkdir -p /lava-files
+ wget -S --progress=dot:giga -O /lava-files/${KERNEL_IMAGE_NAME} \
+ "${KERNEL_IMAGE_BASE_URL}/${KERNEL_IMAGE_NAME}"
+fi
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
# it in /install
@@ -36,6 +60,16 @@ if [ "$HWCI_FREQ_MAX" = "true" ]; then
# Disable GPU runtime power management
GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true
+
+ # Lock Intel GPU frequency to 70% of the maximum allowed by hardware
+ # and enable throttling detection & reporting.
+ ./intel-gpu-freq.sh -s 70% -g all -d
+fi
+
+# Increase freedreno hangcheck timer because it's right at the edge of the
+# spilling tests timing out (and some traces, too)
+if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then
+ echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms
fi
# Start a little daemon to capture the first devcoredump we encounter. (They
@@ -61,18 +95,18 @@ if [ -n "$HWCI_START_XORG" ]; then
export DISPLAY=:0
fi
-RESULT=fail
-if sh $HWCI_TEST_SCRIPT; then
- RESULT=pass
- rm -rf results/trace/$PIGLIT_REPLAY_DEVICE_NAME
-fi
+sh -c "$HWCI_TEST_SCRIPT" && RESULT=pass || RESULT=fail
+
+# Let's make sure the results are always stored in current working directory
+mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true
+
+[ "${RESULT}" = "fail" ] || rm -rf results/trace/$PIGLIT_REPLAY_DEVICE_NAME
# upload artifacts
-MINIO=$(cat /proc/cmdline | tr ' ' '\n' | grep minio_results | cut -d '=' -f 2 || true)
-if [ -n "$MINIO" ]; then
+if [ -n "$MINIO_RESULTS_UPLOAD" ]; then
tar -czf results.tar.gz results/;
- ci-fairy minio login "$CI_JOB_JWT";
- ci-fairy minio cp results.tar.gz minio://"$MINIO"/results.tar.gz;
+ ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}";
+ ci-fairy minio cp results.tar.gz minio://"$MINIO_RESULTS_UPLOAD"/results.tar.gz;
fi
echo "hwci: mesa: $RESULT"
diff --git a/lib/mesa/.gitlab-ci/common/intel-gpu-freq.sh b/lib/mesa/.gitlab-ci/common/intel-gpu-freq.sh
new file mode 100755
index 000000000..10a72eea7
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/common/intel-gpu-freq.sh
@@ -0,0 +1,567 @@
+#!/bin/sh
+#
+# The Intel i915 GPU driver allows to change the minimum, maximum and boost
+# frequencies in steps of 50 MHz via /sys/class/drm/card<n>/<freq_info>,
+# where <n> is the DRM card index and <freq_info> one of the following:
+#
+# - gt_max_freq_mhz (enforced maximum freq)
+# - gt_min_freq_mhz (enforced minimum freq)
+# - gt_boost_freq_mhz (enforced boost freq)
+#
+# The hardware capabilities can be accessed via:
+#
+# - gt_RP0_freq_mhz (supported maximum freq)
+# - gt_RPn_freq_mhz (supported minimum freq)
+# - gt_RP1_freq_mhz (most efficient freq)
+#
+# The current frequency can be read from:
+# - gt_act_freq_mhz (the actual GPU freq)
+# - gt_cur_freq_mhz (the last requested freq)
+#
+# Copyright (C) 2022 Collabora Ltd.
+# Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+#
+# SPDX-License-Identifier: MIT
+#
+
+#
+# Constants
+#
+DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz"
+ENF_FREQ_INFO="max min boost"
+CAP_FREQ_INFO="RP0 RPn RP1"
+ACT_FREQ_INFO="act cur"
+THROTT_DETECT_SLEEP_SEC=2
+THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid
+
+#
+# Global variables.
+#
+unset INTEL_DRM_CARD_INDEX
+unset GET_ACT_FREQ GET_ENF_FREQ GET_CAP_FREQ
+unset SET_MIN_FREQ SET_MAX_FREQ
+unset MONITOR_FREQ
+unset DETECT_THROTT
+unset DRY_RUN
+
+#
+# Simple printf based stderr logger.
+#
+log() {
+ local msg_type=$1
+
+ shift
+ printf "%s: %s: " "${msg_type}" "${0##*/}" >&2
+ printf "$@" >&2
+ printf "\n" >&2
+}
+
+#
+# Helper to print sysfs path for the given card index and freq info.
+#
+# arg1: Frequency info sysfs name, one of *_FREQ_INFO constants above
+# arg2: Video card index, defaults to INTEL_DRM_CARD_INDEX
+#
+print_freq_sysfs_path() {
+ printf ${DRM_FREQ_SYSFS_PATTERN} "${2:-${INTEL_DRM_CARD_INDEX}}" "$1"
+}
+
+#
+# Helper to set INTEL_DRM_CARD_INDEX for the first identified Intel video card.
+#
+identify_intel_gpu() {
+ local i=0 vendor path
+
+ while [ ${i} -lt 16 ]; do
+ [ -c "/dev/dri/card$i" ] || {
+ i=$((i + 1))
+ continue
+ }
+
+ path=$(print_freq_sysfs_path "" ${i})
+ path=${path%/*}/device/vendor
+
+ [ -r "${path}" ] && read vendor < "${path}" && \
+ [ "${vendor}" = "0x8086" ] && INTEL_DRM_CARD_INDEX=$i && return 0
+
+ i=$((i + 1))
+ done
+
+ return 1
+}
+
+#
+# Read the specified freq info from sysfs.
+#
+# arg1: Flag (y/n) to also enable printing the freq info.
+# arg2...: Frequency info sysfs name(s), see *_FREQ_INFO constants above
+# return: Global variable(s) FREQ_${arg} containing the requested information
+#
+read_freq_info() {
+ local var val path print=0 ret=0
+
+ [ "$1" = "y" ] && print=1
+ shift
+
+ while [ $# -gt 0 ]; do
+ var=FREQ_$1
+ path=$(print_freq_sysfs_path "$1")
+
+ [ -r ${path} ] && read ${var} < ${path} || {
+ log ERROR "Failed to read freq info from: %s" "${path}"
+ ret=1
+ continue
+ }
+
+ [ -n "${var}" ] || {
+ log ERROR "Got empty freq info from: %s" "${path}"
+ ret=1
+ continue
+ }
+
+ [ ${print} -eq 1 ] && {
+ eval val=\$${var}
+ printf "%6s: %4s MHz\n" "$1" "${val}"
+ }
+
+ shift
+ done
+
+ return ${ret}
+}
+
+#
+# Display requested info.
+#
+print_freq_info() {
+ local req_freq
+
+ [ -n "${GET_CAP_FREQ}" ] && {
+ printf "* Hardware capabilities\n"
+ read_freq_info y ${CAP_FREQ_INFO}
+ printf "\n"
+ }
+
+ [ -n "${GET_ENF_FREQ}" ] && {
+ printf "* Enforcements\n"
+ read_freq_info y ${ENF_FREQ_INFO}
+ printf "\n"
+ }
+
+ [ -n "${GET_ACT_FREQ}" ] && {
+ printf "* Actual\n"
+ read_freq_info y ${ACT_FREQ_INFO}
+ printf "\n"
+ }
+}
+
+#
+# Helper to print frequency value as requested by user via '-s, --set' option.
+# arg1: user requested freq value
+#
+compute_freq_set() {
+ local val
+
+ case "$1" in
+ +)
+ val=${FREQ_RP0}
+ ;;
+ -)
+ val=${FREQ_RPn}
+ ;;
+ *%)
+ val=$((${1%?} * ${FREQ_RP0} / 100))
+ # Adjust freq to comply with 50 MHz increments
+ val=$((val / 50 * 50))
+ ;;
+ *[!0-9]*)
+ log ERROR "Cannot set freq to invalid value: %s" "$1"
+ return 1
+ ;;
+ "")
+ log ERROR "Cannot set freq to unspecified value"
+ return 1
+ ;;
+ *)
+ # Adjust freq to comply with 50 MHz increments
+ val=$(($1 / 50 * 50))
+ ;;
+ esac
+
+ printf "%s" "${val}"
+}
+
+#
+# Helper for set_freq().
+#
+set_freq_max() {
+ log INFO "Setting GPU max freq to %s MHz" "${SET_MAX_FREQ}"
+
+ read_freq_info n min || return $?
+
+ [ ${SET_MAX_FREQ} -gt ${FREQ_RP0} ] && {
+ log ERROR "Cannot set GPU max freq (%s) to be greater than hw max freq (%s)" \
+ "${SET_MAX_FREQ}" "${FREQ_RP0}"
+ return 1
+ }
+
+ [ ${SET_MAX_FREQ} -lt ${FREQ_RPn} ] && {
+ log ERROR "Cannot set GPU max freq (%s) to be less than hw min freq (%s)" \
+ "${SET_MIN_FREQ}" "${FREQ_RPn}"
+ return 1
+ }
+
+ [ ${SET_MAX_FREQ} -lt ${FREQ_min} ] && {
+ log ERROR "Cannot set GPU max freq (%s) to be less than min freq (%s)" \
+ "${SET_MAX_FREQ}" "${FREQ_min}"
+ return 1
+ }
+
+ [ -z "${DRY_RUN}" ] || return 0
+
+ printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) \
+ $(print_freq_sysfs_path boost) > /dev/null
+ [ $? -eq 0 ] || {
+ log ERROR "Failed to set GPU max frequency"
+ return 1
+ }
+}
+
+#
+# Helper for set_freq().
+#
+set_freq_min() {
+ log INFO "Setting GPU min freq to %s MHz" "${SET_MIN_FREQ}"
+
+ read_freq_info n max || return $?
+
+ [ ${SET_MIN_FREQ} -gt ${FREQ_max} ] && {
+ log ERROR "Cannot set GPU min freq (%s) to be greater than max freq (%s)" \
+ "${SET_MIN_FREQ}" "${FREQ_max}"
+ return 1
+ }
+
+ [ ${SET_MIN_FREQ} -lt ${FREQ_RPn} ] && {
+ log ERROR "Cannot set GPU min freq (%s) to be less than hw min freq (%s)" \
+ "${SET_MIN_FREQ}" "${FREQ_RPn}"
+ return 1
+ }
+
+ [ -z "${DRY_RUN}" ] || return 0
+
+ printf "%s" ${SET_MIN_FREQ} > $(print_freq_sysfs_path min)
+ [ $? -eq 0 ] || {
+ log ERROR "Failed to set GPU min frequency"
+ return 1
+ }
+}
+
+#
+# Set min or max or both GPU frequencies to the user indicated values.
+#
+set_freq() {
+ # Get hw max & min frequencies
+ read_freq_info n RP0 RPn || return $?
+
+ [ -z "${SET_MAX_FREQ}" ] || {
+ SET_MAX_FREQ=$(compute_freq_set "${SET_MAX_FREQ}")
+ [ -z "${SET_MAX_FREQ}" ] && return 1
+ }
+
+ [ -z "${SET_MIN_FREQ}" ] || {
+ SET_MIN_FREQ=$(compute_freq_set "${SET_MIN_FREQ}")
+ [ -z "${SET_MIN_FREQ}" ] && return 1
+ }
+
+ #
+ # Ensure correct operation order, to avoid setting min freq
+ # to a value which is larger than max freq.
+ #
+ # E.g.:
+ # crt_min=crt_max=600; new_min=new_max=700
+ # > operation order: max=700; min=700
+ #
+ # crt_min=crt_max=600; new_min=new_max=500
+ # > operation order: min=500; max=500
+ #
+ if [ -n "${SET_MAX_FREQ}" ] && [ -n "${SET_MIN_FREQ}" ]; then
+ [ ${SET_MAX_FREQ} -lt ${SET_MIN_FREQ} ] && {
+ log ERROR "Cannot set GPU max freq to be less than min freq"
+ return 1
+ }
+
+ read_freq_info n min || return $?
+
+ if [ ${SET_MAX_FREQ} -lt ${FREQ_min} ]; then
+ set_freq_min || return $?
+ set_freq_max
+ else
+ set_freq_max || return $?
+ set_freq_min
+ fi
+ elif [ -n "${SET_MAX_FREQ}" ]; then
+ set_freq_max
+ elif [ -n "${SET_MIN_FREQ}" ]; then
+ set_freq_min
+ else
+ log "Unexpected call to set_freq()"
+ return 1
+ fi
+}
+
+#
+# Helper for detect_throttling().
+#
+get_thrott_detect_pid() {
+ [ -e ${THROTT_DETECT_PID_FILE_PATH} ] || return 0
+
+ local pid
+ read pid < ${THROTT_DETECT_PID_FILE_PATH} || {
+ log ERROR "Failed to read pid from: %s" "${THROTT_DETECT_PID_FILE_PATH}"
+ return 1
+ }
+
+ local proc_path=/proc/${pid:-invalid}/cmdline
+ [ -r ${proc_path} ] && grep -qs "${0##*/}" ${proc_path} && {
+ printf "%s" "${pid}"
+ return 0
+ }
+
+ # Remove orphaned PID file
+ rm -rf ${THROTT_DETECT_PID_FILE_PATH}
+ return 1
+}
+
+#
+# Control detection and reporting of GPU throttling events.
+# arg1: start - run throttle detector in background
+# stop - stop throttle detector process, if any
+# status - verify if throttle detector is running
+#
+detect_throttling() {
+ local pid
+ pid=$(get_thrott_detect_pid)
+
+ case "$1" in
+ status)
+ printf "Throttling detector is "
+ [ -z "${pid}" ] && printf "not running\n" && return 0
+ printf "running (pid=%s)\n" ${pid}
+ ;;
+
+ stop)
+ [ -z "${pid}" ] && return 0
+
+ log INFO "Stopping throttling detector (pid=%s)" "${pid}"
+ kill ${pid}; sleep 1; kill -0 ${pid} 2>/dev/null && kill -9 ${pid}
+ rm -rf ${THROTT_DETECT_PID_FILE_PATH}
+ ;;
+
+ start)
+ [ -n "${pid}" ] && {
+ log WARN "Throttling detector is already running (pid=%s)" ${pid}
+ return 0
+ }
+
+ (
+ read_freq_info n RPn || exit $?
+
+ while true; do
+ sleep ${THROTT_DETECT_SLEEP_SEC}
+ read_freq_info n act min cur || exit $?
+
+ #
+ # The throttling seems to occur when act freq goes below min.
+ # However, it's necessary to exclude the idle states, where
+ # act freq normally reaches RPn and cur goes below min.
+ #
+ [ ${FREQ_act} -lt ${FREQ_min} ] && \
+ [ ${FREQ_act} -gt ${FREQ_RPn} ] && \
+ [ ${FREQ_cur} -ge ${FREQ_min} ] && \
+ printf "GPU throttling detected: act=%s min=%s cur=%s RPn=%s\n" \
+ ${FREQ_act} ${FREQ_min} ${FREQ_cur} ${FREQ_RPn}
+ done
+ ) &
+
+ pid=$!
+ log INFO "Started GPU throttling detector (pid=%s)" ${pid}
+
+ printf "%s\n" ${pid} > ${THROTT_DETECT_PID_FILE_PATH} || \
+ log WARN "Failed to write throttle detector PID file"
+ ;;
+ esac
+}
+
+#
+# Show help message.
+#
+print_usage() {
+ cat <<EOF
+Usage: ${0##*/} [OPTION]...
+
+A script to manage Intel GPU frequencies. Can be used for debugging performance
+problems or trying to obtain a stable frequency while benchmarking.
+
+Note Intel GPUs only accept specific frequencies, usually multiples of 50 MHz.
+
+Options:
+ -g, --get [act|enf|cap|all]
+ Get frequency information: active (default), enforced,
+ hardware capabilities or all of them.
+
+ -s, --set [{min|max}=]{FREQUENCY[%]|+|-}
+ Set min or max frequency to the given value (MHz).
+ Append '%' to interpret FREQUENCY as % of hw max.
+ Use '+' or '-' to set frequency to hardware max or min.
+ Omit min/max prefix to set both frequencies.
+
+ -r, --reset Reset frequencies to hardware defaults.
+
+ -m, --monitor [act|enf|cap|all]
+ Monitor the indicated frequencies via 'watch' utility.
+ See '-g, --get' option for more details.
+
+ -d|--detect-thrott [start|stop|status]
+ Start (default operation) the throttling detector
+ as a background process. Use 'stop' or 'status' to
+ terminate the detector process or verify its status.
+
+ --dry-run See what the script will do without applying any
+ frequency changes.
+
+ -h, --help Display this help text and exit.
+EOF
+}
+
+#
+# Parse user input for '-g, --get' option.
+# Returns 0 if a value has been provided, otherwise 1.
+#
+parse_option_get() {
+ local ret=0
+
+ case "$1" in
+ act) GET_ACT_FREQ=1;;
+ enf) GET_ENF_FREQ=1;;
+ cap) GET_CAP_FREQ=1;;
+ all) GET_ACT_FREQ=1; GET_ENF_FREQ=1; GET_CAP_FREQ=1;;
+ -*|"")
+ # No value provided, using default.
+ GET_ACT_FREQ=1
+ ret=1
+ ;;
+ *)
+ print_usage
+ exit 1
+ ;;
+ esac
+
+ return ${ret}
+}
+
+#
+# Validate user input for '-s, --set' option.
+#
+validate_option_set() {
+ case "$1" in
+ +|-|[0-9]%|[0-9][0-9]%)
+ return 0
+ ;;
+ *[!0-9]*|"")
+ print_usage
+ exit 1
+ ;;
+ esac
+}
+
+#
+# Parse script arguments.
+#
+[ $# -eq 0 ] && { print_usage; exit 1; }
+
+while [ $# -gt 0 ]; do
+ case "$1" in
+ -g|--get)
+ parse_option_get "$2" && shift
+ ;;
+
+ -s|--set)
+ shift
+ case "$1" in
+ min=*)
+ SET_MIN_FREQ=${1#min=}
+ validate_option_set "${SET_MIN_FREQ}"
+ ;;
+ max=*)
+ SET_MAX_FREQ=${1#max=}
+ validate_option_set "${SET_MAX_FREQ}"
+ ;;
+ *)
+ SET_MIN_FREQ=$1
+ validate_option_set "${SET_MIN_FREQ}"
+ SET_MAX_FREQ=${SET_MIN_FREQ}
+ ;;
+ esac
+ ;;
+
+ -r|--reset)
+ RESET_FREQ=1
+ SET_MIN_FREQ="-"
+ SET_MAX_FREQ="+"
+ ;;
+
+ -m|--monitor)
+ MONITOR_FREQ=act
+ parse_option_get "$2" && MONITOR_FREQ=$2 && shift
+ ;;
+
+ -d|--detect-thrott)
+ DETECT_THROTT=start
+ case "$2" in
+ start|stop|status)
+ DETECT_THROTT=$2
+ shift
+ ;;
+ esac
+ ;;
+
+ --dry-run)
+ DRY_RUN=1
+ ;;
+
+ -h|--help)
+ print_usage
+ exit 0
+ ;;
+
+ *)
+ print_usage
+ exit 1
+ ;;
+ esac
+
+ shift
+done
+
+#
+# Main
+#
+RET=0
+
+identify_intel_gpu || {
+ log INFO "No Intel GPU detected"
+ exit 0
+}
+
+[ -n "${SET_MIN_FREQ}${SET_MAX_FREQ}" ] && { set_freq || RET=$?; }
+print_freq_info
+
+[ -n "${DETECT_THROTT}" ] && detect_throttling ${DETECT_THROTT}
+
+[ -n "${MONITOR_FREQ}" ] && {
+ log INFO "Entering frequency monitoring mode"
+ sleep 2
+ exec watch -d -n 1 "$0" -g "${MONITOR_FREQ}"
+}
+
+exit ${RET}
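
Note: for reference, the option parser above supports invocations such as the following; the first form is exactly what init-stage2.sh runs when HWCI_FREQ_MAX is set:

    ./intel-gpu-freq.sh -s 70% -g all -d      # cap at 70% of RP0, print all freq info, start throttle detector
    ./intel-gpu-freq.sh -g cap                # show hardware capabilities (RP0/RPn/RP1)
    ./intel-gpu-freq.sh -s min=300 -s max=1100 --dry-run   # preview explicit limits without writing sysfs
    ./intel-gpu-freq.sh -r                    # reset min/max to hardware defaults
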
diff --git a/lib/mesa/.gitlab-ci/container/arm64.config b/lib/mesa/.gitlab-ci/container/arm64.config
index 623084bb8..5ce3715ae 100644
--- a/lib/mesa/.gitlab-ci/container/arm64.config
+++ b/lib/mesa/.gitlab-ci/container/arm64.config
@@ -14,6 +14,7 @@ CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PANEL_EDP=y
CONFIG_DRM_MSM=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_PWM_CROS_EC=y
@@ -35,6 +36,7 @@ CONFIG_TYPEC_TCPM=y
# For CONFIG_QCOM_LMH
CONFIG_OF=y
+CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_RPMHPD=y
CONFIG_QCOM_RPMPD=y
CONFIG_SDM_GPUCC_845=y
@@ -155,3 +157,16 @@ CONFIG_HW_RANDOM_MTK=y
CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
+
+# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
+CONFIG_ARCH_TEGRA=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_TEGRA=m
+CONFIG_R8169=y
+CONFIG_STAGING=y
+CONFIG_DRM_TEGRA_STAGING=y
+CONFIG_TEGRA_HOST1X=y
+CONFIG_ARM_TEGRA_DEVFREQ=y
+CONFIG_TEGRA_SOCTHERM=y
+CONFIG_DRM_TEGRA_DEBUG=y
+CONFIG_PWM_TEGRA=y
diff --git a/lib/mesa/.gitlab-ci/container/baremetal_build.sh b/lib/mesa/.gitlab-ci/container/baremetal_build.sh
index 5752287a8..cd11d6342 100644
--- a/lib/mesa/.gitlab-ci/container/baremetal_build.sh
+++ b/lib/mesa/.gitlab-ci/container/baremetal_build.sh
@@ -25,7 +25,10 @@ if [[ $arch == "arm64" ]]; then
wget ${ARTIFACTS_URL}/Image.gz
wget ${ARTIFACTS_URL}/cheza-kernel
- DEVICE_TREES="apq8016-sbc.dtb apq8096-db820c.dtb"
+ DEVICE_TREES=""
+ DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb"
+ DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb"
+ DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb"
for DTB in $DEVICE_TREES; do
wget ${ARTIFACTS_URL}/$DTB
diff --git a/lib/mesa/.gitlab-ci/container/build-crosvm.sh b/lib/mesa/.gitlab-ci/container/build-crosvm.sh
index 2fd582075..05172fa45 100644
--- a/lib/mesa/.gitlab-ci/container/build-crosvm.sh
+++ b/lib/mesa/.gitlab-ci/container/build-crosvm.sh
@@ -2,47 +2,25 @@
set -ex
-# Pull down repositories that crosvm depends on to cros checkout-like locations.
-CROS_ROOT=/
-THIRD_PARTY_ROOT=$CROS_ROOT/third_party
-mkdir -p $THIRD_PARTY_ROOT
-AOSP_EXTERNAL_ROOT=$CROS_ROOT/aosp/external
-mkdir -p $AOSP_EXTERNAL_ROOT
-PLATFORM2_ROOT=/platform2
-
-PLATFORM2_COMMIT=72e56e66ccf3d2ea48f5686bd1f772379c43628b
-git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/platform2 $PLATFORM2_ROOT
-pushd $PLATFORM2_ROOT
-git checkout $PLATFORM2_COMMIT
-popd
-
-# minijail does not exist in upstream linux distros.
-MINIJAIL_COMMIT=debdf5de5a0ae3b667bee2f8fb1f755b0b3f5a6c
-git clone --single-branch --no-checkout https://android.googlesource.com/platform/external/minijail $AOSP_EXTERNAL_ROOT/minijail
-pushd $AOSP_EXTERNAL_ROOT/minijail
-git checkout $MINIJAIL_COMMIT
-make
-cp libminijail.so /usr/lib/x86_64-linux-gnu/
-popd
-
-# Pull the cras library for audio access.
-ADHD_COMMIT=a1e0869b95c845c4fe6234a7b92fdfa6acc1e809
-git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/third_party/adhd $THIRD_PARTY_ROOT/adhd
-pushd $THIRD_PARTY_ROOT/adhd
-git checkout $ADHD_COMMIT
-popd
-
-# Pull vHost (dataplane for virtio backend drivers)
-VHOST_COMMIT=3091854e27242d09453004b011f701fa29c0b8e8
-git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/third_party/rust-vmm/vhost $THIRD_PARTY_ROOT/rust-vmm/vhost
-pushd $THIRD_PARTY_ROOT/rust-vmm/vhost
-git checkout $VHOST_COMMIT
-popd
+SCRIPT_DIR="$(pwd)"
-CROSVM_VERSION=e42a43d880b0364b55559dbeade3af174f929001
-git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/platform/crosvm /platform/crosvm
+CROSVM_VERSION=c7cd0e0114c8363b884ba56d8e12adee718dcc93
+git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/chromiumos/platform/crosvm /platform/crosvm
pushd /platform/crosvm
git checkout "$CROSVM_VERSION"
+git submodule update --init
+# Apply all crosvm patches for Mesa CI
+cat "$SCRIPT_DIR"/.gitlab-ci/container/build-crosvm_*.patch |
+ patch -p1
+
+VIRGLRENDERER_VERSION=0564c9a0c2f584e004a7d4864aee3b8ec9692105
+rm -rf third_party/virglrenderer
+git clone --single-branch -b master --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
+pushd third_party/virglrenderer
+git checkout "$VIRGLRENDERER_VERSION"
+meson build/ $EXTRA_MESON_ARGS
+ninja -C build install
+popd
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
bindgen \
@@ -60,4 +38,4 @@ RUSTFLAGS='-L native=/usr/local/lib' cargo install \
popd
-rm -rf $PLATFORM2_ROOT $AOSP_EXTERNAL_ROOT/minijail $THIRD_PARTY_ROOT/adhd $THIRD_PARTY_ROOT/rust-vmm /platform/crosvm
+rm -rf /platform/crosvm
diff --git a/lib/mesa/.gitlab-ci/container/build-crosvm_no-syslog.patch b/lib/mesa/.gitlab-ci/container/build-crosvm_no-syslog.patch
new file mode 100644
index 000000000..804e90bb7
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/build-crosvm_no-syslog.patch
@@ -0,0 +1,43 @@
+From 3c57ec558bccc67fd53363c23deea20646be5c47 Mon Sep 17 00:00:00 2001
+From: Tomeu Vizoso <tomeu.vizoso@collabora.com>
+Date: Wed, 17 Nov 2021 10:18:04 +0100
+Subject: [PATCH] Hack syslog out
+
+It's causing stability problems when running several Crosvm instances in
+parallel.
+
+Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
+---
+ base/src/unix/linux/syslog.rs | 2 +-
+ common/sys_util/src/linux/syslog.rs | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/base/src/unix/linux/syslog.rs b/base/src/unix/linux/syslog.rs
+index 05972a3a..f0db3781 100644
+--- a/base/src/unix/linux/syslog.rs
++++ b/base/src/unix/linux/syslog.rs
+@@ -35,7 +35,7 @@ pub struct PlatformSyslog {
+ impl Syslog for PlatformSyslog {
+ fn new() -> Result<Self, Error> {
+ Ok(Self {
+- socket: Some(openlog_and_get_socket()?),
++ socket: None,
+ })
+ }
+
+diff --git a/common/sys_util/src/linux/syslog.rs b/common/sys_util/src/linux/syslog.rs
+index 05972a3a..f0db3781 100644
+--- a/common/sys_util/src/linux/syslog.rs
++++ b/common/sys_util/src/linux/syslog.rs
+@@ -35,7 +35,7 @@ pub struct PlatformSyslog {
+ impl Syslog for PlatformSyslog {
+ fn new() -> Result<Self, Error> {
+ Ok(Self {
+- socket: Some(openlog_and_get_socket()?),
++ socket: None,
+ })
+ }
+
+--
+2.25.1
+
diff --git a/lib/mesa/.gitlab-ci/container/build-deqp-runner.sh b/lib/mesa/.gitlab-ci/container/build-deqp-runner.sh
index ae989be82..9ae0d9932 100644
--- a/lib/mesa/.gitlab-ci/container/build-deqp-runner.sh
+++ b/lib/mesa/.gitlab-ci/container/build-deqp-runner.sh
@@ -1,9 +1,24 @@
-#!/bin/bash
+#!/bin/sh
set -ex
-cargo install --locked deqp-runner \
- -j ${FDO_CI_CONCURRENT:-4} \
- --version 0.10.0 \
- --root /usr/local \
- $EXTRA_CARGO_ARGS
+if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
+ # Build and install from source
+ DEQP_RUNNER_CARGO_ARGS="--git ${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/anholt/deqp-runner.git}"
+
+ if [ -n "${DEQP_RUNNER_GIT_TAG}" ]; then
+ DEQP_RUNNER_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${DEQP_RUNNER_CARGO_ARGS}"
+ else
+ DEQP_RUNNER_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${DEQP_RUNNER_CARGO_ARGS}"
+ fi
+
+ DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}"
+else
+ # Install from package registry
+ DEQP_RUNNER_CARGO_ARGS="--version 0.13.1 ${EXTRA_CARGO_ARGS} -- deqp-runner"
+fi
+
+cargo install --locked \
+ -j ${FDO_CI_CONCURRENT:-4} \
+ --root /usr/local \
+ ${DEQP_RUNNER_CARGO_ARGS}
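
Note: the rewritten script chooses between a crates.io install and a git install of deqp-runner depending on DEQP_RUNNER_GIT_TAG/DEQP_RUNNER_GIT_REV. Spelled out as direct cargo commands, the two paths amount to roughly the following sketch (the tag name is a hypothetical example):

    # From the package registry (the script's default path):
    cargo install --locked -j "${FDO_CI_CONCURRENT:-4}" --root /usr/local \
        --version 0.13.1 deqp-runner

    # From a git tag:
    cargo install --locked -j "${FDO_CI_CONCURRENT:-4}" --root /usr/local \
        --git https://gitlab.freedesktop.org/anholt/deqp-runner.git \
        --tag v0.13.1 deqp-runner
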
diff --git a/lib/mesa/.gitlab-ci/container/build-deqp.sh b/lib/mesa/.gitlab-ci/container/build-deqp.sh
index 616d504a3..f239fef2a 100644
--- a/lib/mesa/.gitlab-ci/container/build-deqp.sh
+++ b/lib/mesa/.gitlab-ci/container/build-deqp.sh
@@ -6,11 +6,15 @@ git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
https://github.com/KhronosGroup/VK-GL-CTS.git \
- -b vulkan-cts-1.2.7.1 \
+ -b vulkan-cts-1.3.1.1 \
--depth 1 \
/VK-GL-CTS
pushd /VK-GL-CTS
+# Cherry-pick fix for zlib dependency
+git fetch origin main
+git cherry-pick -x ec1804831b654ac55bd2a7a5dd27a556afe05030
+
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
# libpng (sigh). The archives get their checksums checked anyway, and git
# always goes through ssh or https.
@@ -68,7 +72,11 @@ cp /deqp/executor/testlog-to-* /deqp/executor.save
rm -rf /deqp/executor
mv /deqp/executor.save /deqp/executor
+# Remove other mustpass files, since we saved off the ones we wanted to conventient locations above.
rm -rf /deqp/external/openglcts/modules/gl_cts/data/mustpass
+rm -rf /deqp/external/vulkancts/modules/vulkan/vk-master*
+rm -rf /deqp/external/vulkancts/modules/vulkan/vk-default
+
rm -rf /deqp/external/openglcts/modules/cts-runner
rm -rf /deqp/modules/internal
rm -rf /deqp/execserver
diff --git a/lib/mesa/.gitlab-ci/container/build-fossilize.sh b/lib/mesa/.gitlab-ci/container/build-fossilize.sh
index 2195c4bf5..a8abbb938 100644
--- a/lib/mesa/.gitlab-ci/container/build-fossilize.sh
+++ b/lib/mesa/.gitlab-ci/container/build-fossilize.sh
@@ -4,7 +4,7 @@ set -ex
git clone https://github.com/ValveSoftware/Fossilize.git
cd Fossilize
-git checkout 72088685d90bc814d14aad5505354ffa8a642789
+git checkout 16fba1b8b5d9310126bb02323d7bae3227338461
git submodule update --init
mkdir build
cd build
diff --git a/lib/mesa/.gitlab-ci/container/build-kernel.sh b/lib/mesa/.gitlab-ci/container/build-kernel.sh
index 7367924c7..426ecb780 100644
--- a/lib/mesa/.gitlab-ci/container/build-kernel.sh
+++ b/lib/mesa/.gitlab-ci/container/build-kernel.sh
@@ -28,7 +28,7 @@ if [[ -n ${DEVICE_TREES} ]]; then
cp ${DEVICE_TREES} /lava-files/.
fi
-if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
+if [[ ${DEBIAN_ARCH} = "amd64" || ${DEBIAN_ARCH} = "arm64" ]]; then
make modules
INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
fi
diff --git a/lib/mesa/.gitlab-ci/container/build-libdrm.sh b/lib/mesa/.gitlab-ci/container/build-libdrm.sh
index e765f9322..65bd7768a 100644
--- a/lib/mesa/.gitlab-ci/container/build-libdrm.sh
+++ b/lib/mesa/.gitlab-ci/container/build-libdrm.sh
@@ -2,7 +2,7 @@
set -ex
-export LIBDRM_VERSION=libdrm-2.4.107
+export LIBDRM_VERSION=libdrm-2.4.110
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
diff --git a/lib/mesa/.gitlab-ci/container/build-piglit.sh b/lib/mesa/.gitlab-ci/container/build-piglit.sh
index 5bc07ed04..b19d7c067 100644
--- a/lib/mesa/.gitlab-ci/container/build-piglit.sh
+++ b/lib/mesa/.gitlab-ci/container/build-piglit.sh
@@ -4,7 +4,7 @@ set -ex
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
-git checkout 7d7dd2688c214e1b3c00f37226500cbec4a58efb
+git checkout 445711587d461539a4d8f9d35a7fe996a86d3c8d
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS
diff --git a/lib/mesa/.gitlab-ci/container/build-skqp.sh b/lib/mesa/.gitlab-ci/container/build-skqp.sh
new file mode 100755
index 000000000..af95eba32
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/build-skqp.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+#
+# Copyright (C) 2022 Collabora Limited
+# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
+create_gn_args() {
+    # gn can be configured to cross-compile skia and its tools.
+    # It is important to set target_cpu to guarantee that the build targets
+    # the intended machine.
+ cp "${BASE_ARGS_GN_FILE}" "${SKQP_OUT_DIR}"/args.gn
+ echo "target_cpu = \"${SKQP_ARCH}\"" >> "${SKQP_OUT_DIR}"/args.gn
+}
+
+
+download_skia_source() {
+ if [ -z ${SKIA_DIR+x} ]
+ then
+ return 1
+ fi
+
+ # Skia cloned from https://android.googlesource.com/platform/external/skqp
+ # has all needed assets tracked on git-fs
+ SKQP_REPO=https://android.googlesource.com/platform/external/skqp
+ SKQP_BRANCH=android-cts-10.0_r11
+
+ git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}"
+}
+
+set -ex
+
+SCRIPT_DIR=$(realpath "$(dirname "$0")")
+SKQP_PATCH_DIR="${SCRIPT_DIR}"
+BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn"
+
+SKQP_ARCH=${SKQP_ARCH:-x64}
+SKIA_DIR=${SKIA_DIR:-$(mktemp -d)}
+SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH}
+SKQP_INSTALL_DIR=/skqp
+SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets"
+SKQP_BINARIES=(skqp)
+
+download_skia_source
+
+pushd "${SKIA_DIR}"
+
+# Apply all skqp patches for Mesa CI
+cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch |
+ patch -p1
+
+# Fetch the build tools needed to build skia/skqp.
+# Basically, it clones the repositories at the commit SHAs listed in the
+# ${SKIA_DIR}/DEPS file.
+python tools/git-sync-deps
+
+mkdir -p "${SKQP_OUT_DIR}"
+mkdir -p "${SKQP_INSTALL_DIR}"
+
+create_gn_args
+
+# Build and install skqp binaries
+bin/gn gen "${SKQP_OUT_DIR}"
+
+for BINARY in "${SKQP_BINARIES[@]}"
+do
+ /usr/bin/ninja -C "${SKQP_OUT_DIR}" "${BINARY}"
+ # Strip binary, since gn is not stripping it even when `is_debug == false`
+ ${STRIP_CMD:-strip} "${SKQP_OUT_DIR}/${BINARY}"
+ install -m 0755 "${SKQP_OUT_DIR}/${BINARY}" "${SKQP_INSTALL_DIR}"
+done
+
+# Move assets to the target directory, which will reside in rootfs.
+mv platform_tools/android/apps/skqp/src/main/assets/ "${SKQP_ASSETS_DIR}"
+
+popd
+rm -Rf "${SKIA_DIR}"
+
+set +ex
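A minimal usage sketch, mirroring how lava_build.sh invokes this script later in this change; the SKIA_DIR value is a made-up example, as by default the script picks a temporary directory and removes it afterwards:

# Cross-build skqp for arm64; create_gn_args() writes out/arm64/args.gn as a
# copy of build-skqp_base.gn with one extra line appended:
#   target_cpu = "arm64"
SKQP_ARCH="arm64" SKIA_DIR="/tmp/skqp-src" . .gitlab-ci/container/build-skqp.sh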
diff --git a/lib/mesa/.gitlab-ci/container/build-skqp_BUILD.gn.patch b/lib/mesa/.gitlab-ci/container/build-skqp_BUILD.gn.patch
new file mode 100644
index 000000000..a1e82af6b
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/build-skqp_BUILD.gn.patch
@@ -0,0 +1,13 @@
+diff --git a/BUILD.gn b/BUILD.gn
+index d2b1407..7b60c90 100644
+--- a/BUILD.gn
++++ b/BUILD.gn
+@@ -144,7 +144,7 @@ config("skia_public") {
+
+ # Skia internal APIs, used by Skia itself and a few test tools.
+ config("skia_private") {
+- visibility = [ ":*" ]
++ visibility = [ "*" ]
+
+ include_dirs = [
+ "include/private",
diff --git a/lib/mesa/.gitlab-ci/container/build-skqp_base.gn b/lib/mesa/.gitlab-ci/container/build-skqp_base.gn
new file mode 100644
index 000000000..3df11647a
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/build-skqp_base.gn
@@ -0,0 +1,47 @@
+cc = "clang"
+cxx = "clang++"
+
+extra_cflags = [ "-DSK_ENABLE_DUMP_GPU", "-DSK_BUILD_FOR_SKQP" ]
+extra_cflags_cc = [
+ "-Wno-error",
+
+  # The skqp build process produces a lot of compilation warnings; silence
+  # most of them to remove clutter and keep the CI job log from exceeding
+  # the maximum size.
+
+ # GCC flags
+ "-Wno-redundant-move",
+ "-Wno-suggest-override",
+ "-Wno-class-memaccess",
+ "-Wno-deprecated-copy",
+ "-Wno-uninitialized",
+
+ # Clang flags
+ "-Wno-macro-redefined",
+ "-Wno-anon-enum-enum-conversion",
+ "-Wno-suggest-destructor-override",
+ "-Wno-return-std-move-in-c++11",
+ "-Wno-extra-semi-stmt",
+ ]
+
+cc_wrapper = "ccache"
+
+is_debug = false
+
+skia_enable_fontmgr_android = false
+skia_enable_fontmgr_empty = true
+skia_enable_pdf = false
+skia_enable_skottie = false
+
+skia_skqp_global_error_tolerance = 8
+skia_tools_require_resources = true
+
+skia_use_dng_sdk = false
+skia_use_expat = true
+skia_use_icu = false
+skia_use_libheif = false
+skia_use_lua = false
+skia_use_piex = false
+skia_use_vulkan = true
+
+target_os = "linux"
diff --git a/lib/mesa/.gitlab-ci/container/build-skqp_fetch_gn.patch b/lib/mesa/.gitlab-ci/container/build-skqp_fetch_gn.patch
new file mode 100644
index 000000000..545cf2af7
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/build-skqp_fetch_gn.patch
@@ -0,0 +1,68 @@
+diff --git a/bin/fetch-gn b/bin/fetch-gn
+index d5e94a2..59c4591 100755
+--- a/bin/fetch-gn
++++ b/bin/fetch-gn
+@@ -5,39 +5,44 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+
+-import hashlib
+ import os
++import platform
+ import shutil
+ import stat
+ import sys
+-import urllib2
++import tempfile
++import zipfile
++
++if sys.version_info[0] < 3:
++ from urllib2 import urlopen
++else:
++ from urllib.request import urlopen
+
+ os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
+
+-dst = 'bin/gn.exe' if 'win32' in sys.platform else 'bin/gn'
++gnzip = os.path.join(tempfile.mkdtemp(), 'gn.zip')
++with open(gnzip, 'wb') as f:
++ OS = {'darwin': 'mac', 'linux': 'linux', 'linux2': 'linux', 'win32': 'windows'}[sys.platform]
++ cpu = {'amd64': 'amd64', 'arm64': 'arm64', 'x86_64': 'amd64', 'aarch64': 'arm64'}[platform.machine().lower()]
+
+-sha1 = '2f27ff0b6118e5886df976da5effa6003d19d1ce' if 'linux' in sys.platform else \
+- '9be792dd9010ce303a9c3a497a67bcc5ac8c7666' if 'darwin' in sys.platform else \
+- 'eb69be2d984b4df60a8c21f598135991f0ad1742' # Windows
++ rev = 'd62642c920e6a0d1756316d225a90fd6faa9e21e'
++ url = 'https://chrome-infra-packages.appspot.com/dl/gn/gn/{}-{}/+/git_revision:{}'.format(
++ OS,cpu,rev)
++ f.write(urlopen(url).read())
+
+-def sha1_of_file(path):
+- h = hashlib.sha1()
+- if os.path.isfile(path):
+- with open(path, 'rb') as f:
+- h.update(f.read())
+- return h.hexdigest()
++gn = 'gn.exe' if 'win32' in sys.platform else 'gn'
++with zipfile.ZipFile(gnzip, 'r') as f:
++ f.extract(gn, 'bin')
+
+-if sha1_of_file(dst) != sha1:
+- with open(dst, 'wb') as f:
+- f.write(urllib2.urlopen('https://chromium-gn.storage-download.googleapis.com/' + sha1).read())
++gn = os.path.join('bin', gn)
+
+- os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
+- stat.S_IRGRP | stat.S_IXGRP |
+- stat.S_IROTH | stat.S_IXOTH )
++os.chmod(gn, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
++ stat.S_IRGRP | stat.S_IXGRP |
++ stat.S_IROTH | stat.S_IXOTH )
+
+ # We'll also copy to a path that depot_tools' GN wrapper will expect to find the binary.
+ copy_path = 'buildtools/linux64/gn' if 'linux' in sys.platform else \
+ 'buildtools/mac/gn' if 'darwin' in sys.platform else \
+ 'buildtools/win/gn.exe'
+ if os.path.isdir(os.path.dirname(copy_path)):
+- shutil.copy(dst, copy_path)
++ shutil.copy(gn, copy_path)
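To make the patched download step concrete, here is a rough shell equivalent, under the assumption of a linux/x86_64 host, of what the new bin/fetch-gn does; the real script does the same thing in Python and also copies the binary to buildtools/linux64/gn:

rev=d62642c920e6a0d1756316d225a90fd6faa9e21e
url="https://chrome-infra-packages.appspot.com/dl/gn/gn/linux-amd64/+/git_revision:${rev}"
wget -O /tmp/gn.zip "${url}"      # the Python code uses urlopen() instead
unzip -o /tmp/gn.zip gn -d bin    # extract bin/gn from the CIPD archive
chmod 0755 bin/gn                 # matches the permission bits set by the script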
diff --git a/lib/mesa/.gitlab-ci/container/build-skqp_git-sync-deps.patch b/lib/mesa/.gitlab-ci/container/build-skqp_git-sync-deps.patch
new file mode 100644
index 000000000..d088349ad
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/build-skqp_git-sync-deps.patch
@@ -0,0 +1,142 @@
+Patch based on a diff against the skia repository at commit
+013397884c73959dc07cb0a26ee742b1cdfbda8a
+
+Adds support for Python 3, but removes the constraint that only SHA-based refs
+may be used in DEPS
+diff --git a/tools/git-sync-deps b/tools/git-sync-deps
+index c7379c0b5c..f63d4d9ccf 100755
+--- a/tools/git-sync-deps
++++ b/tools/git-sync-deps
+@@ -43,7 +43,7 @@ def git_executable():
+ A string suitable for passing to subprocess functions, or None.
+ """
+ envgit = os.environ.get('GIT_EXECUTABLE')
+- searchlist = ['git']
++ searchlist = ['git', 'git.bat']
+ if envgit:
+ searchlist.insert(0, envgit)
+ with open(os.devnull, 'w') as devnull:
+@@ -94,21 +94,25 @@ def is_git_toplevel(git, directory):
+ try:
+ toplevel = subprocess.check_output(
+ [git, 'rev-parse', '--show-toplevel'], cwd=directory).strip()
+- return os.path.realpath(directory) == os.path.realpath(toplevel)
++ return os.path.realpath(directory) == os.path.realpath(toplevel.decode())
+ except subprocess.CalledProcessError:
+ return False
+
+
+-def status(directory, checkoutable):
+- def truncate(s, length):
++def status(directory, commithash, change):
++ def truncate_beginning(s, length):
++ return s if len(s) <= length else '...' + s[-(length-3):]
++ def truncate_end(s, length):
+ return s if len(s) <= length else s[:(length - 3)] + '...'
++
+ dlen = 36
+- directory = truncate(directory, dlen)
+- checkoutable = truncate(checkoutable, 40)
+- sys.stdout.write('%-*s @ %s\n' % (dlen, directory, checkoutable))
++ directory = truncate_beginning(directory, dlen)
++ commithash = truncate_end(commithash, 40)
++ symbol = '>' if change else '@'
++ sys.stdout.write('%-*s %s %s\n' % (dlen, directory, symbol, commithash))
+
+
+-def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
++def git_checkout_to_directory(git, repo, commithash, directory, verbose):
+ """Checkout (and clone if needed) a Git repository.
+
+ Args:
+@@ -117,8 +121,7 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
+ repo (string) the location of the repository, suitable
+ for passing to `git clone`.
+
+- checkoutable (string) a tag, branch, or commit, suitable for
+- passing to `git checkout`
++ commithash (string) a commit, suitable for passing to `git checkout`
+
+ directory (string) the path into which the repository
+ should be checked out.
+@@ -129,7 +132,12 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
+ """
+ if not os.path.isdir(directory):
+ subprocess.check_call(
+- [git, 'clone', '--quiet', repo, directory])
++ [git, 'clone', '--quiet', '--no-checkout', repo, directory])
++ subprocess.check_call([git, 'checkout', '--quiet', commithash],
++ cwd=directory)
++ if verbose:
++ status(directory, commithash, True)
++ return
+
+ if not is_git_toplevel(git, directory):
+ # if the directory exists, but isn't a git repo, you will modify
+@@ -145,11 +153,11 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
+ with open(os.devnull, 'w') as devnull:
+ # If this fails, we will fetch before trying again. Don't spam user
+ # with error infomation.
+- if 0 == subprocess.call([git, 'checkout', '--quiet', checkoutable],
++ if 0 == subprocess.call([git, 'checkout', '--quiet', commithash],
+ cwd=directory, stderr=devnull):
+ # if this succeeds, skip slow `git fetch`.
+ if verbose:
+- status(directory, checkoutable) # Success.
++ status(directory, commithash, False) # Success.
+ return
+
+ # If the repo has changed, always force use of the correct repo.
+@@ -159,18 +167,24 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
+
+ subprocess.check_call([git, 'fetch', '--quiet'], cwd=directory)
+
+- subprocess.check_call([git, 'checkout', '--quiet', checkoutable], cwd=directory)
++ subprocess.check_call([git, 'checkout', '--quiet', commithash], cwd=directory)
+
+ if verbose:
+- status(directory, checkoutable) # Success.
++ status(directory, commithash, True) # Success.
+
+
+ def parse_file_to_dict(path):
+ dictionary = {}
+- execfile(path, dictionary)
++ with open(path) as f:
++ exec('def Var(x): return vars[x]\n' + f.read(), dictionary)
+ return dictionary
+
+
++def is_sha1_sum(s):
++ """SHA1 sums are 160 bits, encoded as lowercase hexadecimal."""
++ return len(s) == 40 and all(c in '0123456789abcdef' for c in s)
++
++
+ def git_sync_deps(deps_file_path, command_line_os_requests, verbose):
+ """Grab dependencies, with optional platform support.
+
+@@ -204,19 +218,19 @@ def git_sync_deps(deps_file_path, command_line_os_requests, verbose):
+ raise Exception('%r is parent of %r' % (other_dir, directory))
+ list_of_arg_lists = []
+ for directory in sorted(dependencies):
+- if not isinstance(dependencies[directory], basestring):
++ if not isinstance(dependencies[directory], str):
+ if verbose:
+- print 'Skipping "%s".' % directory
++ sys.stdout.write( 'Skipping "%s".\n' % directory)
+ continue
+ if '@' in dependencies[directory]:
+- repo, checkoutable = dependencies[directory].split('@', 1)
++ repo, commithash = dependencies[directory].split('@', 1)
+ else:
+- raise Exception("please specify commit or tag")
++ raise Exception("please specify commit")
+
+ relative_directory = os.path.join(deps_file_directory, directory)
+
+ list_of_arg_lists.append(
+- (git, repo, checkoutable, relative_directory, verbose))
++ (git, repo, commithash, relative_directory, verbose))
+
+ multithread(git_checkout_to_directory, list_of_arg_lists)
+
diff --git a/lib/mesa/.gitlab-ci/container/build-skqp_is_clang.py.patch b/lib/mesa/.gitlab-ci/container/build-skqp_is_clang.py.patch
new file mode 100644
index 000000000..af6f6cff3
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/build-skqp_is_clang.py.patch
@@ -0,0 +1,13 @@
+diff --git a/gn/BUILDCONFIG.gn b/gn/BUILDCONFIG.gn
+index 454334a..1797594 100644
+--- a/gn/BUILDCONFIG.gn
++++ b/gn/BUILDCONFIG.gn
+@@ -80,7 +80,7 @@ if (current_cpu == "") {
+ is_clang = is_android || is_ios || is_mac ||
+ (cc == "clang" && cxx == "clang++") || clang_win != ""
+ if (!is_clang && !is_win) {
+- is_clang = exec_script("gn/is_clang.py",
++ is_clang = exec_script("//gn/is_clang.py",
+ [
+ cc,
+ cxx,
diff --git a/lib/mesa/.gitlab-ci/container/build-vkd3d-proton.sh b/lib/mesa/.gitlab-ci/container/build-vkd3d-proton.sh
index b2898271e..0da94de9d 100644
--- a/lib/mesa/.gitlab-ci/container/build-vkd3d-proton.sh
+++ b/lib/mesa/.gitlab-ci/container/build-vkd3d-proton.sh
@@ -2,8 +2,8 @@
set -ex
-VKD3D_PROTON_VERSION="2.3.1"
-VKD3D_PROTON_COMMIT="3ed3526332f53d7d35cf1b685fa8096b01f26ff0"
+VKD3D_PROTON_VERSION="2.6"
+VKD3D_PROTON_COMMIT="3e5aab6fb3e18f81a71b339be4cb5cdf55140980"
VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
diff --git a/lib/mesa/.gitlab-ci/container/build-wayland.sh b/lib/mesa/.gitlab-ci/container/build-wayland.sh
new file mode 100644
index 000000000..893ee058f
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/build-wayland.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -ex
+
+export LIBWAYLAND_VERSION="1.18.0"
+export WAYLAND_PROTOCOLS_VERSION="1.24"
+
+git clone https://gitlab.freedesktop.org/wayland/wayland
+cd wayland
+git checkout "$LIBWAYLAND_VERSION"
+meson -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build
+ninja -C _build install
+cd ..
+rm -rf wayland
+
+git clone https://gitlab.freedesktop.org/wayland/wayland-protocols
+cd wayland-protocols
+git checkout "$WAYLAND_PROTOCOLS_VERSION"
+meson _build
+ninja -C _build install
+cd ..
+rm -rf wayland-protocols
diff --git a/lib/mesa/.gitlab-ci/container/container_pre_build.sh b/lib/mesa/.gitlab-ci/container/container_pre_build.sh
index dc36970c8..7bfa5b0c6 100755
--- a/lib/mesa/.gitlab-ci/container/container_pre_build.sh
+++ b/lib/mesa/.gitlab-ci/container/container_pre_build.sh
@@ -10,7 +10,7 @@ fi
export CCACHE_COMPILERCHECK=content
export CCACHE_COMPRESS=true
-export CCACHE_DIR=/cache/mesa/ccache
+export CCACHE_DIR=/cache/$CI_PROJECT_NAME/ccache
export PATH=$CCACHE_PATH:$PATH
# CMake ignores $PATH, so we have to force CC/GCC to the ccache versions.
diff --git a/lib/mesa/.gitlab-ci/container/create-rootfs.sh b/lib/mesa/.gitlab-ci/container/create-rootfs.sh
index f8a888cad..5922745f9 100644
--- a/lib/mesa/.gitlab-ci/container/create-rootfs.sh
+++ b/lib/mesa/.gitlab-ci/container/create-rootfs.sh
@@ -3,13 +3,26 @@
set -ex
if [ $DEBIAN_ARCH = arm64 ]; then
- ARCH_PACKAGES="firmware-qcom-media"
+ ARCH_PACKAGES="firmware-qcom-media
+ firmware-linux-nonfree
+ libfontconfig1
+ libgl1
+ libglu1-mesa
+ libvulkan-dev
+ "
elif [ $DEBIAN_ARCH = amd64 ]; then
ARCH_PACKAGES="firmware-amd-graphics
+ inetutils-syslogd
+ iptables
+ libcap2
libelf1
+ libfdt1
libllvm11
libva2
libva-drm2
+ socat
+ spirv-tools
+ sysvinit-core
"
fi
@@ -23,6 +36,8 @@ INSTALL_CI_FAIRY_PACKAGES="git
apt-get -y install --no-install-recommends \
$ARCH_PACKAGES \
$INSTALL_CI_FAIRY_PACKAGES \
+ $EXTRA_LOCAL_PACKAGES \
+ bash \
ca-certificates \
firmware-realtek \
initramfs-tools \
@@ -66,12 +81,11 @@ apt-get -y install --no-install-recommends \
waffle-utils \
wget \
xinit \
- xserver-xorg-core \
- xz-utils
+ xserver-xorg-core
# Needed for ci-fairy, this revision is able to upload files to
# MinIO and doesn't depend on git
-pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@0f1abc24c043e63894085a6bd12f14263e8b29eb
+pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125
apt-get purge -y \
$INSTALL_CI_FAIRY_PACKAGES
@@ -90,10 +104,6 @@ chmod +x /init
# Strip the image to a small minimal system without removing the debian
# toolchain.
-# xz compress firmware so it doesn't waste RAM at runtime on ramdisk systems
-find /lib/firmware -type f -print0 | \
- xargs -0r -P4 -n4 xz -T1 -C crc32
-
# Copy timezone file and remove tzdata package
rm -rf /etc/localtime
cp /usr/share/zoneinfo/Etc/UTC /etc/localtime
@@ -146,6 +156,8 @@ rm -rf usr/sbin/update-usbids
rm -rf var/lib/usbutils/usb.ids
rm -rf usr/share/misc/usb.ids
+rm -rf /root/.pip
+
#######################################################################
# Crush into a minimal production image to be deployed via some type of image
# updating system.
@@ -160,9 +172,7 @@ UNNEEDED_PACKAGES="apt libapt-pkg6.0 "\
"insserv "\
"udev "\
"init-system-helpers "\
-"bash "\
"cpio "\
-"xz-utils "\
"passwd "\
"libsemanage1 libsemanage-common "\
"libsepol1 "\
@@ -207,7 +217,7 @@ rm -rf var/* opt srv share
# ca-certificates are in /etc drop the source
rm -rf usr/share/ca-certificates
-# No bash, no need for completions
+# No need for completions
rm -rf usr/share/bash-completion
 # No zsh, no need for completions
diff --git a/lib/mesa/.gitlab-ci/container/debian/android_build.sh b/lib/mesa/.gitlab-ci/container/debian/android_build.sh
index 88c05af4e..121976f1b 100644
--- a/lib/mesa/.gitlab-ci/container/debian/android_build.sh
+++ b/lib/mesa/.gitlab-ci/container/debian/android_build.sh
@@ -3,6 +3,7 @@
set -ex
EPHEMERAL="\
+ autoconf \
rdfind \
unzip \
"
@@ -29,7 +30,7 @@ sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi
# Not using build-libdrm.sh because we don't want its cleanup after building
# each arch. Fetch and extract now.
-export LIBDRM_VERSION=libdrm-2.4.102
+export LIBDRM_VERSION=libdrm-2.4.110
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
@@ -50,11 +51,56 @@ for arch in \
-Detnaviv=false \
-Dfreedreno=false \
-Dintel=false \
- -Dcairo-tests=false
+ -Dcairo-tests=false \
+ -Dvalgrind=false
ninja -C build-$arch install
cd ..
done
rm -rf $LIBDRM_VERSION
+export LIBELF_VERSION=libelf-0.8.13
+wget https://fossies.org/linux/misc/old/$LIBELF_VERSION.tar.gz
+
+# Not 100% sure who runs the mirror above so be extra careful
+if ! echo "4136d7b4c04df68b686570afa26988ac ${LIBELF_VERSION}.tar.gz" | md5sum -c -; then
+ echo "Checksum failed"
+ exit 1
+fi
+
+tar -xf ${LIBELF_VERSION}.tar.gz
+cd $LIBELF_VERSION
+
+# Work around a bug in the original configure not enabling __LIBELF64.
+autoreconf
+
+for arch in \
+ x86_64-linux-android \
+ i686-linux-android \
+ aarch64-linux-android \
+ arm-linux-androideabi ; do
+
+ ccarch=${arch}
+ if [ "${arch}" == 'arm-linux-androideabi' ]
+ then
+ ccarch=armv7a-linux-androideabi
+ fi
+
+  export AR=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch}-ar
+ export CC=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${ccarch}29-clang
+ export CXX=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${ccarch}29-clang++
+ export LD=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch}-ld
+ export RANLIB=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch}-ranlib
+
+  # The configure script doesn't know about Android, but it doesn't seem to
+  # really use the host anyway.
+ ./configure --host=x86_64-linux-gnu --disable-nls --disable-shared \
+ --libdir=/usr/local/lib/${arch}
+ make install
+ make distclean
+done
+
+cd ..
+rm -rf $LIBELF_VERSION
+
apt-get purge -y $EPHEMERAL
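As a worked example of the loop above (and of the ccarch special case), these are the toolchain paths it ends up exporting for the 32-bit ARM target; they are direct substitutions into the NDK path used by the script, with the first export pointing the archiver at the NDK's binutils wrapper:

# arch=arm-linux-androideabi, so ccarch=armv7a-linux-androideabi
export AR=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/arm-linux-androideabi-ar
export CC=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi29-clang
export CXX=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi29-clang++
export LD=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/arm-linux-androideabi-ld
export RANLIB=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/arm-linux-androideabi-ranlib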
diff --git a/lib/mesa/.gitlab-ci/container/debian/arm_build.sh b/lib/mesa/.gitlab-ci/container/debian/arm_build.sh
index c16e9474d..0f9883721 100644
--- a/lib/mesa/.gitlab-ci/container/debian/arm_build.sh
+++ b/lib/mesa/.gitlab-ci/container/debian/arm_build.sh
@@ -9,6 +9,7 @@ echo 'deb https://deb.debian.org/debian buster main' >/etc/apt/sources.list.d/bu
apt-get update
apt-get -y install \
+ ${EXTRA_LOCAL_PACKAGES} \
abootimg \
autoconf \
automake \
@@ -57,7 +58,7 @@ apt-get -y install \
apt-get install -y --no-remove -t buster \
android-sdk-ext4-utils
-pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366
+pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125
arch=armhf
. .gitlab-ci/container/cross_build.sh
diff --git a/lib/mesa/.gitlab-ci/container/debian/arm_test.sh b/lib/mesa/.gitlab-ci/container/debian/arm_test.sh
index eedb88e91..a420dbb47 100644
--- a/lib/mesa/.gitlab-ci/container/debian/arm_test.sh
+++ b/lib/mesa/.gitlab-ci/container/debian/arm_test.sh
@@ -31,3 +31,9 @@ arch=armhf . .gitlab-ci/container/baremetal_build.sh
# This firmware file from Debian bullseye causes hangs
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/qcom/a530_pfp.fw?id=d5f9eea5a251d43412b07f5295d03e97b89ac4a5 \
-O /rootfs-arm64/lib/firmware/qcom/a530_pfp.fw
+
+mkdir -p /baremetal-files/jetson-nano/boot/
+ln -s \
+ /baremetal-files/Image \
+ /baremetal-files/tegra210-p3450-0000.dtb \
+ /baremetal-files/jetson-nano/boot/
diff --git a/lib/mesa/.gitlab-ci/container/debian/x86_build-base.sh b/lib/mesa/.gitlab-ci/container/debian/x86_build-base.sh
index 6333fa70e..85a6a6f00 100644
--- a/lib/mesa/.gitlab-ci/container/debian/x86_build-base.sh
+++ b/lib/mesa/.gitlab-ci/container/debian/x86_build-base.sh
@@ -63,7 +63,6 @@ apt-get install -y --no-remove \
python3-requests \
qemu-user \
valgrind \
- wayland-protocols \
wget \
wine64 \
x11proto-dri2-dev \
@@ -73,7 +72,7 @@ apt-get install -y --no-remove \
zlib1g-dev
# Needed for ci-fairy, this revision is able to upload files to MinIO
-pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366
+pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125
############### Uninstall ephemeral packages
diff --git a/lib/mesa/.gitlab-ci/container/debian/x86_build.sh b/lib/mesa/.gitlab-ci/container/debian/x86_build.sh
index 318a0bc49..72e326af1 100644
--- a/lib/mesa/.gitlab-ci/container/debian/x86_build.sh
+++ b/lib/mesa/.gitlab-ci/container/debian/x86_build.sh
@@ -11,8 +11,6 @@ STABLE_EPHEMERAL=" \
automake \
autotools-dev \
bzip2 \
- cmake \
- libgbm-dev \
libtool \
python3-pip \
"
@@ -23,10 +21,13 @@ apt-get update
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
+ check \
clang \
+ cmake \
libasan6 \
libarchive-dev \
libclang-cpp11-dev \
+ libgbm-dev \
libglvnd-dev \
libllvmspirvlib-dev \
liblua5.3-dev \
@@ -43,6 +44,8 @@ apt-get install -y --no-remove \
llvm-11-dev \
llvm-9-dev \
ocl-icd-opencl-dev \
+ python3-freezegun \
+ python3-pytest \
procps \
spirv-tools \
strace \
@@ -67,10 +70,8 @@ chmod +x /usr/local/bin/x86_64-w64-mingw32-pkg-config
# dependencies where we want a specific version
export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
-export WAYLAND_RELEASES=https://wayland.freedesktop.org/releases
export XORGMACROS_VERSION=util-macros-1.19.0
-export LIBWAYLAND_VERSION=wayland-1.18.0
wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
@@ -79,11 +80,7 @@ rm -rf $XORGMACROS_VERSION
. .gitlab-ci/container/build-libdrm.sh
-wget $WAYLAND_RELEASES/$LIBWAYLAND_VERSION.tar.xz
-tar -xvf $LIBWAYLAND_VERSION.tar.xz && rm $LIBWAYLAND_VERSION.tar.xz
-cd $LIBWAYLAND_VERSION; ./configure --enable-libraries --without-host-scanner --disable-documentation --disable-dtd-validation; make install; cd ..
-rm -rf $LIBWAYLAND_VERSION
-
+. .gitlab-ci/container/build-wayland.sh
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
diff --git a/lib/mesa/.gitlab-ci/container/debian/x86_test-base.sh b/lib/mesa/.gitlab-ci/container/debian/x86_test-base.sh
index 362cf7e9e..7d56e7825 100644
--- a/lib/mesa/.gitlab-ci/container/debian/x86_test-base.sh
+++ b/lib/mesa/.gitlab-ci/container/debian/x86_test-base.sh
@@ -59,7 +59,7 @@ apt-get install -y --no-install-recommends \
# Needed for ci-fairy, this revision is able to upload files to MinIO
# and doesn't depend on git
-pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@0f1abc24c043e63894085a6bd12f14263e8b29eb
+pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125
############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh
diff --git a/lib/mesa/.gitlab-ci/container/debian/x86_test-gl.sh b/lib/mesa/.gitlab-ci/container/debian/x86_test-gl.sh
index ce9bec8b9..9dc8b395d 100644
--- a/lib/mesa/.gitlab-ci/container/debian/x86_test-gl.sh
+++ b/lib/mesa/.gitlab-ci/container/debian/x86_test-gl.sh
@@ -22,6 +22,7 @@ STABLE_EPHEMERAL=" \
libcap-dev \
libclang-cpp11-dev \
libelf-dev \
+ libexpat1-dev \
libfdt-dev \
libgbm-dev \
libgles2-mesa-dev \
@@ -31,7 +32,6 @@ STABLE_EPHEMERAL=" \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
- libwayland-dev \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxext-dev \
@@ -45,20 +45,18 @@ STABLE_EPHEMERAL=" \
patch \
pkg-config \
python3-distutils \
- wayland-protocols \
- wget \
xz-utils \
"
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
clinfo \
- inetutils-syslogd \
iptables \
libclang-common-11-dev \
libclang-cpp11 \
libcap2 \
libegl1 \
+ libepoxy-dev \
libfdt1 \
libllvmspirvlib11 \
libxcb-shm0 \
@@ -66,12 +64,29 @@ apt-get install -y --no-remove \
python3-lxml \
python3-renderdoc \
python3-simplejson \
+ socat \
spirv-tools \
- sysvinit-core
+ sysvinit-core \
+ wget
. .gitlab-ci/container/container_pre_build.sh
+############### Build libdrm
+
+. .gitlab-ci/container/build-libdrm.sh
+
+############### Build Wayland
+
+. .gitlab-ci/container/build-wayland.sh
+
+############### Build Crosvm
+
+. .gitlab-ci/container/build-rust.sh
+. .gitlab-ci/container/build-crosvm.sh
+rm -rf /root/.cargo
+rm -rf /root/.rustup
+
############### Build kernel
export DEFCONFIG="arch/x86/configs/x86_64_defconfig"
@@ -82,28 +97,14 @@ export DEBIAN_ARCH=amd64
mkdir -p /lava-files/
. .gitlab-ci/container/build-kernel.sh
-############### Build libdrm
-
-. .gitlab-ci/container/build-libdrm.sh
-
############### Build libclc
. .gitlab-ci/container/build-libclc.sh
-############### Build virglrenderer
-
-. .gitlab-ci/container/build-virglrenderer.sh
-
############### Build piglit
PIGLIT_OPTS="-DPIGLIT_BUILD_CL_TESTS=ON -DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
-############### Build Crosvm
-
-. .gitlab-ci/container/build-rust.sh
-. .gitlab-ci/container/build-crosvm.sh
-rm -rf /root/.cargo
-
############### Build dEQP GL
DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh
diff --git a/lib/mesa/.gitlab-ci/container/debian/x86_test-vk.sh b/lib/mesa/.gitlab-ci/container/debian/x86_test-vk.sh
index b10d61162..67389861d 100644
--- a/lib/mesa/.gitlab-ci/container/debian/x86_test-vk.sh
+++ b/lib/mesa/.gitlab-ci/container/debian/x86_test-vk.sh
@@ -13,6 +13,7 @@ STABLE_EPHEMERAL=" \
g++-mingw-w64-i686-posix \
g++-mingw-w64-x86-64-posix \
glslang-tools \
+ libexpat1-dev \
libgbm-dev \
libgles2-mesa-dev \
liblz4-dev \
@@ -20,7 +21,6 @@ STABLE_EPHEMERAL=" \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
- libwayland-dev \
libx11-xcb-dev \
libxcb-ewmh-dev \
libxcb-keysyms1-dev \
@@ -124,6 +124,10 @@ wine \
. .gitlab-ci/container/build-libdrm.sh
+############### Build Wayland
+
+. .gitlab-ci/container/build-wayland.sh
+
############### Build parallel-deqp-runner's hang-detection tool
. .gitlab-ci/container/build-hang-detection.sh
diff --git a/lib/mesa/.gitlab-ci/container/fedora/x86_build.sh b/lib/mesa/.gitlab-ci/container/fedora/x86_build.sh
index 718e49e93..e1841cfec 100644
--- a/lib/mesa/.gitlab-ci/container/fedora/x86_build.sh
+++ b/lib/mesa/.gitlab-ci/container/fedora/x86_build.sh
@@ -27,6 +27,7 @@ dnf install -y --setopt=install_weak_deps=False \
gettext \
kernel-headers \
llvm-devel \
+ clang-devel \
meson \
"pkgconfig(dri2proto)" \
"pkgconfig(expat)" \
@@ -40,9 +41,6 @@ dnf install -y --setopt=install_weak_deps=False \
"pkgconfig(pciaccess)" \
"pkgconfig(vdpau)" \
"pkgconfig(vulkan)" \
- "pkgconfig(wayland-egl-backend)" \
- "pkgconfig(wayland-protocols)" \
- "pkgconfig(wayland-scanner)" \
"pkgconfig(x11)" \
"pkgconfig(x11-xcb)" \
"pkgconfig(xcb)" \
@@ -66,6 +64,8 @@ dnf install -y --setopt=install_weak_deps=False \
python3-devel \
python3-mako \
vulkan-headers \
+ spirv-tools-devel \
+ spirv-llvm-translator-devel \
$EPHEMERAL
@@ -74,10 +74,8 @@ dnf install -y --setopt=install_weak_deps=False \
# dependencies where we want a specific version
export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
-export WAYLAND_RELEASES=https://wayland.freedesktop.org/releases
export XORGMACROS_VERSION=util-macros-1.19.0
-export LIBWAYLAND_VERSION=wayland-1.18.0
wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
@@ -86,11 +84,7 @@ rm -rf $XORGMACROS_VERSION
. .gitlab-ci/container/build-libdrm.sh
-wget $WAYLAND_RELEASES/$LIBWAYLAND_VERSION.tar.xz
-tar -xvf $LIBWAYLAND_VERSION.tar.xz && rm $LIBWAYLAND_VERSION.tar.xz
-cd $LIBWAYLAND_VERSION; ./configure --enable-libraries --without-host-scanner --disable-documentation --disable-dtd-validation; make install; cd ..
-rm -rf $LIBWAYLAND_VERSION
-
+. .gitlab-ci/container/build-wayland.sh
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
diff --git a/lib/mesa/.gitlab-ci/container/gitlab-ci.yml b/lib/mesa/.gitlab-ci/container/gitlab-ci.yml
new file mode 100644
index 000000000..aa4580bb3
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/container/gitlab-ci.yml
@@ -0,0 +1,420 @@
+# Docker image tag helper templates
+
+.incorporate-templates-commit:
+ variables:
+ FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_TEMPLATES_COMMIT}"
+
+.incorporate-base-tag+templates-commit:
+ variables:
+ FDO_BASE_IMAGE: "${CI_REGISTRY_IMAGE}/${MESA_BASE_IMAGE}:${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
+ FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
+
+.set-image:
+ extends:
+ - .incorporate-templates-commit
+ variables:
+ MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"
+ image: "$MESA_IMAGE"
+
+.set-image-base-tag:
+ extends:
+ - .set-image
+ - .incorporate-base-tag+templates-commit
+ variables:
+ MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"
+
+
+# Build the CI docker images.
+#
+# MESA_IMAGE_TAG is the tag of the docker image used by later stage jobs. If the
+# image doesn't exist yet, the container stage job generates it.
+#
+# In order to generate a new image, one should generally change the tag.
+# While removing the image from the registry would also work, that's not
+# recommended except for ephemeral images during development: Replacing
+# an image after a significant amount of time might pull in newer
+# versions of gcc/clang or other packages, which might break the build
+# with older commits using the same tag.
+#
+# After merging a change resulting in generating a new image to the
+# main repository, it's recommended to remove the image from the source
+# repository's container registry, so that the image from the main
+# repository's registry will be used there as well.
+
+.container:
+ stage: container
+ extends:
+ - .container-rules
+ - .incorporate-templates-commit
+ variables:
+ FDO_DISTRIBUTION_VERSION: bullseye-slim
+ FDO_REPO_SUFFIX: $CI_JOB_NAME
+ FDO_DISTRIBUTION_EXEC: 'env FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
+ # no need to pull the whole repo to build the container image
+ GIT_STRATEGY: none
+
+.use-base-image:
+ extends:
+ - .container
+ - .incorporate-base-tag+templates-commit
+ # Don't want the .container rules
+ - .ci-run-policy
+
+# Debian 11 based x86 build image base
+debian/x86_build-base:
+ extends:
+ - .fdo.container-build@debian
+ - .container
+ variables:
+ MESA_IMAGE_TAG: &debian-x86_build-base ${DEBIAN_BASE_TAG}
+
+.use-debian/x86_build-base:
+ extends:
+ - .fdo.container-build@debian
+ - .use-base-image
+ variables:
+ MESA_BASE_IMAGE: ${DEBIAN_X86_BUILD_BASE_IMAGE}
+ MESA_BASE_TAG: *debian-x86_build-base
+ MESA_ARTIFACTS_BASE_TAG: *debian-x86_build-base
+ needs:
+ - debian/x86_build-base
+
+# Debian 11 based x86 main build image
+debian/x86_build:
+ extends:
+ - .use-debian/x86_build-base
+ variables:
+ MESA_IMAGE_TAG: &debian-x86_build ${DEBIAN_BUILD_TAG}
+
+.use-debian/x86_build:
+ extends:
+ - .set-image-base-tag
+ variables:
+ MESA_BASE_TAG: *debian-x86_build-base
+ MESA_IMAGE_PATH: ${DEBIAN_X86_BUILD_IMAGE_PATH}
+ MESA_IMAGE_TAG: *debian-x86_build
+ needs:
+ - debian/x86_build
+
+# Debian 11 based i386 cross-build image
+debian/i386_build:
+ extends:
+ - .use-debian/x86_build-base
+ variables:
+ MESA_IMAGE_TAG: &debian-i386_build ${DEBIAN_BUILD_TAG}
+
+.use-debian/i386_build:
+ extends:
+ - .set-image-base-tag
+ variables:
+ MESA_BASE_TAG: *debian-x86_build-base
+ MESA_IMAGE_PATH: "debian/i386_build"
+ MESA_IMAGE_TAG: *debian-i386_build
+ needs:
+ - debian/i386_build
+
+# Debian 11 based ppc64el cross-build image
+debian/ppc64el_build:
+ extends:
+ - .use-debian/x86_build-base
+ variables:
+ MESA_IMAGE_TAG: &debian-ppc64el_build ${DEBIAN_BUILD_TAG}
+
+.use-debian/ppc64el_build:
+ extends:
+ - .set-image-base-tag
+ variables:
+ MESA_BASE_TAG: *debian-x86_build-base
+ MESA_IMAGE_PATH: "debian/ppc64el_build"
+ MESA_IMAGE_TAG: *debian-ppc64el_build
+ needs:
+ - debian/ppc64el_build
+
+# Debian 11 based s390x cross-build image
+debian/s390x_build:
+ extends:
+ - .use-debian/x86_build-base
+ variables:
+ MESA_IMAGE_TAG: &debian-s390x_build ${DEBIAN_BUILD_TAG}
+
+.use-debian/s390x_build:
+ extends:
+ - .set-image-base-tag
+ variables:
+ MESA_BASE_TAG: *debian-x86_build-base
+ MESA_IMAGE_PATH: "debian/s390x_build"
+ MESA_IMAGE_TAG: *debian-s390x_build
+ needs:
+ - debian/s390x_build
+
+# Android NDK cross-build image
+debian/android_build:
+ extends:
+ - .use-debian/x86_build-base
+ variables:
+ MESA_IMAGE_TAG: &debian-android_build ${DEBIAN_BUILD_TAG}
+
+.use-debian/android_build:
+ extends:
+ - .set-image-base-tag
+ variables:
+ MESA_BASE_TAG: *debian-x86_build-base
+ MESA_IMAGE_PATH: "debian/android_build"
+ MESA_IMAGE_TAG: *debian-android_build
+ needs:
+ - debian/android_build
+
+# Debian 11 based x86 test image base
+debian/x86_test-base:
+ extends: debian/x86_build-base
+ variables:
+ MESA_IMAGE_TAG: &debian-x86_test-base ${DEBIAN_BASE_TAG}
+
+.use-debian/x86_test-base:
+ extends:
+ - .fdo.container-build@debian
+ - .use-base-image
+ variables:
+ MESA_BASE_IMAGE: ${DEBIAN_X86_TEST_BASE_IMAGE}
+ MESA_BASE_TAG: *debian-x86_test-base
+ needs:
+ - debian/x86_test-base
+
+# Debian 11 based x86 test image for GL
+debian/x86_test-gl:
+ extends: .use-debian/x86_test-base
+ variables:
+ FDO_DISTRIBUTION_EXEC: 'env KERNEL_URL=${KERNEL_URL} FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
+ KERNEL_URL: &kernel-rootfs-url "https://gitlab.freedesktop.org/gfx-ci/linux/-/archive/v5.16-for-mesa-ci-991fec6622591/linux-v5.16-for-mesa-ci-991fec6622591.tar.bz2"
+ MESA_IMAGE_TAG: &debian-x86_test-gl ${DEBIAN_X86_TEST_GL_TAG}
+
+.use-debian/x86_test-gl:
+ extends:
+ - .set-image-base-tag
+ variables:
+ MESA_BASE_TAG: *debian-x86_test-base
+ MESA_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_PATH}
+ MESA_IMAGE_TAG: *debian-x86_test-gl
+ needs:
+ - debian/x86_test-gl
+
+# Debian 11 based x86 test image for VK
+debian/x86_test-vk:
+ extends: .use-debian/x86_test-base
+ variables:
+ MESA_IMAGE_TAG: &debian-x86_test-vk ${DEBIAN_X86_TEST_VK_TAG}
+
+.use-debian/x86_test-vk:
+ extends:
+ - .set-image-base-tag
+ variables:
+ MESA_BASE_TAG: *debian-x86_test-base
+ MESA_IMAGE_PATH: "debian/x86_test-vk"
+ MESA_IMAGE_TAG: *debian-x86_test-vk
+ needs:
+ - debian/x86_test-vk
+
+# Debian 11 based ARM build image
+debian/arm_build:
+ extends:
+ - .fdo.container-build@debian
+ - .container
+ tags:
+ - aarch64
+ variables:
+ MESA_IMAGE_TAG: &debian-arm_build ${DEBIAN_BASE_TAG}
+
+.use-debian/arm_build:
+ extends:
+ - .set-image
+ variables:
+ MESA_IMAGE_PATH: "debian/arm_build"
+ MESA_IMAGE_TAG: *debian-arm_build
+ MESA_ARTIFACTS_TAG: *debian-arm_build
+ needs:
+ - debian/arm_build
+
+
+# Fedora 34 based x86 build image
+fedora/x86_build:
+ extends:
+ - .fdo.container-build@fedora
+ - .container
+ variables:
+ FDO_DISTRIBUTION_VERSION: 34
+ MESA_IMAGE_TAG: &fedora-x86_build ${FEDORA_X86_BUILD_TAG}
+
+.use-fedora/x86_build:
+ extends:
+ - .set-image
+ variables:
+ MESA_IMAGE_PATH: "fedora/x86_build"
+ MESA_IMAGE_TAG: *fedora-x86_build
+ needs:
+ - fedora/x86_build
+
+
+.kernel+rootfs:
+ extends:
+ - .ci-run-policy
+ stage: container
+ variables:
+ GIT_STRATEGY: fetch
+ KERNEL_URL: *kernel-rootfs-url
+ MESA_ROOTFS_TAG: &kernel-rootfs ${KERNEL_ROOTFS_TAG}
+ DISTRIBUTION_TAG: &distribution-tag-arm "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
+ script:
+ - .gitlab-ci/container/lava_build.sh
+
+kernel+rootfs_amd64:
+ extends:
+ - .use-debian/x86_build-base
+ - .kernel+rootfs
+ image: "$FDO_BASE_IMAGE"
+ variables:
+ DEBIAN_ARCH: "amd64"
+ DISTRIBUTION_TAG: &distribution-tag-amd64 "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
+
+kernel+rootfs_arm64:
+ extends:
+ - .use-debian/arm_build
+ - .kernel+rootfs
+ tags:
+ - aarch64
+ variables:
+ DEBIAN_ARCH: "arm64"
+
+kernel+rootfs_armhf:
+ extends:
+ - kernel+rootfs_arm64
+ variables:
+ DEBIAN_ARCH: "armhf"
+
+# Cannot use anchors defined here from included files, so use extends: instead
+.use-kernel+rootfs-arm:
+ variables:
+ DISTRIBUTION_TAG: *distribution-tag-arm
+ MESA_ROOTFS_TAG: *kernel-rootfs
+
+.use-kernel+rootfs-amd64:
+ variables:
+ DISTRIBUTION_TAG: *distribution-tag-amd64
+ MESA_ROOTFS_TAG: *kernel-rootfs
+
+# x86 image with ARM64 & armhf kernel & rootfs for baremetal testing
+debian/arm_test:
+ extends:
+ - .fdo.container-build@debian
+ - .container
+ # Don't want the .container rules
+ - .ci-run-policy
+ needs:
+ - kernel+rootfs_arm64
+ - kernel+rootfs_armhf
+ variables:
+ FDO_DISTRIBUTION_EXEC: 'env ARTIFACTS_PREFIX=https://${MINIO_HOST}/mesa-lava ARTIFACTS_SUFFIX=${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT} CI_PROJECT_PATH=${CI_PROJECT_PATH} FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} FDO_UPSTREAM_REPO=${FDO_UPSTREAM_REPO} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
+ FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
+ MESA_ARM_BUILD_TAG: *debian-arm_build
+ MESA_IMAGE_TAG: &debian-arm_test ${DEBIAN_BASE_TAG}
+ MESA_ROOTFS_TAG: *kernel-rootfs
+
+.use-debian/arm_test:
+ image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
+ variables:
+ MESA_ARM_BUILD_TAG: *debian-arm_build
+ MESA_IMAGE_PATH: "debian/arm_test"
+ MESA_IMAGE_TAG: *debian-arm_test
+ MESA_ROOTFS_TAG: *kernel-rootfs
+ needs:
+ - debian/arm_test
+
+# Native Windows docker builds
+#
+# Unlike the above Linux-based builds - including MinGW builds which
+# cross-compile for Windows - which use the freedesktop ci-templates, we
+# cannot use the same scheme here. As Windows lacks support for
+# Docker-in-Docker, and Podman does not run natively on Windows, we have
+# to open-code much of the same ourselves.
+#
+# This is achieved by first running in a native Windows shell instance
+# (host PowerShell) during the container stage to build and push the image,
+# and then executing inside Docker during the build stage.
+
+.windows-docker-vs2019:
+ variables:
+ MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}"
+ MESA_UPSTREAM_IMAGE: "$CI_REGISTRY/$FDO_UPSTREAM_REPO/$MESA_IMAGE_PATH:${MESA_IMAGE_TAG}"
+
+.windows_container_build:
+ inherit:
+ default: false
+ extends:
+ - .container
+ - .windows-docker-vs2019
+ rules:
+ - if: '$MICROSOFT_FARM == "offline"'
+ when: never
+ - !reference [.container-rules, rules]
+ variables:
+ GIT_STRATEGY: fetch # we do actually need the full repository though
+ MESA_BASE_IMAGE: None
+ tags:
+ - windows
+ - shell
+ - "1809"
+ - mesa
+ script:
+ - .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE ${DOCKERFILE} ${MESA_BASE_IMAGE}
+
+windows_build_vs2019:
+ inherit:
+ default: false
+ extends:
+ - .windows_container_build
+ variables:
+ MESA_IMAGE_PATH: &windows_build_image_path ${WINDOWS_X64_BUILD_PATH}
+ MESA_IMAGE_TAG: &windows_build_image_tag ${WINDOWS_X64_BUILD_TAG}
+ DOCKERFILE: Dockerfile_build
+ timeout: 2h 30m # LLVM takes ages
+
+windows_test_vs2019:
+ inherit:
+ default: false
+ extends:
+ - .windows_container_build
+ rules:
+ - if: '$MICROSOFT_FARM == "offline"'
+ when: never
+ - !reference [.ci-run-policy, rules]
+ variables:
+ MESA_IMAGE_PATH: &windows_test_image_path ${WINDOWS_X64_TEST_PATH}
+ MESA_IMAGE_TAG: &windows_test_image_tag ${WINDOWS_X64_BUILD_TAG}--${WINDOWS_X64_TEST_TAG}
+ DOCKERFILE: Dockerfile_test
+ # Right now this only needs the VS install to get DXIL.dll. Maybe see about decoupling this at some point
+ MESA_BASE_IMAGE_PATH: *windows_build_image_path
+ MESA_BASE_IMAGE_TAG: *windows_build_image_tag
+ MESA_BASE_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_BASE_IMAGE_PATH}:${MESA_BASE_IMAGE_TAG}"
+ script:
+ - .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE Dockerfile_test ${MESA_BASE_IMAGE}
+ needs:
+ - windows_build_vs2019
+
+.use-windows_build_vs2019:
+ inherit:
+ default: false
+ extends: .windows-docker-vs2019
+ image: "$MESA_IMAGE"
+ variables:
+ MESA_IMAGE_PATH: *windows_build_image_path
+ MESA_IMAGE_TAG: *windows_build_image_tag
+ needs:
+ - windows_build_vs2019
+
+.use-windows_test_vs2019:
+ inherit:
+ default: false
+ extends: .windows-docker-vs2019
+ image: "$MESA_IMAGE"
+ variables:
+ MESA_IMAGE_PATH: *windows_test_image_path
+ MESA_IMAGE_TAG: *windows_test_image_tag
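To see how the tag helpers at the top of this file compose an image reference, here is a small shell illustration; every value below is made up purely for the example:

CI_REGISTRY_IMAGE="registry.example.com/mesa/mesa"     # hypothetical
MESA_IMAGE_PATH="debian/x86_build"
MESA_IMAGE_TAG="2022-02-21-example"                    # hypothetical
MESA_BASE_TAG="2022-02-21-example-base"                # hypothetical
MESA_TEMPLATES_COMMIT="0123456789abcdef"               # hypothetical

# .set-image (FDO_DISTRIBUTION_TAG = "${MESA_IMAGE_TAG}--${MESA_TEMPLATES_COMMIT}"):
echo "${CI_REGISTRY_IMAGE}/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_TEMPLATES_COMMIT}"
#   registry.example.com/mesa/mesa/debian/x86_build:2022-02-21-example--0123456789abcdef

# .set-image-base-tag additionally folds in MESA_BASE_TAG:
echo "${CI_REGISTRY_IMAGE}/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
#   registry.example.com/mesa/mesa/debian/x86_build:2022-02-21-example--2022-02-21-example-base--0123456789abcdef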
diff --git a/lib/mesa/.gitlab-ci/container/lava_build.sh b/lib/mesa/.gitlab-ci/container/lava_build.sh
index bd0f04021..3b9216197 100755
--- a/lib/mesa/.gitlab-ci/container/lava_build.sh
+++ b/lib/mesa/.gitlab-ci/container/lava_build.sh
@@ -34,6 +34,7 @@ if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dtb"
KERNEL_IMAGE_NAME="Image"
elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
@@ -51,7 +52,7 @@ else
DEFCONFIG="arch/x86/configs/x86_64_defconfig"
DEVICE_TREES=""
KERNEL_IMAGE_NAME="bzImage"
- ARCH_PACKAGES="libva-dev"
+ ARCH_PACKAGES="libasound2-dev libcap-dev libfdt-dev libva-dev wayland-protocols"
fi
# Determine if we're in a cross build.
@@ -76,14 +77,20 @@ apt-get install -y --no-remove \
${ARCH_PACKAGES} \
automake \
bc \
+ clang \
cmake \
debootstrap \
git \
glslang-tools \
libdrm-dev \
libegl1-mesa-dev \
+ libxext-dev \
+ libfontconfig-dev \
libgbm-dev \
+ libgl-dev \
libgles2-mesa-dev \
+ libglu1-mesa-dev \
+ libglx-dev \
libpng-dev \
libssl-dev \
libudev-dev \
@@ -93,11 +100,14 @@ apt-get install -y --no-remove \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxkbcommon-dev \
+ ninja-build \
patch \
+ python-is-python3 \
python3-distutils \
python3-mako \
python3-numpy \
python3-serial \
+ unzip \
wget
@@ -119,7 +129,7 @@ fi
############### Building
STRIP_CMD="${GCC_ARCH}-strip"
-mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}
+mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH
############### Build apitrace
@@ -141,6 +151,13 @@ DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh
mv /deqp /lava-files/rootfs-${DEBIAN_ARCH}/.
+############### Build SKQP
+if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
+ SKQP_ARCH="arm64" . .gitlab-ci/container/build-skqp.sh
+ mv /skqp /lava-files/rootfs-${DEBIAN_ARCH}/.
+fi
+
+
############### Build piglit
PIGLIT_OPTS="-DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
mv /piglit /lava-files/rootfs-${DEBIAN_ARCH}/.
@@ -151,15 +168,32 @@ if [[ "$DEBIAN_ARCH" = "amd64" ]]; then
mv /va/bin/* /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/
fi
+############### Build Crosvm
+if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
+ . .gitlab-ci/container/build-crosvm.sh
+ mv /usr/local/bin/crosvm /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/
+ mv /usr/local/lib/$GCC_ARCH/libvirglrenderer.* /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/
+fi
+
############### Build libdrm
EXTRA_MESON_ARGS+=" -D prefix=/libdrm"
. .gitlab-ci/container/build-libdrm.sh
+
+############### Build local stuff for use by igt and kernel testing, which
+############### will reuse most of our container build process from a specific
+############### hash of the Mesa tree.
+if [[ -e ".gitlab-ci/local/build-rootfs.sh" ]]; then
+ . .gitlab-ci/local/build-rootfs.sh
+fi
+
+
############### Build kernel
. .gitlab-ci/container/build-kernel.sh
############### Delete rust, since the tests won't be compiling anything.
rm -rf /root/.cargo
+rm -rf /root/.rustup
############### Create rootfs
set +e
@@ -184,7 +218,6 @@ rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh
# Dependencies pulled during the creation of the rootfs may overwrite
# the built libdrm. Hence, we add it after the rootfs has been already
# created.
-mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH
find /libdrm/ -name lib\*\.so\* | xargs cp -t /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/.
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/
cp -Rp /libdrm/share /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/share
@@ -205,7 +238,7 @@ popd
. .gitlab-ci/container/container_post_build.sh
############### Upload the files!
-ci-fairy minio login $CI_JOB_JWT
+ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
FILES_TO_UPLOAD="lava-rootfs.tgz \
$KERNEL_IMAGE_NAME"
diff --git a/lib/mesa/.gitlab-ci/container/x86_64.config b/lib/mesa/.gitlab-ci/container/x86_64.config
index 1859540fb..8f2cec020 100644
--- a/lib/mesa/.gitlab-ci/container/x86_64.config
+++ b/lib/mesa/.gitlab-ci/container/x86_64.config
@@ -72,9 +72,11 @@ CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
CONFIG_SERIAL_8250_DW=y
CONFIG_CHROME_PLATFORMS=y
+CONFIG_KVM_AMD=m
#options for Intel devices
CONFIG_MFD_INTEL_LPSS_PCI=y
+CONFIG_KVM_INTEL=m
#options for KVM guests
CONFIG_FUSE_FS=y
@@ -98,3 +100,6 @@ CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_BLK_MQ_VIRTIO=y
CONFIG_TUN=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_VHOST_VSOCK=m
diff --git a/lib/mesa/.gitlab-ci/cross-xfail-s390x b/lib/mesa/.gitlab-ci/cross-xfail-s390x
index 2bfef18e6..3c3c8ac0c 100644
--- a/lib/mesa/.gitlab-ci/cross-xfail-s390x
+++ b/lib/mesa/.gitlab-ci/cross-xfail-s390x
@@ -1,2 +1 @@
-lp_test_arit
lp_test_format
diff --git a/lib/mesa/.gitlab-ci/crosvm-init.sh b/lib/mesa/.gitlab-ci/crosvm-init.sh
index 63124fe0f..15e68f835 100755
--- a/lib/mesa/.gitlab-ci/crosvm-init.sh
+++ b/lib/mesa/.gitlab-ci/crosvm-init.sh
@@ -1,27 +1,42 @@
#!/bin/sh
-set -ex
+set -e
+
+VSOCK_STDOUT=$1
+VSOCK_STDERR=$2
+VSOCK_TEMP_DIR=$3
mount -t proc none /proc
mount -t sysfs none /sys
-mount -t devtmpfs none /dev || echo possibly already mounted
mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
mount -t tmpfs tmpfs /tmp
-. /crosvm-env.sh
+. ${VSOCK_TEMP_DIR}/crosvm-env.sh
-# / is ro
-export PIGLIT_REPLAY_EXTRA_ARGS="$PIGLIT_REPLAY_EXTRA_ARGS --db-path /tmp/replayer-db"
+# The .gitlab-ci.yml script variable uses relative paths to the install
+# directory, so change to that directory before running `crosvm-script`
+cd "${CI_PROJECT_DIR}"
-if sh $CROSVM_TEST_SCRIPT; then
- touch /results/success
-fi
+# The exception is the dEQP binary, as it needs to run from its own directory
+[ -z "${DEQP_BIN_DIR}" ] || cd "${DEQP_BIN_DIR}"
-sleep 5 # Leave some time to get the last output flushed out
+# Use a FIFO to collect relevant error messages
+STDERR_FIFO=/tmp/crosvm-stderr.fifo
+mkfifo -m 600 ${STDERR_FIFO}
-poweroff -d -n -f || true
+dmesg --level crit,err,warn -w > ${STDERR_FIFO} &
+DMESG_PID=$!
+
+# Transfer the errors and crosvm-script output via a pair of virtio-vsocks
+socat -d -u pipe:${STDERR_FIFO} vsock-listen:${VSOCK_STDERR} &
+socat -d -U vsock-listen:${VSOCK_STDOUT} \
+ system:"stdbuf -eL sh ${VSOCK_TEMP_DIR}/crosvm-script.sh 2> ${STDERR_FIFO}; echo \$? > ${VSOCK_TEMP_DIR}/exit_code",nofork
-sleep 10 # Just in case init would exit before the kernel shuts down the VM
+kill ${DMESG_PID}
+wait
+
+sync
+poweroff -d -n -f || true
-exit 1
+sleep 1 # Just in case init would exit before the kernel shuts down the VM
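The socat plumbing above is easier to follow as a pair: the guest listens on fixed vsock ports, while the host (crosvm-runner.sh, later in this diff) connects to them through the CID it assigned to the VM. A simplified sketch, with a hypothetical CID of 74108864 and with the stderr redirection, exit-code capture and nofork options dropped for brevity:

# guest side (this script): forward the test script's stdout out of the VM
socat -d -U vsock-listen:5001 system:'stdbuf -eL sh /tmp-vsock.0/crosvm-script.sh'

# host side (crosvm-runner.sh): collect it on the runner's stdout
socat -u vsock-connect:74108864:5001,retry=200,interval=0.1 stdout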
diff --git a/lib/mesa/.gitlab-ci/crosvm-runner.sh b/lib/mesa/.gitlab-ci/crosvm-runner.sh
index b3ddfcf16..82cc37fb1 100755
--- a/lib/mesa/.gitlab-ci/crosvm-runner.sh
+++ b/lib/mesa/.gitlab-ci/crosvm-runner.sh
@@ -1,49 +1,125 @@
#!/bin/sh
-set -x
+set -e
-ln -sf $CI_PROJECT_DIR/install /install
+#
+# Helper to generate CIDs for virtio-vsock based communication with processes
+# running inside crosvm guests.
+#
+# A CID is a 32-bit Context Identifier to be assigned to a crosvm instance
+# and must be unique across the host system. For this purpose, let's take
+# the least significant 25 bits from CI_JOB_ID as a base and generate a 7-bit
+# prefix number to handle up to 128 concurrent crosvm instances per job runner.
+#
+# As a result, the following variables are set:
+# - VSOCK_CID: the crosvm unique CID to be passed as a run argument
+#
+# - VSOCK_STDOUT, VSOCK_STDERR: the port numbers the guest should accept
+# vsock connections on in order to transfer output messages
+#
+# - VSOCK_TEMP_DIR: the temporary directory path used to pass additional
+# context data towards the guest
+#
+set_vsock_context() {
+ [ -n "${CI_JOB_ID}" ] || {
+ echo "Missing or unset CI_JOB_ID env variable" >&2
+ exit 1
+ }
-export LD_LIBRARY_PATH=$CI_PROJECT_DIR/install/lib/
-export EGL_PLATFORM=surfaceless
+ local dir_prefix="/tmp-vsock."
+ local cid_prefix=0
+ unset VSOCK_TEMP_DIR
-export -p > /crosvm-env.sh
-export GALLIUM_DRIVER="$CROSVM_GALLIUM_DRIVER"
-export GALLIVM_PERF="nopt"
-export LIBGL_ALWAYS_SOFTWARE="true"
+ while [ ${cid_prefix} -lt 128 ]; do
+ VSOCK_TEMP_DIR=${dir_prefix}${cid_prefix}
+ mkdir "${VSOCK_TEMP_DIR}" >/dev/null 2>&1 && break || unset VSOCK_TEMP_DIR
+ cid_prefix=$((cid_prefix + 1))
+ done
-CROSVM_KERNEL_ARGS="root=my_root rw rootfstype=virtiofs loglevel=3 init=$CI_PROJECT_DIR/install/crosvm-init.sh ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"
+ [ -n "${VSOCK_TEMP_DIR}" ] || return 1
-# Temporary results dir because from the guest we cannot write to /
-mkdir -p /results
-mount -t tmpfs tmpfs /results
+ VSOCK_CID=$(((CI_JOB_ID & 0x1ffffff) | ((cid_prefix & 0x7f) << 25)))
+ VSOCK_STDOUT=5001
+ VSOCK_STDERR=5002
-mkdir -p /piglit/.gitlab-ci/piglit
-mount -t tmpfs tmpfs /piglit/.gitlab-ci/piglit
+ return 0
+}
+# The dEQP binary needs to run from the directory it's in
+if [ -n "${1##*.sh}" ] && [ -z "${1##*"deqp"*}" ]; then
+ DEQP_BIN_DIR=$(dirname "$1")
+ export DEQP_BIN_DIR
+fi
+
+set_vsock_context || { echo "Could not generate crosvm vsock CID" >&2; exit 1; }
+
+# Ensure cleanup on script exit
+trap 'exit ${exit_code}' INT TERM
+trap 'exit_code=$?; [ -z "${CROSVM_PID}${SOCAT_PIDS}" ] || kill ${CROSVM_PID} ${SOCAT_PIDS} >/dev/null 2>&1 || true; rm -rf ${VSOCK_TEMP_DIR}' EXIT
+
+# Securely pass the current variables to the crosvm environment
+echo "Variables passed through:"
+SCRIPT_DIR=$(readlink -en "${0%/*}")
+${SCRIPT_DIR}/common/generate-env.sh | tee ${VSOCK_TEMP_DIR}/crosvm-env.sh
+
+# Set the crosvm-script as the arguments of the current script
+echo "$@" > ${VSOCK_TEMP_DIR}/crosvm-script.sh
+
+# Setup networking
+/usr/sbin/iptables-legacy -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+echo 1 > /proc/sys/net/ipv4/ip_forward
+
+# Start background processes to receive output from guest
+socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDERR},retry=200,interval=0.1 stderr &
+SOCAT_PIDS=$!
+socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDOUT},retry=200,interval=0.1 stdout &
+SOCAT_PIDS="${SOCAT_PIDS} $!"
+
+# Prepare to start crosvm
unset DISPLAY
unset XDG_RUNTIME_DIR
-/usr/sbin/iptables-legacy -t nat -A POSTROUTING -o eth0 -j MASQUERADE
-echo 1 > /proc/sys/net/ipv4/ip_forward
+CROSVM_KERN_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"
+CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPT_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VSOCK_TEMP_DIR}"
-# Crosvm wants this
-syslogd > /dev/null
+[ "${CROSVM_GALLIUM_DRIVER}" = "llvmpipe" ] && \
+ CROSVM_LIBGL_ALWAYS_SOFTWARE=true || CROSVM_LIBGL_ALWAYS_SOFTWARE=false
-# We aren't testing LLVMPipe here, so we don't need to validate NIR on the host
-export NIR_VALIDATE=0
+set +e -x
+# We aren't testing the host driver here, so we don't need to validate NIR on the host
+NIR_DEBUG="novalidate" \
+LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE} \
+GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER} \
crosvm run \
- --gpu "$CROSVM_GPU_ARGS" \
- -m 4096 \
- -c $((FDO_CI_CONCURRENT > 1 ? FDO_CI_CONCURRENT - 1 : 1)) \
- --disable-sandbox \
- --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
- --host_ip=192.168.30.1 --netmask=255.255.255.0 --mac "AA:BB:CC:00:00:12" \
- -p "$CROSVM_KERNEL_ARGS" \
- /lava-files/bzImage
-
-mkdir -p $CI_PROJECT_DIR/results
-mv /results/* $CI_PROJECT_DIR/results/.
-
-test -f $CI_PROJECT_DIR/results/success
+ --gpu "${CROSVM_GPU_ARGS}" -m 4096 -c 2 --disable-sandbox \
+ --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
+ --host_ip "192.168.30.1" --netmask "255.255.255.0" --mac "AA:BB:CC:00:00:12" \
+ --cid ${VSOCK_CID} -p "${CROSVM_KERN_ARGS}" \
+ /lava-files/${KERNEL_IMAGE_NAME:-bzImage} > ${VSOCK_TEMP_DIR}/crosvm 2>&1 &
+
+# Wait for crosvm process to terminate
+CROSVM_PID=$!
+wait ${CROSVM_PID}
+CROSVM_RET=$?
+unset CROSVM_PID
+
+[ ${CROSVM_RET} -eq 0 ] && {
+ # socat background processes terminate gracefully when the remote peers exit
+ wait
+ unset SOCAT_PIDS
+ # The actual return code is the crosvm guest script's exit code
+ CROSVM_RET=$(cat ${VSOCK_TEMP_DIR}/exit_code 2>/dev/null)
+ # Force error when the guest script's exit code is not available
+ CROSVM_RET=${CROSVM_RET:-1}
+}
+
+# Show crosvm output on error to help with debugging
+[ ${CROSVM_RET} -eq 0 ] || {
+ set +x
+ echo "Dumping crosvm output.." >&2
+ cat ${VSOCK_TEMP_DIR}/crosvm >&2
+ set -x
+}
+
+exit ${CROSVM_RET}
diff --git a/lib/mesa/.gitlab-ci/deqp-runner.sh b/lib/mesa/.gitlab-ci/deqp-runner.sh
index ea1b81564..a04cbc929 100755
--- a/lib/mesa/.gitlab-ci/deqp-runner.sh
+++ b/lib/mesa/.gitlab-ci/deqp-runner.sh
@@ -1,7 +1,12 @@
#!/bin/sh
+echo -e "\e[0Ksection_start:$(date +%s):test_setup[collapsed=true]\r\e[0Kpreparing test setup"
+
set -ex
+# Needed so configuration files can contain paths to files in /install
+ln -sf $CI_PROJECT_DIR/install /install
+
if [ -z "$GPU_VERSION" ]; then
echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in .gitlab-ci/gpu-version-*.txt)'
exit 1
@@ -17,6 +22,15 @@ export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_C
RESULTS=`pwd`/${DEQP_RESULTS_DIR:-results}
mkdir -p $RESULTS
+# Ensure Mesa Shader Cache resides on tmpfs.
+SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache}
+SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache}
+
+findmnt -n tmpfs ${SHADER_CACHE_HOME} || findmnt -n tmpfs ${SHADER_CACHE_DIR} || {
+ mkdir -p ${SHADER_CACHE_DIR}
+ mount -t tmpfs -o nosuid,nodev,size=2G,mode=1755 tmpfs ${SHADER_CACHE_DIR}
+}
+
HANG_DETECTION_CMD=""
if [ -z "$DEQP_SUITE" ]; then
@@ -109,8 +123,6 @@ if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-skips.txt"
fi
-set +e
-
report_load() {
echo "System load: $(cut -d' ' -f1-3 < /proc/loadavg)"
echo "# of CPU cores: $(cat /proc/cpuinfo | grep processor | wc -l)"
@@ -133,7 +145,6 @@ if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
fi
GALLIUM_DRIVER=llvmpipe \
- GALLIVM_PERF="nopt" \
virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 &
sleep 1
@@ -146,7 +157,16 @@ if [ -z "$DEQP_SUITE" ]; then
if [ $DEQP_VER != vk -a $DEQP_VER != egl ]; then
export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --version-check `cat $INSTALL/VERSION | sed 's/[() ]/./g'`"
fi
+fi
+set +x
+echo -e "\e[0Ksection_end:$(date +%s):test_setup\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):deqp[collapsed=false]\r\e[0Kdeqp-runner"
+set -x
+
+set +e
+if [ -z "$DEQP_SUITE" ]; then
deqp-runner \
run \
--deqp $DEQP \
@@ -168,14 +188,20 @@ else
--flakes $INSTALL/$GPU_VERSION-flakes.txt \
--testlog-to-xml /deqp/executor/testlog-to-xml \
--fraction-start $CI_NODE_INDEX \
- --fraction $CI_NODE_TOTAL \
+ --fraction `expr $CI_NODE_TOTAL \* ${DEQP_FRACTION:-1}` \
--jobs ${FDO_CI_CONCURRENT:-4} \
$DEQP_RUNNER_OPTIONS
fi
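+# Note (illustrative values): with CI_NODE_TOTAL=4 and DEQP_FRACTION=2,
+# deqp-runner is passed --fraction 8, so each of the 4 nodes runs 1/8 of the
+# caselist (half of the suite overall).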
DEQP_EXITCODE=$?
-quiet report_load
+set +x
+echo -e "\e[0Ksection_end:$(date +%s):deqp\r\e[0K"
+
+report_load
+
+echo -e "\e[0Ksection_start:$(date +%s):test_post_process[collapsed=true]\r\e[0Kpost-processing test results"
+set -x
# Remove all but the first 50 individual XML files uploaded as artifacts, to
# save fd.o space when you break everything.
@@ -211,4 +237,6 @@ if [ -n "$FLAKES_CHANNEL" ]; then
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi
+echo -e "\e[0Ksection_end:$(date +%s):test_post_process\r\e[0K"
+
exit $DEQP_EXITCODE
diff --git a/lib/mesa/.gitlab-ci/download-git-cache.sh b/lib/mesa/.gitlab-ci/download-git-cache.sh
index d7c7d72c9..ece216017 100644
--- a/lib/mesa/.gitlab-ci/download-git-cache.sh
+++ b/lib/mesa/.gitlab-ci/download-git-cache.sh
@@ -5,7 +5,7 @@ set -o xtrace
# if we run this script outside of gitlab-ci for testing, ensure
# we got meaningful variables
-CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/mesa}
+CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/$CI_PROJECT_NAME}
if [[ -e $CI_PROJECT_DIR/.git ]]
then
@@ -16,8 +16,8 @@ fi
TMP_DIR=$(mktemp -d)
echo "Downloading archived master..."
-/usr/bin/wget -O $TMP_DIR/mesa.tar.gz \
- https://${MINIO_HOST}/git-cache/${FDO_UPSTREAM_REPO}/mesa.tar.gz
+/usr/bin/wget -O $TMP_DIR/$CI_PROJECT_NAME.tar.gz \
+ https://${MINIO_HOST}/git-cache/${FDO_UPSTREAM_REPO}/$CI_PROJECT_NAME.tar.gz
# check wget error code
if [[ $? -ne 0 ]]
@@ -31,6 +31,6 @@ set -e
rm -rf "$CI_PROJECT_DIR"
echo "Extracting tarball into '$CI_PROJECT_DIR'..."
mkdir -p "$CI_PROJECT_DIR"
-tar xzf "$TMP_DIR/mesa.tar.gz" -C "$CI_PROJECT_DIR"
+tar xzf "$TMP_DIR/$CI_PROJECT_NAME.tar.gz" -C "$CI_PROJECT_DIR"
rm -rf "$TMP_DIR"
chmod a+w "$CI_PROJECT_DIR"
diff --git a/lib/mesa/.gitlab-ci/image-tags.yml b/lib/mesa/.gitlab-ci/image-tags.yml
new file mode 100644
index 000000000..34985f5a3
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/image-tags.yml
@@ -0,0 +1,21 @@
+variables:
+ DEBIAN_X86_BUILD_BASE_IMAGE: "debian/x86_build-base"
+ DEBIAN_BASE_TAG: "2022-02-21-libdrm"
+
+ DEBIAN_X86_BUILD_IMAGE_PATH: "debian/x86_build"
+ DEBIAN_BUILD_TAG: "2022-02-21-libdrm"
+
+ DEBIAN_X86_TEST_BASE_IMAGE: "debian/x86_test-base"
+
+ DEBIAN_X86_TEST_IMAGE_PATH: "debian/x86_test-gl"
+ DEBIAN_X86_TEST_GL_TAG: "2022-04-07-virgl-crosvm"
+ DEBIAN_X86_TEST_VK_TAG: "2022-04-05-deqp-runner"
+
+ FEDORA_X86_BUILD_TAG: "2022-03-18-spirv-tools-5"
+ KERNEL_ROOTFS_TAG: "2022-04-07-prefix-skqp"
+
+ WINDOWS_X64_BUILD_PATH: "windows/x64_build"
+ WINDOWS_X64_BUILD_TAG: "2022-20-02-base_split"
+
+ WINDOWS_X64_TEST_PATH: "windows/x64_test"
+ WINDOWS_X64_TEST_TAG: "2022-04-13-dozen_ci"
diff --git a/lib/mesa/.gitlab-ci/lava/lava-gitlab-ci.yml b/lib/mesa/.gitlab-ci/lava/lava-gitlab-ci.yml
index 7bd368468..6c9cd5652 100755
--- a/lib/mesa/.gitlab-ci/lava/lava-gitlab-ci.yml
+++ b/lib/mesa/.gitlab-ci/lava/lava-gitlab-ci.yml
@@ -14,20 +14,23 @@
BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${ARCH}"
BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${ARCH}"
# per-job build artifacts
- MESA_BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/mesa-${ARCH}.tar.gz"
+ BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/${CI_PROJECT_NAME}-${ARCH}.tar.gz"
JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz"
JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.gz"
+ MINIO_RESULTS_UPLOAD: "${JOB_ARTIFACTS_BASE}"
PIGLIT_NO_WINDOW: 1
VISIBILITY_GROUP: "Collabora+fdo"
script:
- ./artifacts/lava/lava-submit.sh
artifacts:
- name: "mesa_${CI_JOB_NAME}"
+ name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
when: always
paths:
- results/
exclude:
- results/*.shader_cache
+ tags:
+ - $RUNNER_TAG
after_script:
- wget -q "https://${JOB_RESULTS_PATH}" -O- | tar -xz
@@ -85,7 +88,7 @@
.lava-traces-base:
variables:
- HWCI_TEST_SCRIPT: "/install/piglit/run.sh"
+ HWCI_TEST_SCRIPT: "/install/piglit/piglit-traces.sh"
artifacts:
reports:
junit: results/junit.xml
diff --git a/lib/mesa/.gitlab-ci/lava/lava-pytest.sh b/lib/mesa/.gitlab-ci/lava/lava-pytest.sh
new file mode 100755
index 000000000..311a2c453
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/lava/lava-pytest.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Copyright (C) 2022 Collabora Limited
+# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# This script runs unit/integration tests related to the LAVA CI tools
+
+set -ex
+
+TEST_DIR=${CI_PROJECT_DIR}/.gitlab-ci/tests
+
+PYTHONPATH="${TEST_DIR}:${PYTHONPATH}" python3 -m \
+ pytest "${TEST_DIR}" \
+ -W ignore::DeprecationWarning \
+ --junitxml=artifacts/ci_scripts_report.xml
diff --git a/lib/mesa/.gitlab-ci/lava/lava-submit.sh b/lib/mesa/.gitlab-ci/lava/lava-submit.sh
index 1d3a24531..a61665dee 100755
--- a/lib/mesa/.gitlab-ci/lava/lava-submit.sh
+++ b/lib/mesa/.gitlab-ci/lava/lava-submit.sh
@@ -14,15 +14,16 @@ fi
rm -rf results
mkdir -p results/job-rootfs-overlay/
-# LAVA always uploads to MinIO when necessary as we don't have direct upload
-# from the DUT
-export PIGLIT_REPLAY_UPLOAD_TO_MINIO=1
cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
-artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
+cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
+
+# Prepare env vars for upload.
+KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \
+ artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
-ci-fairy minio login "${CI_JOB_JWT}"
+ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
ci-fairy minio cp job-rootfs-overlay.tar.gz "minio://${JOB_ROOTFS_OVERLAY_PATH}"
touch results/lava.log
@@ -30,16 +31,16 @@ tail -f results/lava.log &
artifacts/lava/lava_job_submitter.py \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
- --base-system-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
- --mesa-build-url "${FDO_HTTP_CACHE_URI:-}https://${MESA_BUILD_PATH}" \
+ --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
+ --kernel-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
+ --build-url "${FDO_HTTP_CACHE_URI:-}https://${BUILD_PATH}" \
--job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
- --job-artifacts-base ${JOB_ARTIFACTS_BASE} \
--job-timeout ${JOB_TIMEOUT:-30} \
--first-stage-init artifacts/ci-common/init-stage1.sh \
--ci-project-dir ${CI_PROJECT_DIR} \
--device-type ${DEVICE_TYPE} \
--dtb ${DTB} \
- --jwt "${CI_JOB_JWT}" \
+ --jwt-file "${CI_JOB_JWT_FILE}" \
--kernel-image-name ${KERNEL_IMAGE_NAME} \
--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
--boot-method ${BOOT_METHOD} \
diff --git a/lib/mesa/.gitlab-ci/lava/lava_job_submitter.py b/lib/mesa/.gitlab-ci/lava/lava_job_submitter.py
index bf2032c4f..0975a3a0d 100755
--- a/lib/mesa/.gitlab-ci/lava/lava_job_submitter.py
+++ b/lib/mesa/.gitlab-ci/lava/lava_job_submitter.py
@@ -25,31 +25,33 @@
"""Send a job to LAVA, track it and collect log back"""
import argparse
-import lavacli
-import os
+import pathlib
import sys
import time
import traceback
import urllib.parse
import xmlrpc
-import yaml
from datetime import datetime, timedelta
+from os import getenv
+
+import lavacli
+import yaml
from lavacli.utils import loader
-# Timeout in minutes to decide if the device from the dispatched LAVA job has
+# Timeout in seconds to decide if the device from the dispatched LAVA job has
# hung or not due to the lack of new log output.
-DEVICE_HANGING_TIMEOUT_MIN = 5
+DEVICE_HANGING_TIMEOUT_SEC = int(getenv("LAVA_DEVICE_HANGING_TIMEOUT_SEC", 5*60))
# How many seconds the script should wait before trying a new polling iteration to
# check if the dispatched LAVA job is running or waiting in the job queue.
-WAIT_FOR_DEVICE_POLLING_TIME_SEC = 10
+WAIT_FOR_DEVICE_POLLING_TIME_SEC = int(getenv("LAVA_WAIT_FOR_DEVICE_POLLING_TIME_SEC", 10))
# How many seconds to wait between log output LAVA RPC calls.
-LOG_POLLING_TIME_SEC = 5
+LOG_POLLING_TIME_SEC = int(getenv("LAVA_LOG_POLLING_TIME_SEC", 5))
# How many retries should be made when a timeout happens.
-NUMBER_OF_RETRIES_TIMEOUT_DETECTION = 2
+NUMBER_OF_RETRIES_TIMEOUT_DETECTION = int(getenv("LAVA_NUMBER_OF_RETRIES_TIMEOUT_DETECTION", 2))
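+# All of the above can be tuned from the job environment without editing this
+# file, e.g. (hypothetical values):
+#   LAVA_DEVICE_HANGING_TIMEOUT_SEC=600 LAVA_LOG_POLLING_TIME_SEC=10 ./lava_job_submitter.py ...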
def print_log(msg):
@@ -59,6 +61,11 @@ def fatal_err(msg):
print_log(msg)
sys.exit(1)
+
+def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
+ return "".join(line for line in yaml_data.splitlines(True) if hide_tag not in line)
+
+
def generate_lava_yaml(args):
# General metadata and permissions, plus also inexplicably kernel arguments
values = {
@@ -67,7 +74,7 @@ def generate_lava_yaml(args):
'visibility': { 'group': [ args.visibility_group ] },
'priority': 75,
'context': {
- 'extra_nfsroot_args': ' init=/init rootwait minio_results={}'.format(args.job_artifacts_base)
+ 'extra_nfsroot_args': ' init=/init rootwait usbcore.quirks=0bda:8153:k'
},
'timeouts': {
'job': {
@@ -86,10 +93,10 @@ def generate_lava_yaml(args):
'to': 'tftp',
'os': 'oe',
'kernel': {
- 'url': '{}/{}'.format(args.base_system_url_prefix, args.kernel_image_name),
+ 'url': '{}/{}'.format(args.kernel_url_prefix, args.kernel_image_name),
},
'nfsrootfs': {
- 'url': '{}/lava-rootfs.tgz'.format(args.base_system_url_prefix),
+ 'url': '{}/lava-rootfs.tgz'.format(args.rootfs_url_prefix),
'compression': 'gz',
}
}
@@ -97,7 +104,7 @@ def generate_lava_yaml(args):
deploy['kernel']['type'] = args.kernel_image_type
if args.dtb:
deploy['dtb'] = {
- 'url': '{}/{}.dtb'.format(args.base_system_url_prefix, args.dtb)
+ 'url': '{}/{}.dtb'.format(args.kernel_url_prefix, args.dtb)
}
# always boot over NFS
@@ -140,15 +147,22 @@ def generate_lava_yaml(args):
# - fetch and unpack per-job environment from lava-submit.sh
# - exec .gitlab-ci/common/init-stage2.sh
init_lines = []
+
with open(args.first_stage_init, 'r') as init_sh:
init_lines += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]
+
+ with open(args.jwt_file) as jwt_file:
+ init_lines += [
+ "set +x",
+ f'echo -n "{jwt_file.read()}" > "{args.jwt_file}" # HIDEME',
+ "set -x",
+ ]
+
init_lines += [
'mkdir -p {}'.format(args.ci_project_dir),
- 'wget -S --progress=dot:giga -O- {} | tar -xz -C {}'.format(args.mesa_build_url, args.ci_project_dir),
+ 'wget -S --progress=dot:giga -O- {} | tar -xz -C {}'.format(args.build_url, args.ci_project_dir),
'wget -S --progress=dot:giga -O- {} | tar -xz -C /'.format(args.job_rootfs_overlay_url),
- 'set +x',
- 'export CI_JOB_JWT="{}"'.format(args.jwt),
- 'set -x',
+ f'echo "export CI_JOB_JWT_FILE={args.jwt_file}" >> /set-job-env-vars.sh',
'exec /init-stage2.sh',
]
test['definitions'][0]['repository']['run']['steps'] = init_lines
@@ -192,7 +206,6 @@ def _call_proxy(fn, *args):
fatal_err("A protocol error occurred (Err {} {})".format(err.errcode, err.errmsg))
else:
time.sleep(15)
- pass
except xmlrpc.client.Fault as err:
traceback.print_exc()
fatal_err("FATAL: Fault: {} (code: {})".format(err.faultString, err.faultCode))
@@ -203,8 +216,8 @@ def get_job_results(proxy, job_id, test_suite, test_case):
results_yaml = _call_proxy(proxy.results.get_testjob_results_yaml, job_id)
results = yaml.load(results_yaml, Loader=loader(False))
for res in results:
- metadata = res['metadata']
- if not 'result' in metadata or metadata['result'] != 'fail':
+ metadata = res["metadata"]
+ if "result" not in metadata or metadata["result"] != "fail":
continue
if 'error_type' in metadata and metadata['error_type'] == "Infrastructure":
print_log("LAVA job {} failed with Infrastructure Error. Retry.".format(job_id))
@@ -241,8 +254,7 @@ def follow_job_execution(proxy, job_id):
last_time_logs = datetime.now()
while not finished:
(finished, data) = _call_proxy(proxy.scheduler.jobs.logs, job_id, line_count)
- logs = yaml.load(str(data), Loader=loader(False))
- if logs:
+ if logs := yaml.load(str(data), Loader=loader(False)):
# Reset the timeout
last_time_logs = datetime.now()
for line in logs:
@@ -251,7 +263,7 @@ def follow_job_execution(proxy, job_id):
line_count += len(logs)
else:
- time_limit = timedelta(minutes=DEVICE_HANGING_TIMEOUT_MIN)
+ time_limit = timedelta(seconds=DEVICE_HANGING_TIMEOUT_SEC)
if datetime.now() - last_time_logs > time_limit:
print_log("LAVA job {} doesn't advance (machine got hung?). Retry.".format(job_id))
return False
@@ -279,23 +291,7 @@ def submit_job(proxy, job_file):
return _call_proxy(proxy.scheduler.jobs.submit, job_file)
-def main(args):
- proxy = setup_lava_proxy()
-
- yaml_file = generate_lava_yaml(args)
-
- if args.dump_yaml:
- censored_args = args
- censored_args.jwt = "jwt-hidden"
- print(generate_lava_yaml(censored_args))
-
- if args.validate_only:
- ret = validate_job(proxy, yaml_file)
- if not ret:
- fatal_err("Error in LAVA job definition")
- print("LAVA job definition validated successfully")
- return
-
+def retriable_follow_job(proxy, yaml_file):
retry_count = NUMBER_OF_RETRIES_TIMEOUT_DETECTION
while retry_count >= 0:
@@ -315,23 +311,45 @@ def main(args):
show_job_data(proxy, job_id)
- if get_job_results(proxy, job_id, "0_mesa", "mesa") == True:
- break
+ if get_job_results(proxy, job_id, "0_mesa", "mesa") == True:
+ break
+ else:
+ # The script attempted all the retries; the job appears to have failed.
+ return False
+ return True
-if __name__ == '__main__':
- # given that we proxy from DUT -> LAVA dispatcher -> LAVA primary -> us ->
- # GitLab runner -> GitLab primary -> user, safe to say we don't need any
- # more buffering
- sys.stdout.reconfigure(line_buffering=True)
- sys.stderr.reconfigure(line_buffering=True)
+
+def main(args):
+ proxy = setup_lava_proxy()
+
+ yaml_file = generate_lava_yaml(args)
+
+ if args.dump_yaml:
+ print(hide_sensitive_data(generate_lava_yaml(args)))
+
+ if args.validate_only:
+ ret = validate_job(proxy, yaml_file)
+ if not ret:
+ fatal_err("Error in LAVA job definition")
+ print("LAVA job definition validated successfully")
+ return
+
+ if not retriable_follow_job(proxy, yaml_file):
+ fatal_err(
+ "Job failed after it exceeded the number of"
+ f"{NUMBER_OF_RETRIES_TIMEOUT_DETECTION} retries."
+ )
+
+
+def create_parser():
parser = argparse.ArgumentParser("LAVA job submitter")
parser.add_argument("--pipeline-info")
- parser.add_argument("--base-system-url-prefix")
- parser.add_argument("--mesa-build-url")
+ parser.add_argument("--rootfs-url-prefix")
+ parser.add_argument("--kernel-url-prefix")
+ parser.add_argument("--build-url")
parser.add_argument("--job-rootfs-overlay-url")
- parser.add_argument("--job-artifacts-base")
parser.add_argument("--job-timeout", type=int)
parser.add_argument("--first-stage-init")
parser.add_argument("--ci-project-dir")
@@ -341,11 +359,22 @@ if __name__ == '__main__':
parser.add_argument("--kernel-image-type", nargs='?', default="")
parser.add_argument("--boot-method")
parser.add_argument("--lava-tags", nargs='?', default="")
- parser.add_argument("--jwt")
+ parser.add_argument("--jwt-file", type=pathlib.Path)
parser.add_argument("--validate-only", action='store_true')
parser.add_argument("--dump-yaml", action='store_true')
parser.add_argument("--visibility-group")
+ return parser
+
+if __name__ == "__main__":
+ # given that we proxy from DUT -> LAVA dispatcher -> LAVA primary -> us ->
+ # GitLab runner -> GitLab primary -> user, safe to say we don't need any
+ # more buffering
+ sys.stdout.reconfigure(line_buffering=True)
+ sys.stderr.reconfigure(line_buffering=True)
+
+ parser = create_parser()
+
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
diff --git a/lib/mesa/.gitlab-ci/meson/build.sh b/lib/mesa/.gitlab-ci/meson/build.sh
index 56391e51c..d052397cf 100755
--- a/lib/mesa/.gitlab-ci/meson/build.sh
+++ b/lib/mesa/.gitlab-ci/meson/build.sh
@@ -68,7 +68,6 @@ meson _build --native-file=native.file \
-D cpp_args="$(echo -n $CPP_ARGS)" \
-D libunwind=${UNWIND} \
${DRI_LOADERS} \
- -D dri-drivers=${DRI_DRIVERS:-[]} \
${GALLIUM_ST} \
-D gallium-drivers=${GALLIUM_DRIVERS:-[]} \
-D vulkan-drivers=${VULKAN_DRIVERS:-[]} \
diff --git a/lib/mesa/.gitlab-ci/piglit/piglit-runner.sh b/lib/mesa/.gitlab-ci/piglit/piglit-runner.sh
index d228f6cea..ad5445c64 100755
--- a/lib/mesa/.gitlab-ci/piglit/piglit-runner.sh
+++ b/lib/mesa/.gitlab-ci/piglit/piglit-runner.sh
@@ -17,6 +17,31 @@ export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_C
RESULTS=`pwd`/${PIGLIT_RESULTS_DIR:-results}
mkdir -p $RESULTS
+# Ensure Mesa Shader Cache resides on tmpfs.
+SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache}
+SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache}
+
+findmnt -n tmpfs ${SHADER_CACHE_HOME} || findmnt -n tmpfs ${SHADER_CACHE_DIR} || {
+ mkdir -p ${SHADER_CACHE_DIR}
+ mount -t tmpfs -o nosuid,nodev,size=2G,mode=1755 tmpfs ${SHADER_CACHE_DIR}
+}
+
+if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
+ # piglit uses virpipe, while virgl_test_server uses llvmpipe
+ export GALLIUM_DRIVER="$GALLIUM_DRIVER"
+
+ VTEST_ARGS="--use-egl-surfaceless"
+ if [ "$VIRGL_HOST_API" = "GLES" ]; then
+ VTEST_ARGS="$VTEST_ARGS --use-gles"
+ fi
+
+ GALLIUM_DRIVER=llvmpipe \
+ GALLIVM_PERF="nopt" \
+ virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 &
+
+ sleep 1
+fi
+
if [ -n "$PIGLIT_FRACTION" -o -n "$CI_NODE_INDEX" ]; then
FRACTION=`expr ${PIGLIT_FRACTION:-1} \* ${CI_NODE_TOTAL:-1}`
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction $FRACTION"
diff --git a/lib/mesa/.gitlab-ci/piglit/piglit-traces.sh b/lib/mesa/.gitlab-ci/piglit/piglit-traces.sh
new file mode 100755
index 000000000..8a913b37b
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/piglit/piglit-traces.sh
@@ -0,0 +1,241 @@
+#!/bin/sh
+
+set -ex
+
+INSTALL=$(realpath -s "$PWD"/install)
+MINIO_ARGS="--credentials=/tmp/.minio_credentials"
+
+RESULTS=$(realpath -s "$PWD"/results)
+mkdir -p "$RESULTS"
+
+# Set up the driver environment.
+# Modifying LD_LIBRARY_PATH directly here may cause problems when
+# using a command wrapper. Hence, we will just set it when running the
+# command.
+export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"
+
+# Sanity check to ensure that our environment is sufficient to make our tests
+# run against the Mesa built by CI, rather than any installed distro version.
+MESA_VERSION=$(head -1 "$INSTALL/VERSION" | sed 's/\./\\./g')
+
+print_red() {
+ RED='\033[0;31m'
+ NC='\033[0m' # No Color
+ printf "${RED}"
+ "$@"
+ printf "${NC}"
+}
+
+# wrapper to suppress +x output to avoid spamming the log
+quiet() {
+ set +x
+ "$@"
+ set -x
+}
+
+if [ "$VK_DRIVER" ]; then
+
+ ### VULKAN ###
+
+ # Set the Vulkan driver to use.
+ export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"
+
+ # Set environment for Wine.
+ export WINEDEBUG="-all"
+ export WINEPREFIX="/dxvk-wine64"
+ export WINEESYNC=1
+
+ # Set environment for DXVK.
+ export DXVK_LOG_LEVEL="none"
+ export DXVK_STATE_CACHE=0
+
+ # Set environment for gfxreconstruct executables.
+ export PATH="/gfxreconstruct/build/bin:$PATH"
+
+ SANITY_MESA_VERSION_CMD="vulkaninfo"
+
+ HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"
+
+
+ # Set up the Window System Interface (WSI)
+
+ if [ ${TEST_START_XORG:-0} -eq 1 ]; then
+ "$INSTALL"/common/start-x.sh "$INSTALL"
+ export DISPLAY=:0
+ else
+ # Run vulkan against the host's running X server (xvfb doesn't
+ # have DRI3 support).
+ # Set the DISPLAY env variable in each gitlab-runner's
+ # configuration file:
+ # https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section
+ quiet printf "%s%s\n" "Running against the host's X server. " \
+ "DISPLAY is \"$DISPLAY\"."
+ fi
+else
+
+ ### GL/ES ###
+
+ # Set environment for apitrace executable.
+ export PATH="/apitrace/build:$PATH"
+
+ # Our rootfs may not have "less", which apitrace uses during
+ # apitrace dump
+ export PAGER=cat
+
+ SANITY_MESA_VERSION_CMD="wflinfo"
+
+ HANG_DETECTION_CMD=""
+
+
+ # Set up the platform windowing system.
+
+ if [ "x$EGL_PLATFORM" = "xsurfaceless" ]; then
+
+ # Use the surfaceless EGL platform.
+ export DISPLAY=
+ export WAFFLE_PLATFORM="surfaceless_egl"
+
+ SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"
+
+ if [ "x$GALLIUM_DRIVER" = "xvirpipe" ]; then
+ # piglit uses virpipe, while virgl_test_server uses llvmpipe
+ export GALLIUM_DRIVER="$GALLIUM_DRIVER"
+
+ LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
+ GALLIUM_DRIVER=llvmpipe \
+ VTEST_USE_EGL_SURFACELESS=1 \
+ VTEST_USE_GLES=1 \
+ virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &
+
+ sleep 1
+ fi
+ elif [ "x$PIGLIT_PLATFORM" = "xgbm" ]; then
+ SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
+ elif [ "x$PIGLIT_PLATFORM" = "xmixed_glx_egl" ]; then
+ # It is assumed that you have already brought up your X server before
+ # calling this script.
+ SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
+ else
+ SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
+ RUN_CMD_WRAPPER="xvfb-run --server-args=\"-noreset\" sh -c"
+ fi
+fi
+
+if [ "$ZINK_USE_LAVAPIPE" ]; then
+ export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/lvp_icd.x86_64.json"
+fi
+
+# If the job is parallel at the gitlab job level, will take the corresponding
+# fraction of the caselist.
+if [ -n "$CI_NODE_INDEX" ]; then
+ USE_CASELIST=1
+fi
+
+replay_minio_upload_images() {
+ find "$RESULTS/$__PREFIX" -type f -name "*.png" -printf "%P\n" \
+ | while read -r line; do
+
+ __TRACE="${line%-*-*}"
+ if grep -q "^$__PREFIX/$__TRACE: pass$" ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig"; then
+ if [ "x$CI_PROJECT_PATH" != "x$FDO_UPSTREAM_REPO" ]; then
+ continue
+ fi
+ __MINIO_PATH="$PIGLIT_REPLAY_REFERENCE_IMAGES_BASE"
+ __DESTINATION_FILE_PATH="${line##*-}"
+ if wget -q --method=HEAD "https://${__MINIO_PATH}/${__DESTINATION_FILE_PATH}" 2>/dev/null; then
+ continue
+ fi
+ else
+ __MINIO_PATH="$JOB_ARTIFACTS_BASE"
+ __DESTINATION_FILE_PATH="$__MINIO_TRACES_PREFIX/${line##*-}"
+ fi
+
+ ci-fairy minio cp $MINIO_ARGS "$RESULTS/$__PREFIX/$line" \
+ "minio://${__MINIO_PATH}/${__DESTINATION_FILE_PATH}"
+ done
+}
+
+SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""
+
+if [ -d results ]; then
+ cd results && rm -rf ..?* .[!.]* *
+fi
+cd /piglit
+
+if [ -n "$USE_CASELIST" ]; then
+ PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
+ PIGLIT_GENTESTS="./piglit print-cmd $PIGLIT_TESTS replay --format \"{name}\" > /tmp/case-list.txt"
+ RUN_GENTESTS="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $PIGLIT_GENTESTS"
+
+ eval $RUN_GENTESTS
+
+ sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
+
+ PIGLIT_TESTS="--test-list /tmp/case-list.txt"
+fi
+
+PIGLIT_OPTIONS=$(printf "%s" "$PIGLIT_OPTIONS")
+
+PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
+
+PIGLIT_CMD="./piglit run --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS replay "$(/usr/bin/printf "%q" "$RESULTS")
+
+RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD && $HANG_DETECTION_CMD $PIGLIT_CMD"
+
+if [ "$RUN_CMD_WRAPPER" ]; then
+ RUN_CMD="set +e; $RUN_CMD_WRAPPER "$(/usr/bin/printf "%q" "$RUN_CMD")"; set -e"
+fi
+
+ci-fairy minio login $MINIO_ARGS --token-file "${CI_JOB_JWT_FILE}"
+
+# The replayer doesn't do any size or checksum verification for the traces in
+# the replayer db, so if we had to restart the system due to intermittent device
+# errors (or tried to cache replayer-db between runs, which would be nice to
+# have), you could get a corrupted local trace that would spuriously fail the
+# run.
+rm -rf replayer-db
+
+eval $RUN_CMD
+
+if [ $? -ne 0 ]; then
+ printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
+fi
+
+ARTIFACTS_BASE_URL="https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts"
+
+./piglit summary aggregate "$RESULTS" -o junit.xml
+
+PIGLIT_RESULTS="${PIGLIT_RESULTS:-replay}"
+RESULTSFILE="$RESULTS/$PIGLIT_RESULTS.txt"
+mkdir -p .gitlab-ci/piglit
+./piglit summary console "$RESULTS"/results.json.bz2 \
+ | tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
+ | head -n -1 | grep -v ": pass" \
+ | sed '/^summary:/Q' \
+ > $RESULTSFILE
+
+__PREFIX="trace/$PIGLIT_REPLAY_DEVICE_NAME"
+__MINIO_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL"
+__MINIO_TRACES_PREFIX="traces"
+
+if [ "x$PIGLIT_REPLAY_SUBCOMMAND" != "xprofile" ]; then
+ quiet replay_minio_upload_images
+fi
+
+
+if [ ! -s $RESULTSFILE ]; then
+ exit 0
+fi
+
+./piglit summary html --exclude-details=pass \
+"$RESULTS"/summary "$RESULTS"/results.json.bz2
+
+find "$RESULTS"/summary -type f -name "*.html" -print0 \
+ | xargs -0 sed -i 's%<img src="file://'"${RESULTS}"'.*-\([0-9a-f]*\)\.png%<img src="https://'"${JOB_ARTIFACTS_BASE}"'/traces/\1.png%g'
+find "$RESULTS"/summary -type f -name "*.html" -print0 \
+ | xargs -0 sed -i 's%<img src="file://%<img src="https://'"${PIGLIT_REPLAY_REFERENCE_IMAGES_BASE}"'/%g'
+
+quiet print_red echo "Failures in traces:"
+cat $RESULTSFILE
+quiet print_red echo "Review the image changes and get the new checksums at: ${ARTIFACTS_BASE_URL}/results/summary/problems.html"
+exit 1
diff --git a/lib/mesa/.gitlab-ci/prepare-artifacts.sh b/lib/mesa/.gitlab-ci/prepare-artifacts.sh
index cbbe0a318..bb96de36a 100755
--- a/lib/mesa/.gitlab-ci/prepare-artifacts.sh
+++ b/lib/mesa/.gitlab-ci/prepare-artifacts.sh
@@ -47,11 +47,12 @@ mkdir -p artifacts/
tar -cf artifacts/install.tar install
cp -Rp .gitlab-ci/common artifacts/ci-common
cp -Rp .gitlab-ci/lava artifacts/
+cp -Rp .gitlab-ci/valve artifacts/
if [ -n "$MINIO_ARTIFACT_NAME" ]; then
# Pass needed files to the test stage
MINIO_ARTIFACT_NAME="$MINIO_ARTIFACT_NAME.tar.gz"
gzip -c artifacts/install.tar > ${MINIO_ARTIFACT_NAME}
- ci-fairy minio login $CI_JOB_JWT
+ ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
ci-fairy minio cp ${MINIO_ARTIFACT_NAME} minio://${PIPELINE_ARTIFACTS_BASE}/${MINIO_ARTIFACT_NAME}
fi
diff --git a/lib/mesa/.gitlab-ci/skqp-runner.sh b/lib/mesa/.gitlab-ci/skqp-runner.sh
new file mode 100755
index 000000000..68aca2d33
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/skqp-runner.sh
@@ -0,0 +1,153 @@
+#!/bin/sh
+#
+# Copyright (C) 2022 Collabora Limited
+# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
+copy_tests_files() (
+ # Copy either the unit test or the render test files for the driver given by
+ # the GPU_VERSION variable.
+ # If there is no test file at the expected location, this function
+ # returns error code 1.
+ SKQP_BACKEND="${1}"
+ SKQP_FILE_PREFIX="${INSTALL}/${GPU_VERSION}-skqp"
+
+ if echo "${SKQP_BACKEND}" | grep -qE 'vk|gl(es)?'
+ then
+ SKQP_RENDER_TESTS_FILE="${SKQP_FILE_PREFIX}-${SKQP_BACKEND}_rendertests.txt"
+ [ -f "${SKQP_RENDER_TESTS_FILE}" ] || return 1
+ cp "${SKQP_RENDER_TESTS_FILE}" "${SKQP_ASSETS_DIR}"/skqp/rendertests.txt
+ return 0
+ fi
+
+ # The unittests.txt path is hardcoded inside the assets directory,
+ # which is why it needs to be a special case.
+ if echo "${SKQP_BACKEND}" | grep -qE "unitTest"
+ then
+ SKQP_UNIT_TESTS_FILE="${SKQP_FILE_PREFIX}_unittests.txt"
+ [ -f "${SKQP_UNIT_TESTS_FILE}" ] || return 1
+ cp "${SKQP_UNIT_TESTS_FILE}" "${SKQP_ASSETS_DIR}"/skqp/unittests.txt
+ fi
+)
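+
+# Illustrative example (hypothetical values): with GPU_VERSION=freedreno-a630
+# and SKQP_BACKEND=gles, copy_tests_files looks for
+# ${INSTALL}/freedreno-a630-skqp-gles_rendertests.txt and installs it as
+# ${SKQP_ASSETS_DIR}/skqp/rendertests.txt.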
+
+test_vk_backend() {
+ if echo "${SKQP_BACKENDS}" | grep -qE 'vk'
+ then
+ if [ -n "$VK_DRIVER" ]; then
+ return 0
+ fi
+
+ echo "VK_DRIVER environment variable is missing."
+ VK_DRIVERS=$(ls "$INSTALL"/share/vulkan/icd.d/ | cut -f 1 -d '_')
+ if [ -n "${VK_DRIVERS}" ]
+ then
+ echo "Please set VK_DRIVER to the correct driver from the list:"
+ echo "${VK_DRIVERS}"
+ fi
+ echo "No Vulkan tests will be executed, but it was requested in SKQP_BACKENDS variable. Exiting."
+ exit 2
+ fi
+
+ # The Vulkan environment is not configured, but the job did not request it either
+ return 1
+}
+
+setup_backends() {
+ if test_vk_backend
+ then
+ export VK_ICD_FILENAMES="$INSTALL"/share/vulkan/icd.d/"$VK_DRIVER"_icd."${VK_CPU:-$(uname -m)}".json
+ fi
+}
+
+set -ex
+
+# Needed so configuration files can contain paths to files in /install
+ln -sf "$CI_PROJECT_DIR"/install /install
+INSTALL=${PWD}/install
+
+if [ -z "$GPU_VERSION" ]; then
+ echo 'GPU_VERSION must be set to something like "llvmpipe" or
+"freedreno-a630" (it will serve as a component to find the path for files
+residing in src/**/ci/*.txt)'
+ exit 1
+fi
+
+LD_LIBRARY_PATH=$INSTALL:$LD_LIBRARY_PATH
+setup_backends
+
+SKQP_ASSETS_DIR=/skqp/assets
+SKQP_RESULTS_DIR="${SKQP_RESULTS_DIR:-$PWD/results}"
+
+mkdir -p "${SKQP_ASSETS_DIR}"/skqp
+
+SKQP_EXITCODE=0
+for SKQP_BACKEND in ${SKQP_BACKENDS}
+do
+ set -e
+ if ! copy_tests_files "${SKQP_BACKEND}"
+ then
+ echo "No override test file found for ${SKQP_BACKEND}. Using the default one."
+ fi
+
+ set +e
+ SKQP_BACKEND_RESULTS_DIR="${SKQP_RESULTS_DIR}"/"${SKQP_BACKEND}"
+ mkdir -p "${SKQP_BACKEND_RESULTS_DIR}"
+ /skqp/skqp "${SKQP_ASSETS_DIR}" "${SKQP_BACKEND_RESULTS_DIR}" "${SKQP_BACKEND}_"
+ BACKEND_EXITCODE=$?
+
+ if [ ! $BACKEND_EXITCODE -eq 0 ]
+ then
+ echo "skqp failed on ${SKQP_BACKEND} tests with ${BACKEND_EXITCODE} exit code."
+ fi
+
+ # Propagate error codes so they are reflected in the final job result
+ SKQP_EXITCODE=$(( SKQP_EXITCODE | BACKEND_EXITCODE ))
+done
+
+set +x
+
+# Unit tests produce empty HTML reports, so guide the user to check the TXT file.
+if echo "${SKQP_BACKENDS}" | grep -qE "unitTest"
+then
+ # Remove the empty HTML report to avoid confusion
+ rm -f "${SKQP_RESULTS_DIR}"/unitTest/report.html
+
+ echo "See skqp unit test results at:"
+ echo "https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/${SKQP_RESULTS_DIR}/unitTest/unit_tests.txt"
+fi
+
+REPORT_FILES=$(mktemp)
+find "${SKQP_RESULTS_DIR}"/**/report.html -type f > "${REPORT_FILES}"
+while read -r REPORT
+do
+ BACKEND_NAME=$(echo "${REPORT}" | sed 's@.*/\([^/]*\)/report.html@\1@')
+ echo "See skqp ${BACKEND_NAME} render tests report at:"
+ echo "https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/${REPORT}"
+done < "${REPORT_FILES}"
+
+# If there is no report available, tell the user that something is wrong.
+if [ ! -s "${REPORT_FILES}" ]
+then
+ echo "No skqp report available. Probably some fatal error has occured during the skqp execution."
+fi
+
+exit $SKQP_EXITCODE
diff --git a/lib/mesa/.gitlab-ci/test-source-dep.yml b/lib/mesa/.gitlab-ci/test-source-dep.yml
index d654b928b..e19bb216e 100644
--- a/lib/mesa/.gitlab-ci/test-source-dep.yml
+++ b/lib/mesa/.gitlab-ci/test-source-dep.yml
@@ -18,6 +18,7 @@
- .gitlab-ci/**/*
- include/**/*
- meson.build
+ - .gitattributes
- src/*
- src/compiler/**/*
- src/drm-shim/**/*
@@ -30,10 +31,6 @@
- src/loader/**/*
- src/mapi/**/*
- src/mesa/*
- - src/mesa/drivers/*
- - src/mesa/drivers/common/**/*
- - src/mesa/drivers/dri/*
- - src/mesa/drivers/dri/common/**/*
- src/mesa/main/**/*
- src/mesa/math/**/*
- src/mesa/program/**/*
@@ -41,11 +38,10 @@
- src/mesa/state_tracker/**/*
- src/mesa/swrast/**/*
- src/mesa/swrast_setup/**/*
- - src/mesa/tnl/**/*
- - src/mesa/tnl_dd/**/*
- src/mesa/vbo/**/*
- src/mesa/x86/**/*
- src/mesa/x86-64/**/*
+ - src/tool/**/*
- src/util/**/*
.vulkan-rules:
@@ -132,6 +128,7 @@
- .gitlab-ci.yml
- .gitlab-ci/**/*
- meson.build
+ - .gitattributes
- include/**/*
- src/compiler/**/*
- src/include/**/*
@@ -153,6 +150,8 @@
rules:
- if: '$FD_FARM == "offline"'
when: never
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
@@ -180,9 +179,11 @@
rules:
- if: '$FD_FARM == "offline"'
when: never
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
# If the triggerer has access to the restricted traces and if it is pre-merge
- if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu)$/") &&
- ($GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME)'
+ ($GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH)'
when: never
- *ignore_scheduled_pipelines
- changes:
@@ -206,9 +207,11 @@
rules:
- if: '$FD_FARM == "offline"'
when: never
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- *ignore_scheduled_pipelines
# Run only on pre-merge pipelines from Marge
- - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME'
+ - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH'
when: never
- changes:
*mesa_core_file_list
@@ -224,10 +227,30 @@
when: manual
- when: never
+.nouveau-rules:
+ stage: nouveau
+ rules:
+ - *ignore_scheduled_pipelines
+ - changes:
+ *mesa_core_file_list
+ when: on_success
+ - changes:
+ *gallium_core_file_list
+ when: on_success
+ - changes:
+ - src/nouveau/**/*
+ - src/gallium/drivers/nouveau/**/*
+ - src/gallium/winsys/kmsro/**/*
+ - src/gallium/winsys/nouveau/**/*
+ when: on_success
+ - when: never
+
.panfrost-midgard-rules:
stage: arm
rules:
- *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- changes:
*mesa_core_file_list
when: on_success
@@ -254,6 +277,8 @@
stage: arm
rules:
- *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- changes:
*mesa_core_file_list
when: on_success
@@ -349,6 +374,8 @@
stage: amd
rules:
- *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- changes:
*mesa_core_file_list
when: on_success
@@ -377,10 +404,37 @@
when: on_success
- when: never
+# Unfortunately YAML doesn't let us concatenate arrays, so we have to
+# duplicate the rules manually
+.virgl-lava-rules-performance:
+ stage: layered-backends
+ rules:
+ - *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
+ # Run only on pre-merge pipelines from Marge
+ - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH'
+ when: never
+ - changes:
+ *mesa_core_file_list
+ when: manual
+ - changes:
+ *gallium_core_file_list
+ when: manual
+ - changes:
+ *llvmpipe_file_list
+ when: manual
+ - changes:
+ *virgl_file_list
+ when: manual
+ - when: never
+
.radeonsi-rules:
stage: amd
rules:
- *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- changes:
*mesa_core_file_list
when: on_success
@@ -402,6 +456,8 @@
stage: amd
rules:
- *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- changes:
*mesa_core_file_list
when: on_success
@@ -434,10 +490,29 @@
when: on_success
- when: never
+.crocus-rules:
+ stage: intel
+ rules:
+ - *ignore_scheduled_pipelines
+ - changes:
+ *mesa_core_file_list
+ when: on_success
+ - changes:
+ *gallium_core_file_list
+ when: on_success
+ - changes:
+ - src/gallium/drivers/crocus/**/*
+ - src/gallium/winsys/crocus/**/*
+ - src/intel/**/*
+ when: on_success
+ - when: never
+
.iris-rules:
stage: intel
rules:
- *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- changes:
*mesa_core_file_list
when: on_success
@@ -457,8 +532,10 @@
stage: intel
rules:
- *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
# Run only on pre-merge pipelines from Marge
- - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME'
+ - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH'
when: never
- changes:
*mesa_core_file_list
@@ -475,6 +552,8 @@
stage: intel
rules:
- *ignore_scheduled_pipelines
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
- changes:
*mesa_core_file_list
when: on_success
@@ -508,6 +587,8 @@
# rules duplication manually
.windows-build-rules:
rules:
+ - if: '$MICROSOFT_FARM == "offline"'
+ when: never
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
@@ -516,6 +597,9 @@
*gallium_core_file_list
when: on_success
- changes:
+ *softpipe_file_list
+ when: on_success
+ - changes:
*lavapipe_file_list
when: on_success
- changes:
@@ -539,6 +623,8 @@
.windows-test-rules:
rules:
+ - if: '$MICROSOFT_FARM == "offline"'
+ when: never
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
diff --git a/lib/mesa/.gitlab-ci/test/gitlab-ci.yml b/lib/mesa/.gitlab-ci/test/gitlab-ci.yml
new file mode 100644
index 000000000..888d73863
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/test/gitlab-ci.yml
@@ -0,0 +1,314 @@
+.test:
+ extends:
+ - .ci-run-policy
+ # Cancel job if a newer commit is pushed to the same branch
+ interruptible: true
+ variables:
+ GIT_STRATEGY: none # testing doesn't build anything from source
+ before_script:
+ - !reference [default, before_script]
+ # Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
+ - rm -rf install
+ - tar -xf artifacts/install.tar
+ - echo -e "\e[0Ksection_start:$(date +%s):ldd_section[collapsed=true]\r\e[0KChecking ldd on driver build"
+ - LD_LIBRARY_PATH=install/lib find install/lib -name "*.so" -print -exec ldd {} \;
+ - echo -e "\e[0Ksection_end:$(date +%s):ldd_section\r\e[0K"
+ artifacts:
+ when: always
+ name: "mesa_${CI_JOB_NAME}"
+ paths:
+ - results/
+
+.test-gl:
+ extends:
+ - .test
+ - .use-debian/x86_test-gl
+ needs:
+ - debian/x86_test-gl
+ - debian-testing
+
+.test-vk:
+ extends:
+ - .test
+ - .use-debian/x86_test-vk
+ needs:
+ - debian-testing
+ - debian/x86_test-vk
+
+.test-cl:
+ extends:
+ - .test
+ - .use-debian/x86_test-gl
+ needs:
+ - debian/x86_test-gl
+ - debian-clover-testing
+
+.vkd3d-proton-test:
+ artifacts:
+ when: on_failure
+ name: "mesa_${CI_JOB_NAME}"
+ paths:
+ - results/vkd3d-proton.log
+ script:
+ - ./install/vkd3d-proton/run.sh
+
+.piglit-test:
+ artifacts:
+ name: "mesa_${CI_JOB_NAME}"
+ paths:
+ - results
+ reports:
+ junit: results/junit.xml
+ variables:
+ PIGLIT_NO_WINDOW: 1
+ HWCI_TEST_SCRIPT: "/install/piglit/piglit-runner.sh"
+ script:
+ - install/piglit/piglit-runner.sh
+
+.piglit-traces-test:
+ extends:
+ - .piglit-test
+ cache:
+ key: ${CI_JOB_NAME}
+ paths:
+ - replayer-db/
+ artifacts:
+ when: on_failure
+ name: "mesa_${CI_JOB_NAME}"
+ reports:
+ junit: results/junit.xml
+ paths:
+ - results/summary/
+ - results/*.txt
+ variables:
+ PIGLIT_REPLAY_EXTRA_ARGS: --keep-image --db-path ${CI_PROJECT_DIR}/replayer-db/ --minio_host=minio-packet.freedesktop.org --minio_bucket=mesa-tracie-public --role-session-name=${CI_PROJECT_PATH}:${CI_JOB_ID} --jwt-file=${CI_JOB_JWT_FILE}
+ script:
+ - install/piglit/piglit-traces.sh
+
+.deqp-test:
+ script:
+ - ./install/deqp-runner.sh
+ artifacts:
+ exclude:
+ - results/*.shader_cache
+ reports:
+ junit: results/junit.xml
+
+.deqp-test-vk:
+ extends:
+ - .deqp-test
+ variables:
+ DEQP_VER: vk
+
+.fossilize-test:
+ script:
+ - ./install/fossilize-runner.sh
+ artifacts:
+ when: on_failure
+ name: "mesa_${CI_JOB_NAME}"
+ paths:
+ - results/
+
+.baremetal-test:
+ extends:
+ - .ci-run-policy
+ - .test
+ # Cancel job if a newer commit is pushed to the same branch
+ interruptible: true
+ stage: test
+ before_script:
+ - !reference [default, before_script]
+ # Use this instead of gitlab's artifacts download because it hits packet.net
+ # instead of fd.o. Set FDO_HTTP_CACHE_URI to an http cache for your test lab to
+ # improve it even more (see https://docs.mesa3d.org/ci/bare-metal.html for
+ # setup).
+ - wget ${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${MINIO_ARTIFACT_NAME}.tar.gz -S --progress=dot:giga -O- | tar -xz
+ artifacts:
+ when: always
+ name: "mesa_${CI_JOB_NAME}"
+ paths:
+ - results/
+ - serial*.txt
+ exclude:
+ - results/*.shader_cache
+ reports:
+ junit: results/junit.xml
+
+.baremetal-test-armhf:
+ extends:
+ - .baremetal-test
+ variables:
+ BM_ROOTFS: /rootfs-armhf
+ MINIO_ARTIFACT_NAME: mesa-armhf
+
+.baremetal-test-arm64:
+ extends:
+ - .baremetal-test
+ variables:
+ BM_ROOTFS: /rootfs-arm64
+ MINIO_ARTIFACT_NAME: mesa-arm64
+
+.baremetal-arm64-asan-test:
+ variables:
+ DEQP_RUNNER_OPTIONS: "--env LD_PRELOAD=libasan.so.6:/install/lib/libdlclose-skip.so"
+ MINIO_ARTIFACT_NAME: mesa-arm64-asan
+ needs:
+ - debian/arm_test
+ - job: debian-arm64-asan
+ artifacts: false
+
+.baremetal-deqp-test:
+ variables:
+ HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
+ FDO_CI_CONCURRENT: 0 # Default to number of CPUs
+
+.baremetal-skqp-test:
+ variables:
+ HWCI_START_XORG: 1
+ HWCI_TEST_SCRIPT: "/install/skqp-runner.sh"
+
+# For Valve's bare-metal testing farm jobs.
+.b2c-test:
+ # It would be nice to use ci-templates within Mesa CI for this job's
+ # image:, but the integration is not possible for the current
+ # use-case. Within this job, two containers are managed. 1) the
+ # gitlab runner container from which the job is submitted to the
+ # DUT, and 2) the test container (e.g. debian/x86_test-vk) within
+ # which the test cases will run on the DUT. Since ci-templates and
+ # the associated image setting macros in this file rely on variables
+ # like FDO_DISTRIBUTION_TAG for *the* image, there is no way to
+ # depend on more than one image per job. So, the job container is
+ # built as part of the CI in the boot2container project.
+ image: registry.freedesktop.org/mupuf/valve-infra/mesa-trigger:2022-03-03.2
+ extends:
+ - .use-debian/x86_test-vk
+ variables:
+ # No need by default to pull the whole repo
+ GIT_STRATEGY: none
+ # boot2container initrd configuration parameters.
+ B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/mupuf/valve-infra/-/package_files/117/download' # 5.16-for-mesa-ci
+ B2C_INITRAMFS_URL: 'https://gitlab.freedesktop.org/mupuf/boot2container/-/releases/v0.9.4/downloads/initramfs.linux_amd64.cpio.xz'
+ B2C_JOB_SUCCESS_REGEX: '\[.*\]: Execution is over, pipeline status: 0\r$'
+ B2C_JOB_WARN_REGEX: 'null'
+ B2C_LOG_LEVEL: 6
+ B2C_POWEROFF_DELAY: 15
+ B2C_SESSION_END_REGEX: '^.*It''s now safe to turn off your computer\r$'
+ B2C_SESSION_REBOOT_REGEX: 'GPU hang detected!'
+ B2C_TIMEOUT_BOOT_MINUTES: 240
+ B2C_TIMEOUT_BOOT_RETRIES: 2
+ B2C_TIMEOUT_FIRST_MINUTES: 5
+ B2C_TIMEOUT_FIRST_RETRIES: 3
+ B2C_TIMEOUT_MINUTES: 2
+ B2C_TIMEOUT_OVERALL_MINUTES: 240
+ B2C_TIMEOUT_RETRIES: 0
+
+ # As noted in the top description, we make a distinction between the
+ # container used by gitlab-runner to queue the work, and the container
+ # used by the DUTs/test machines. To make this distinction quite clear,
+ # we rename the MESA_IMAGE variable to IMAGE_UNDER_TEST.
+ IMAGE_UNDER_TEST: "$MESA_IMAGE"
+
+ INSTALL_TARBALL: "./artifacts/install.tar"
+ CI_VALVE_ARTIFACTS: "./artifacts/valve"
+ CI_COMMON_SCRIPTS: "./artifacts/ci-common"
+ GENERATE_ENV_SCRIPT: "${CI_COMMON_SCRIPTS}/generate-env.sh"
+ B2C_JOB_TEMPLATE: "${CI_VALVE_ARTIFACTS}/b2c.yml.jinja2.jinja2"
+ JOB_FOLDER: "job_folder"
+ before_script:
+ # We don't want .test's tarball unpacking, but we do want the JWT bits.
+ - !reference [default, before_script]
+ - |
+ set -x
+
+ # Useful as a hook point for runner admins. You may edit the
+ # config.toml for the Gitlab runner and use a bind-mount to
+ # populate the hook script with some executable commands. This
+ # allows quicker feedback than resubmitting pipelines and
+ # potentially having to wait for a debug build of Mesa to
+ # complete.
+ if [ -x /runner-before-script.sh ]; then
+ echo "Executing runner before-script hook..."
+ sh /runner-before-script.sh
+ hook_ret=$?  # capture the hook's exit status before it is clobbered
+ if [ $hook_ret -ne 0 ]; then
+ echo "Runner hook failed, goodbye"
+ exit $hook_ret
+ fi
+ fi
+
+ [ -s "$INSTALL_TARBALL" ] || exit 1
+ [ -d "$CI_VALVE_ARTIFACTS" ] || exit 1
+ [ -d "$CI_COMMON_SCRIPTS" ] || exit 1
+
+
+ B2C_TEST_SCRIPT="bash -c 'source ./set-job-env-vars.sh ; ${B2C_TEST_SCRIPT}'"
+
+ # The Valve CI gateway receives jobs in a YAML format. Create a
+ # job description from the CI environment.
+ python3 "$CI_VALVE_ARTIFACTS"/generate_b2c.py \
+ --ci-job-id "${CI_JOB_ID}" \
+ --container-cmd "${B2C_TEST_SCRIPT}" \
+ --initramfs-url "${B2C_INITRAMFS_URL}" \
+ --job-success-regex "${B2C_JOB_SUCCESS_REGEX}" \
+ --job-warn-regex "${B2C_JOB_WARN_REGEX}" \
+ --kernel-url "${B2C_KERNEL_URL}" \
+ --log-level "${B2C_LOG_LEVEL}" \
+ --poweroff-delay "${B2C_POWEROFF_DELAY}" \
+ --session-end-regex "${B2C_SESSION_END_REGEX}" \
+ --session-reboot-regex "${B2C_SESSION_REBOOT_REGEX}" \
+ --tags "${CI_RUNNER_TAGS}" \
+ --template "${B2C_JOB_TEMPLATE}" \
+ --timeout-boot-minutes "${B2C_TIMEOUT_BOOT_MINUTES}" \
+ --timeout-boot-retries "${B2C_TIMEOUT_BOOT_RETRIES}" \
+ --timeout-first-minutes "${B2C_TIMEOUT_FIRST_MINUTES}" \
+ --timeout-first-retries "${B2C_TIMEOUT_FIRST_RETRIES}" \
+ --timeout-minutes "${B2C_TIMEOUT_MINUTES}" \
+ --timeout-overall-minutes "${B2C_TIMEOUT_OVERALL_MINUTES}" \
+ --timeout-retries "${B2C_TIMEOUT_RETRIES}" \
+ --job-volume-exclusions "${B2C_JOB_VOLUME_EXCLUSIONS}" \
+ --local-container "${IMAGE_UNDER_TEST}" \
+ ${B2C_EXTRA_VOLUME_ARGS} \
+ --working-dir "$CI_PROJECT_DIR"
+
+ cat b2c.yml.jinja2
+
+ rm -rf ${JOB_FOLDER} || true
+ mkdir -v ${JOB_FOLDER}
+ # Create a script to regenerate the CI environment when this job
+ # begins running on the remote DUT.
+ set +x
+ "$CI_COMMON_SCRIPTS"/generate-env.sh > ${JOB_FOLDER}/set-job-env-vars.sh
+ chmod +x ${JOB_FOLDER}/set-job-env-vars.sh
+ echo "Variables passed through:"
+ cat ${JOB_FOLDER}/set-job-env-vars.sh
+ echo "export CI_JOB_JWT=${CI_JOB_JWT}" >> ${JOB_FOLDER}/set-job-env-vars.sh
+ set -x
+
+ # Extract the Mesa distribution into the location expected by
+ # the Mesa CI deqp-runner scripts.
+ tar x -C ${JOB_FOLDER} -f $INSTALL_TARBALL
+
+ script: |
+ slugify () {
+ echo "$1" | sed -r s/[~\^]+//g | sed -r s/[^a-zA-Z0-9]+/-/g | sed -r s/^-+\|-+$//g | tr A-Z a-z
+ }
+
+ # Submit the job to Valve's CI gateway service with the CI
+ # provisioned job_folder.
+ env PYTHONUNBUFFERED=1 executorctl \
+ run -w b2c.yml.jinja2 -j $(slugify "$CI_JOB_NAME") -s ${JOB_FOLDER}
+
+ ls -l
+ # Anything our job places in results/ will be collected by the
+ # Gitlab coordinator for status presentation. results/junit.xml
+ # will be parsed by the UI for more detailed explanations of
+ # test execution.
+ needs:
+ - debian/x86_test-vk
+ - debian-testing
+ artifacts:
+ when: always
+ name: "mesa_${CI_JOB_NAME}"
+ paths:
+ - ${JOB_FOLDER}/results
+ reports:
+ junit: ${JOB_FOLDER}/results/junit.xml
diff --git a/lib/mesa/.gitlab-ci/tests/__init__.py b/lib/mesa/.gitlab-ci/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/tests/__init__.py
diff --git a/lib/mesa/.gitlab-ci/tests/test_lava_job_submitter.py b/lib/mesa/.gitlab-ci/tests/test_lava_job_submitter.py
new file mode 100644
index 000000000..0ed19efee
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/tests/test_lava_job_submitter.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Collabora Limited
+# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import xmlrpc.client
+from contextlib import nullcontext as does_not_raise
+from datetime import datetime
+from itertools import repeat
+from typing import Tuple
+from unittest.mock import MagicMock, patch
+
+import pytest
+import yaml
+from freezegun import freeze_time
+from lava.lava_job_submitter import (
+ DEVICE_HANGING_TIMEOUT_SEC,
+ follow_job_execution,
+ hide_sensitive_data,
+ retriable_follow_job,
+)
+
+
+def jobs_logs_response(finished=False, msg=None) -> Tuple[bool, str]:
+ timed_msg = {"dt": str(datetime.now()), "msg": "New message"}
+ logs = [timed_msg] if msg is None else msg
+
+ return finished, yaml.safe_dump(logs)
+
+
+def result_get_testjob_results_response() -> str:
+ result = {"result": "test"}
+ results = [{"metadata": result}]
+
+ return yaml.safe_dump(results)
+
+
+def result_get_testcase_results_response() -> str:
+ result = {"result": "pass"}
+ test_cases = [result]
+
+ return yaml.safe_dump(test_cases)
+
+
+@pytest.fixture
+def mock_proxy():
+ def create_proxy_mock(**kwargs):
+ proxy_mock = MagicMock()
+ proxy_submit_mock = proxy_mock.scheduler.jobs.submit
+ proxy_submit_mock.return_value = "1234"
+
+ proxy_results_mock = proxy_mock.results.get_testjob_results_yaml
+ proxy_results_mock.return_value = result_get_testjob_results_response()
+
+ proxy_test_cases_mock = proxy_mock.results.get_testcase_results_yaml
+ proxy_test_cases_mock.return_value = result_get_testcase_results_response()
+
+ proxy_logs_mock = proxy_mock.scheduler.jobs.logs
+ proxy_logs_mock.return_value = jobs_logs_response()
+ for key, value in kwargs.items():
+ setattr(proxy_logs_mock, key, value)
+
+ return proxy_mock
+
+ yield create_proxy_mock
+
+
+@pytest.fixture
+def mock_proxy_waiting_time(mock_proxy):
+ def update_mock_proxy(frozen_time, **kwargs):
+ wait_time = kwargs.pop("wait_time", 0)
+ proxy_mock = mock_proxy(**kwargs)
+ proxy_job_state = proxy_mock.scheduler.job_state
+ proxy_job_state.return_value = {"job_state": "Running"}
+ proxy_job_state.side_effect = frozen_time.tick(wait_time)
+
+ return proxy_mock
+
+ return update_mock_proxy
+
+
+@pytest.fixture
+def mock_sleep():
+ """Mock time.sleep to make test faster"""
+ with patch("time.sleep", return_value=None):
+ yield
+
+
+@pytest.fixture
+def frozen_time(mock_sleep):
+ with freeze_time() as frozen_time:
+ yield frozen_time
+
+
+@pytest.mark.parametrize("exception", [RuntimeError, SystemError, KeyError])
+def test_submit_and_follow_respects_exceptions(mock_sleep, mock_proxy, exception):
+ with pytest.raises(exception):
+ follow_job_execution(mock_proxy(side_effect=exception), "")
+
+
+def generate_n_logs(n=1, tick_sec=1):
+ """Simulate a log partitionated in n components"""
+ with freeze_time(datetime.now()) as time_travel:
+ while True:
+            # Simulate a scenario where the target job is still waiting to be started
+ for _ in range(n - 1):
+ time_travel.tick(tick_sec)
+ yield jobs_logs_response(finished=False, msg=[])
+
+ time_travel.tick(tick_sec)
+ yield jobs_logs_response(finished=True)
+
+
+NETWORK_EXCEPTION = xmlrpc.client.ProtocolError("", 0, "test", {})
+XMLRPC_FAULT = xmlrpc.client.Fault(0, "test")
+
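+# Each scenario is (side_effect fed to the proxy logs mock, expected exception context, expected has_finished)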
+PROXY_SCENARIOS = {
+ "finish case": (generate_n_logs(1), does_not_raise(), True),
+ "works at last retry": (
+ generate_n_logs(n=3, tick_sec=DEVICE_HANGING_TIMEOUT_SEC + 1),
+ does_not_raise(),
+ True,
+ ),
+ "timed out more times than retry attempts": (
+ generate_n_logs(n=4, tick_sec=DEVICE_HANGING_TIMEOUT_SEC + 1),
+ does_not_raise(),
+ False,
+ ),
+ "long log case, no silence": (
+ generate_n_logs(n=1000, tick_sec=0),
+ does_not_raise(),
+ True,
+ ),
+ "very long silence": (
+ generate_n_logs(n=4, tick_sec=100000),
+ does_not_raise(),
+ False,
+ ),
+ # If a protocol error happens, _call_proxy will retry without affecting timeouts
+ "unstable connection, ProtocolError followed by final message": (
+ (NETWORK_EXCEPTION, jobs_logs_response(finished=True)),
+ does_not_raise(),
+ True,
+ ),
+ # After an arbitrary number of retries, _call_proxy should call sys.exit
+ "unreachable case, subsequent ProtocolErrors": (
+ repeat(NETWORK_EXCEPTION),
+ pytest.raises(SystemExit),
+ False,
+ ),
+ "XMLRPC Fault": ([XMLRPC_FAULT], pytest.raises(SystemExit, match="1"), False),
+}
+
+
+@patch("time.sleep", return_value=None) # mock sleep to make test faster
+@pytest.mark.parametrize(
+ "side_effect, expectation, has_finished",
+ PROXY_SCENARIOS.values(),
+ ids=PROXY_SCENARIOS.keys(),
+)
+def test_retriable_follow_job(
+ mock_sleep, side_effect, expectation, has_finished, mock_proxy
+):
+ with expectation:
+ result = retriable_follow_job(mock_proxy(side_effect=side_effect), "")
+ assert has_finished == result
+
+
+WAIT_FOR_JOB_SCENARIOS = {
+ "one log run taking (sec):": (generate_n_logs(1), True),
+}
+
+
+@pytest.mark.parametrize("wait_time", (0, DEVICE_HANGING_TIMEOUT_SEC * 2))
+@pytest.mark.parametrize(
+ "side_effect, has_finished",
+ WAIT_FOR_JOB_SCENARIOS.values(),
+ ids=WAIT_FOR_JOB_SCENARIOS.keys(),
+)
+def test_simulate_a_long_wait_to_start_a_job(
+ frozen_time,
+ wait_time,
+ side_effect,
+ has_finished,
+ mock_proxy_waiting_time,
+):
+ start_time = datetime.now()
+ result = retriable_follow_job(
+ mock_proxy_waiting_time(
+ frozen_time, side_effect=side_effect, wait_time=wait_time
+ ),
+ "",
+ )
+
+ end_time = datetime.now()
+ delta_time = end_time - start_time
+
+ assert has_finished == result
+ assert delta_time.total_seconds() >= wait_time
+
+
+SENSITIVE_DATA_SCENARIOS = {
+ "no sensitive data tagged": (
+ ["bla bla", "mytoken: asdkfjsde1341=="],
+ ["bla bla", "mytoken: asdkfjsde1341=="],
+ "HIDEME",
+ ),
+ "sensitive data tagged": (
+ ["bla bla", "mytoken: asdkfjsde1341== # HIDEME"],
+ ["bla bla"],
+ "HIDEME",
+ ),
+ "sensitive data tagged with custom word": (
+ ["bla bla", "mytoken: asdkfjsde1341== # DELETETHISLINE", "third line"],
+ ["bla bla", "third line"],
+ "DELETETHISLINE",
+ ),
+}
+
+
+@pytest.mark.parametrize(
+ "input, expectation, tag",
+ SENSITIVE_DATA_SCENARIOS.values(),
+ ids=SENSITIVE_DATA_SCENARIOS.keys(),
+)
+def test_hide_sensitive_data(input, expectation, tag):
+ yaml_data = yaml.safe_dump(input)
+ yaml_result = hide_sensitive_data(yaml_data, tag)
+ result = yaml.safe_load(yaml_result)
+
+ assert result == expectation
diff --git a/lib/mesa/.gitlab-ci/valve/b2c.yml.jinja2.jinja2 b/lib/mesa/.gitlab-ci/valve/b2c.yml.jinja2.jinja2
new file mode 100644
index 000000000..238fb6e88
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/valve/b2c.yml.jinja2.jinja2
@@ -0,0 +1,63 @@
+version: 1
+
+# Rules to match for a machine to qualify
+target:
+{% if tags %}
+{% set b2ctags = tags.split(',') %}
+ tags:
+{% for tag in b2ctags %}
+ - '{{ tag | trim }}'
+{% endfor %}
+{% endif %}
+
+timeouts:
+ first_console_activity: # This limits the time it can take to receive the first console log
+ minutes: {{ timeout_first_minutes }}
+ retries: {{ timeout_first_retries }}
+ console_activity: # Reset every time we receive a message from the logs
+ minutes: {{ timeout_minutes }}
+ retries: {{ timeout_retries }}
+ boot_cycle:
+ minutes: {{ timeout_boot_minutes }}
+ retries: {{ timeout_boot_retries }}
+ overall: # Maximum time the job can take, not overrideable by the "continue" deployment
+ minutes: {{ timeout_overall_minutes }}
+ retries: 0
+ # no retries possible here
+
+console_patterns:
+ session_end:
+ regex: >-
+ {{ session_end_regex }}
+ session_reboot:
+ regex: >-
+ {{ session_reboot_regex }}
+ job_success:
+ regex: >-
+ {{ job_success_regex }}
+
+# Environment to deploy
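+# Note: the escaped brace expressions below emit literal Jinja delimiters, leaving those
+# values for a later templating pass (hence the double .jinja2 extension of this file)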
+deployment:
+ # Initial boot
+ start:
+ kernel:
+ url: '{{ kernel_url }}'
+ cmdline: >
+ SALAD.machine_id={{ '{{' }} machine_id }}
+ console={{ '{{' }} local_tty_device }},115200 earlyprintk=vga,keep
+ loglevel={{ log_level }} amdgpu.gpu_recovery=0 no_hash_pointers
+ b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/mupuf/valve-infra/machine_registration:latest check"
+ b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }}
+ b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
+ b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},expiration=pipeline_end,preserve"
+{% for volume in volumes %}
+ b2c.volume={{ volume }}
+{% endfor %}
+ b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd }}"
+ {% if cmdline_extras is defined %}
+ {{ cmdline_extras }}
+ {% endif %}
+
+ initramfs:
+ url: '{{ initramfs_url }}'
+
diff --git a/lib/mesa/.gitlab-ci/valve/generate_b2c.py b/lib/mesa/.gitlab-ci/valve/generate_b2c.py
new file mode 100755
index 000000000..a68b34a14
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/valve/generate_b2c.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+
+# Copyright © 2022 Valve Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from jinja2 import Environment, FileSystemLoader
+from argparse import ArgumentParser
+from os import environ, path
+
+
+parser = ArgumentParser()
+parser.add_argument('--ci-job-id')
+parser.add_argument('--container-cmd')
+parser.add_argument('--initramfs-url')
+parser.add_argument('--job-success-regex')
+parser.add_argument('--job-warn-regex')
+parser.add_argument('--kernel-url')
+parser.add_argument('--log-level', type=int)
+parser.add_argument('--poweroff-delay', type=int)
+parser.add_argument('--session-end-regex')
+parser.add_argument('--session-reboot-regex')
+parser.add_argument('--tags', nargs='?', default='')
+parser.add_argument('--template', default='b2c.yml.jinja2.jinja2')
+parser.add_argument('--timeout-boot-minutes', type=int)
+parser.add_argument('--timeout-boot-retries', type=int)
+parser.add_argument('--timeout-first-minutes', type=int)
+parser.add_argument('--timeout-first-retries', type=int)
+parser.add_argument('--timeout-minutes', type=int)
+parser.add_argument('--timeout-overall-minutes', type=int)
+parser.add_argument('--timeout-retries', type=int)
+parser.add_argument('--job-volume-exclusions', nargs='?', default='')
+parser.add_argument('--volume', action='append')
+parser.add_argument('--mount-volume', action='append')
+parser.add_argument('--local-container', default=environ.get('B2C_LOCAL_CONTAINER', 'alpine:latest'))
+parser.add_argument('--working-dir')
+args = parser.parse_args()
+
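+# Load the template from its own directory; trim_blocks/lstrip_blocks keep Jinja control
+# lines from leaving blank lines or stray indentation in the rendered YAML.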
+env = Environment(loader=FileSystemLoader(path.dirname(args.template)),
+ trim_blocks=True, lstrip_blocks=True)
+
+template = env.get_template(path.basename(args.template))
+
+values = {}
+values['ci_job_id'] = args.ci_job_id
+values['container_cmd'] = args.container_cmd
+values['initramfs_url'] = args.initramfs_url
+values['job_success_regex'] = args.job_success_regex
+values['job_warn_regex'] = args.job_warn_regex
+values['kernel_url'] = args.kernel_url
+values['log_level'] = args.log_level
+values['poweroff_delay'] = args.poweroff_delay
+values['session_end_regex'] = args.session_end_regex
+values['session_reboot_regex'] = args.session_reboot_regex
+values['tags'] = args.tags
+values['template'] = args.template
+values['timeout_boot_minutes'] = args.timeout_boot_minutes
+values['timeout_boot_retries'] = args.timeout_boot_retries
+values['timeout_first_minutes'] = args.timeout_first_minutes
+values['timeout_first_retries'] = args.timeout_first_retries
+values['timeout_minutes'] = args.timeout_minutes
+values['timeout_overall_minutes'] = args.timeout_overall_minutes
+values['timeout_retries'] = args.timeout_retries
+if len(args.job_volume_exclusions) > 0:
+ exclusions = args.job_volume_exclusions.split(",")
+ values['job_volume_exclusions'] = [excl for excl in exclusions if len(excl) > 0]
+if args.volume is not None:
+ values['volumes'] = args.volume
+if args.mount_volume is not None:
+ values['mount_volumes'] = args.mount_volume
+values['working_dir'] = args.working_dir
+
+assert(len(args.local_container) > 0)
+values['local_container'] = args.local_container.replace(
+ # Use the gateway's pull-through registry cache to reduce load on fd.o.
+ 'registry.freedesktop.org', '{{ fdo_proxy_registry }}'
+)
+
+if 'B2C_KERNEL_CMDLINE_EXTRAS' in environ:
+ values['cmdline_extras'] = environ['B2C_KERNEL_CMDLINE_EXTRAS']
+
+f = open(path.splitext(path.basename(args.template))[0], "w")
+f.write(template.render(values))
+f.close()
diff --git a/lib/mesa/.gitlab-ci/windows/Dockerfile_build b/lib/mesa/.gitlab-ci/windows/Dockerfile_build
new file mode 100644
index 000000000..f94050424
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/windows/Dockerfile_build
@@ -0,0 +1,13 @@
+# escape=`
+
+FROM mcr.microsoft.com/windows:1809
+
+# Make sure any failure in PowerShell scripts is fatal
+SHELL ["powershell", "-ExecutionPolicy", "RemoteSigned", "-Command", "$ErrorActionPreference = 'Stop';"]
+ENV ErrorActionPreference='Stop'
+
+COPY mesa_deps_vs2019.ps1 C:\
+RUN C:\mesa_deps_vs2019.ps1
+
+COPY mesa_deps_build.ps1 C:\
+RUN C:\mesa_deps_build.ps1
diff --git a/lib/mesa/.gitlab-ci/windows/Dockerfile_test b/lib/mesa/.gitlab-ci/windows/Dockerfile_test
new file mode 100644
index 000000000..106e49335
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/windows/Dockerfile_test
@@ -0,0 +1,7 @@
+# escape=`
+
+ARG base_image
+FROM ${base_image}
+
+COPY mesa_deps_test.ps1 C:\
+RUN C:\mesa_deps_test.ps1
diff --git a/lib/mesa/.gitlab-ci/windows/deqp_runner_run.ps1 b/lib/mesa/.gitlab-ci/windows/deqp_runner_run.ps1
new file mode 100644
index 000000000..571836fa4
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/windows/deqp_runner_run.ps1
@@ -0,0 +1,31 @@
+$dxil_dll = cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 -no_logo && where dxil.dll" 2>&1
+if ($dxil_dll -notmatch "dxil.dll$") {
+ Write-Output "Couldn't get path to dxil.dll"
+ exit 1
+}
+$env:Path = "$(Split-Path $dxil_dll);$env:Path"
+
+# The VK_ICD_FILENAMES environment variable is ignored when running with
+# elevated privileges, so register the ICD via a registry key instead.
+$hkey_path = "HKLM:\SOFTWARE\Khronos\Vulkan\Drivers\"
+$hkey_name = Join-Path -Path $pwd -ChildPath "_install\share\vulkan\icd.d\dzn_icd.x86_64.json"
+New-Item -Path $hkey_path -force
+New-ItemProperty -Path $hkey_path -Name $hkey_name -Value 0 -PropertyType DWORD
+
+$results = New-Item -ItemType Directory results
+$deqp_options = @("--deqp-surface-width", 256, "--deqp-surface-height", 256, "--deqp-surface-type", "pbuffer", "--deqp-gl-config-name", "rgba8888d24s8ms0", "--deqp-visibility", "hidden")
+$deqp_module = "C:\deqp\external\vulkancts\modules\vulkan\deqp-vk.exe"
+$caselist = "C:\deqp\mustpass\vk-master.txt"
+$baseline = ".\_install\warp-fails.txt"
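+# -t adds an include filter, restricting the run to these dEQP test groups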
+$includes = @("-t", "dEQP-VK.api.*", "-t", "dEQP-VK.info.*", "-t", "dEQP-VK.draw.*", "-t", "dEQP-VK.query_pool.*", "-t", "dEQP-VK.memory.*")
+
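+# DZN_DEBUG=warp makes the dozen driver use the WARP software adapter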
+$env:DZN_DEBUG = "warp"
+deqp-runner run --deqp $($deqp_module) --output $($results) --caselist $($caselist) --baseline $($baseline) $($includes) --testlog-to-xml C:\deqp\executor\testlog-to-xml.exe --jobs 4 -- $($deqp_options)
+$deqpstatus = $?
+
+$template = "See https://$($env:CI_PROJECT_ROOT_NAMESPACE).pages.freedesktop.org/-/$($env:CI_PROJECT_NAME)/-/jobs/$($env:CI_JOB_ID)/artifacts/results/{{testcase}}.xml"
+deqp-runner junit --testsuite dEQP --results "$($results)/failures.csv" --output "$($results)/junit.xml" --limit 50 --template $template
+
+if (!$deqpstatus) {
+ Exit 1
+}
diff --git a/lib/mesa/.gitlab-ci/windows/mesa_build.ps1 b/lib/mesa/.gitlab-ci/windows/mesa_build.ps1
index 42794c6ec..e4427a7c2 100644
--- a/lib/mesa/.gitlab-ci/windows/mesa_build.ps1
+++ b/lib/mesa/.gitlab-ci/windows/mesa_build.ps1
@@ -6,10 +6,48 @@ $env:PYTHONUTF8=1
Get-Date
Write-Host "Compiling Mesa"
-$builddir = New-Item -ItemType Directory -Name "_build"
-$installdir = New-Item -ItemType Directory -Name "_install"
-Push-Location $builddir.FullName
-cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && meson --default-library=shared -Dzlib:default_library=static --buildtype=release -Db_ndebug=false -Dc_std=c17 -Dcpp_std=vc++latest -Db_vscrt=mt --cmake-prefix-path=`"C:\llvm-10`" --pkg-config-path=`"C:\llvm-10\lib\pkgconfig;C:\llvm-10\share\pkgconfig;C:\spirv-tools\lib\pkgconfig`" --prefix=`"$installdir`" -Dllvm=enabled -Dshared-llvm=disabled -Dvulkan-drivers=swrast,amd -Dgallium-drivers=swrast,d3d12,zink -Dshared-glapi=enabled -Dgles2=enabled -Dmicrosoft-clc=enabled -Dstatic-libclc=all -Dspirv-to-dxil=true -Dbuild-tests=true -Dwerror=true -Dwarning_level=2 -Dzlib:warning_level=1 -Dlibelf:warning_level=1 && ninja -j32 install && meson test --num-processes 32"
+$builddir = New-Item -Force -ItemType Directory -Name "_build"
+$installdir = New-Item -Force -ItemType Directory -Name "_install"
+$builddir=$builddir.FullName
+$installdir=$installdir.FullName
+$sourcedir=$PWD
+
+Remove-Item -Recurse -Force $builddir
+Remove-Item -Recurse -Force $installdir
+New-Item -ItemType Directory -Path $builddir
+New-Item -ItemType Directory -Path $installdir
+
+Write-Output builddir:$builddir
+Write-Output installdir:$installdir
+Write-Output sourcedir:$sourcedir
+
+$installPath=& "C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe" -version 16.0 -property installationpath
+Write-Output "vswhere.exe installPath: $installPath"
+$installPath="C:\BuildTools"
+Write-Output "Final installPath: $installPath"
+Import-Module (Join-Path $installPath "Common7\Tools\Microsoft.VisualStudio.DevShell.dll")
+Enter-VsDevShell -VsInstallPath $installPath -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo -host_arch=amd64'
+
+Push-Location $builddir
+
+meson --default-library=shared -Dzlib:default_library=static --buildtype=release -Db_ndebug=false `
+-Db_vscrt=mt --cmake-prefix-path="C:\llvm-10" `
+--pkg-config-path="C:\llvm-10\lib\pkgconfig;C:\llvm-10\share\pkgconfig;C:\spirv-tools\lib\pkgconfig" `
+--prefix="$installdir" `
+-Dllvm=enabled -Dshared-llvm=disabled `
+"-Dvulkan-drivers=swrast,amd,microsoft-experimental" "-Dgallium-drivers=swrast,d3d12,zink" `
+-Dshared-glapi=enabled -Dgles2=enabled -Dmicrosoft-clc=enabled -Dstatic-libclc=all -Dspirv-to-dxil=true `
+-Dbuild-tests=true -Dwerror=true -Dwarning_level=2 -Dzlib:warning_level=1 -Dlibelf:warning_level=1 `
+$sourcedir
+
+if ($?) {
+ ninja install -j32
+}
+
+if ($?) {
+ meson test --num-processes 32
+}
+
$buildstatus = $?
Pop-Location
@@ -21,4 +59,10 @@ if (!$buildstatus) {
}
Copy-Item ".\.gitlab-ci\windows\piglit_run.ps1" -Destination $installdir
-Copy-Item ".\.gitlab-ci\windows\quick_gl.txt" -Destination $installdir
+
+Copy-Item ".\.gitlab-ci\windows\spirv2dxil_check.ps1" -Destination $installdir
+Copy-Item ".\.gitlab-ci\windows\spirv2dxil_run.ps1" -Destination $installdir
+
+Copy-Item ".\.gitlab-ci\windows\deqp_runner_run.ps1" -Destination $installdir
+
+Get-ChildItem -Recurse -Filter "ci" | Get-ChildItem -Filter "*.txt" | Copy-Item -Destination $installdir
diff --git a/lib/mesa/.gitlab-ci/windows/mesa_container.ps1 b/lib/mesa/.gitlab-ci/windows/mesa_container.ps1
index 7a2f6c237..cbb9e2235 100644
--- a/lib/mesa/.gitlab-ci/windows/mesa_container.ps1
+++ b/lib/mesa/.gitlab-ci/windows/mesa_container.ps1
@@ -6,6 +6,8 @@ $registry_username = $args[1]
$registry_password = $args[2]
$registry_user_image = $args[3]
$registry_central_image = $args[4]
+$build_dockerfile = $args[5]
+$registry_base_image = $args[6]
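+# $build_dockerfile selects which Dockerfile to build; $registry_base_image is forwarded as the base_image build-arg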
Set-Location -Path ".\.gitlab-ci\windows"
@@ -39,7 +41,7 @@ if ($?) {
}
Write-Host "No image found at $registry_user_image or $registry_central_image; rebuilding"
-docker --config "windows-docker.conf" build --no-cache -t "$registry_user_image" .
+docker --config "windows-docker.conf" build --no-cache -t "$registry_user_image" -f "$build_dockerfile" --build-arg base_image="$registry_base_image" .
if (!$?) {
Write-Host "Container build failed"
docker --config "windows-docker.conf" logout "$registry_uri"
diff --git a/lib/mesa/.gitlab-ci/windows/mesa_deps_build.ps1 b/lib/mesa/.gitlab-ci/windows/mesa_deps_build.ps1
new file mode 100644
index 000000000..a771ca254
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/windows/mesa_deps_build.ps1
@@ -0,0 +1,146 @@
+# Download new TLS certs from Windows Update
+Get-Date
+Write-Host "Updating TLS certificate store"
+$certdir = (New-Item -ItemType Directory -Name "_tlscerts")
+certutil -syncwithWU "$certdir"
+Foreach ($file in (Get-ChildItem -Path "$certdir\*" -Include "*.crt")) {
+ Import-Certificate -FilePath $file -CertStoreLocation Cert:\LocalMachine\Root
+}
+Remove-Item -Recurse -Path $certdir
+
+
+Get-Date
+Write-Host "Installing Chocolatey"
+Invoke-Expression ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
+Import-Module "$env:ProgramData\chocolatey\helpers\chocolateyProfile.psm1"
+Update-SessionEnvironment
+Write-Host "Installing Chocolatey packages"
+
+# Chocolatey tries to download winflexbison from SourceForge, which is not super reliable, and has no retry
+# loop of its own - so we give it a helping hand here
+For ($i = 0; $i -lt 5; $i++) {
+ choco install -y python3 --params="/InstallDir:C:\python3"
+ $python_install = $?
+ choco install --allow-empty-checksums -y cmake git git-lfs ninja pkgconfiglite winflexbison vulkan-sdk --installargs "ADD_CMAKE_TO_PATH=System"
+ $other_install = $?
+ $choco_installed = $other_install -and $python_install
+ if ($choco_installed) {
+ Break
+ }
+}
+
+if (!$choco_installed) {
+ Write-Host "Couldn't install dependencies from Chocolatey"
+ Exit 1
+}
+
+# Add Chocolatey's native install path
+Update-SessionEnvironment
+# Python and CMake add themselves to the system environment path, which doesn't get refreshed
+# until we start a new shell
+$env:PATH = "C:\python3;C:\python3\scripts;C:\Program Files\CMake\bin;$env:PATH"
+
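+# Disable CRLF conversion globally so checked-out sources keep their original line endings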
+Start-Process -NoNewWindow -Wait git -ArgumentList 'config --global core.autocrlf false'
+
+Get-Date
+Write-Host "Installing Meson, Mako and numpy"
+pip3 install meson mako numpy
+if (!$?) {
+ Write-Host "Failed to install dependencies from pip"
+ Exit 1
+}
+
+# We want the more secure TLS 1.2+ for most things, but it breaks SourceForge
+# downloads, so it must only be enabled after Chocolatey has finished
+[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -bor [Net.SecurityProtocolType]::Tls13;
+
+Get-Date
+Write-Host "Cloning LLVM release/12.x"
+git clone -b release/12.x --depth=1 https://github.com/llvm/llvm-project llvm-project
+if (!$?) {
+ Write-Host "Failed to clone LLVM repository"
+ Exit 1
+}
+
+# Ideally we would use a tag here instead of a sha,
+# but as of today, SPIRV-LLVM-Translator doesn't have
+# a tag matching LLVM 12.0.0
+Get-Date
+Write-Host "Cloning SPIRV-LLVM-Translator"
+git clone https://github.com/KhronosGroup/SPIRV-LLVM-Translator llvm-project/llvm/projects/SPIRV-LLVM-Translator
+if (!$?) {
+ Write-Host "Failed to clone SPIRV-LLVM-Translator repository"
+ Exit 1
+}
+Push-Location llvm-project/llvm/projects/SPIRV-LLVM-Translator
+git checkout 5b641633b3bcc3251a52260eee11db13a79d7258
+Pop-Location
+
+Get-Date
+# Slightly convoluted syntax, but it avoids the CWD ending up under the PowerShell filesystem meta-path
+$llvm_build = New-Item -ItemType Directory -Path ".\llvm-project" -Name "build"
+Push-Location -Path $llvm_build.FullName
+Write-Host "Compiling LLVM and Clang"
+cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake ../llvm -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_CRT_RELEASE=MT -DCMAKE_INSTALL_PREFIX="C:\llvm-10" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_TARGETS_TO_BUILD=AMDGPU;X86 -DLLVM_OPTIMIZED_TABLEGEN=TRUE -DLLVM_ENABLE_ASSERTIONS=TRUE -DLLVM_INCLUDE_UTILS=OFF -DLLVM_INCLUDE_RUNTIMES=OFF -DLLVM_INCLUDE_TESTS=OFF -DLLVM_INCLUDE_EXAMPLES=OFF -DLLVM_INCLUDE_GO_TESTS=OFF -DLLVM_INCLUDE_BENCHMARKS=OFF -DLLVM_BUILD_LLVM_C_DYLIB=OFF -DLLVM_ENABLE_DIA_SDK=OFF -DCLANG_BUILD_TOOLS=ON -DLLVM_SPIRV_INCLUDE_TESTS=OFF && ninja -j32 install'
+$buildstatus = $?
+Pop-Location
+if (!$buildstatus) {
+ Write-Host "Failed to compile LLVM"
+ Exit 1
+}
+
+Get-Date
+$libclc_build = New-Item -ItemType Directory -Path ".\llvm-project" -Name "build-libclc"
+Push-Location -Path $libclc_build.FullName
+Write-Host "Compiling libclc"
+# libclc can only be built with Ninja, because CMake's VS backend doesn't know how to compile new language types
+cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake ../libclc -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-m64" -DCMAKE_POLICY_DEFAULT_CMP0091=NEW -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DCMAKE_INSTALL_PREFIX="C:\llvm-10" -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" && ninja -j32 install'
+$buildstatus = $?
+Pop-Location
+Remove-Item -Recurse -Path $libclc_build
+if (!$buildstatus) {
+ Write-Host "Failed to compile libclc"
+ Exit 1
+}
+Remove-Item -Recurse -Path $llvm_build
+
+Get-Date
+Write-Host "Cloning SPIRV-Tools"
+git clone https://github.com/KhronosGroup/SPIRV-Tools
+if (!$?) {
+ Write-Host "Failed to clone SPIRV-Tools repository"
+ Exit 1
+}
+git clone https://github.com/KhronosGroup/SPIRV-Headers SPIRV-Tools/external/SPIRV-Headers
+if (!$?) {
+ Write-Host "Failed to clone SPIRV-Headers repository"
+ Exit 1
+}
+Write-Host "Building SPIRV-Tools"
+$spv_build = New-Item -ItemType Directory -Path ".\SPIRV-Tools" -Name "build"
+Push-Location -Path $spv_build.FullName
+# SPIRV-Tools doesn't use the multi-threaded MSVCRT by default, but we need it to match the rest of the build
+cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_POLICY_DEFAULT_CMP0091=NEW -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DCMAKE_INSTALL_PREFIX="C:\spirv-tools" && ninja -j32 install'
+$buildstatus = $?
+Pop-Location
+Remove-Item -Recurse -Path $spv_build
+if (!$buildstatus) {
+ Write-Host "Failed to compile SPIRV-Tools"
+ Exit 1
+}
+
+# See https://gitlab.freedesktop.org/mesa/mesa/-/issues/3855
+# Until that's resolved, we need the vulkan-runtime as a build dependency to be able to run any unit tests on GL
+Get-Date
+Write-Host "Downloading Vulkan-Runtime"
+Invoke-WebRequest -Uri 'https://sdk.lunarg.com/sdk/download/latest/windows/vulkan-runtime.exe' -OutFile 'C:\vulkan-runtime.exe' | Out-Null
+Write-Host "Installing Vulkan-Runtime"
+Start-Process -NoNewWindow -Wait C:\vulkan-runtime.exe -ArgumentList '/S'
+if (!$?) {
+ Write-Host "Failed to install Vulkan-Runtime"
+ Exit 1
+}
+Remove-Item C:\vulkan-runtime.exe -Force
+
+Get-Date
+Write-Host "Complete"
diff --git a/lib/mesa/.gitlab-ci/windows/mesa_deps_test.ps1 b/lib/mesa/.gitlab-ci/windows/mesa_deps_test.ps1
new file mode 100644
index 000000000..0600be893
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/windows/mesa_deps_test.ps1
@@ -0,0 +1,124 @@
+Get-Date
+Write-Host "Downloading Freeglut"
+
+$freeglut_zip = 'freeglut-MSVC.zip'
+$freeglut_url = "https://www.transmissionzero.co.uk/files/software/development/GLUT/$freeglut_zip"
+
+For ($i = 0; $i -lt 5; $i++) {
+ Invoke-WebRequest -Uri $freeglut_url -OutFile $freeglut_zip
+ $freeglut_downloaded = $?
+ if ($freeglut_downloaded) {
+ Break
+ }
+}
+
+if (!$freeglut_downloaded) {
+ Write-Host "Failed to download Freeglut"
+ Exit 1
+}
+
+Get-Date
+Write-Host "Installing Freeglut"
+Expand-Archive $freeglut_zip -DestinationPath C:\
+if (!$?) {
+ Write-Host "Failed to install Freeglut"
+ Exit 1
+}
+
+Get-Date
+Write-Host "Downloading glext.h"
+New-Item -ItemType Directory -Path ".\glext" -Name "GL"
+$ProgressPreference = "SilentlyContinue"
+Invoke-WebRequest -Uri 'https://www.khronos.org/registry/OpenGL/api/GL/glext.h' -OutFile '.\glext\GL\glext.h' | Out-Null
+
+Get-Date
+Write-Host "Cloning Piglit"
+git clone --no-progress --single-branch --no-checkout https://gitlab.freedesktop.org/mesa/piglit.git 'C:\src\piglit'
+if (!$?) {
+ Write-Host "Failed to clone Piglit repository"
+ Exit 1
+}
+Push-Location -Path C:\src\piglit
+git checkout f7f2a6c2275cae023a27b6cc81be3dda8c99492d
+Pop-Location
+
+Get-Date
+$piglit_build = New-Item -ItemType Directory -Path "C:\src\piglit" -Name "build"
+Push-Location -Path $piglit_build.FullName
+Write-Host "Compiling Piglit"
+cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="C:\Piglit" -DGLUT_INCLUDE_DIR=C:\freeglut\include -DGLUT_glut_LIBRARY_RELEASE=C:\freeglut\lib\x64\freeglut.lib -DGLEXT_INCLUDE_DIR=.\glext && ninja -j32'
+$buildstatus = $?
+ninja -j32 install | Out-Null
+$installstatus = $?
+Pop-Location
+Remove-Item -Recurse -Path $piglit_build
+if (!$buildstatus -Or !$installstatus) {
+ Write-Host "Failed to compile or install Piglit"
+ Exit 1
+}
+
+Copy-Item -Path C:\freeglut\bin\x64\freeglut.dll -Destination C:\Piglit\lib\piglit\bin\freeglut.dll
+
+Get-Date
+Write-Host "Cloning spirv-samples"
+git clone --no-progress --single-branch --no-checkout https://github.com/dneto0/spirv-samples.git C:\spirv-samples\
+Push-Location -Path C:\spirv-samples\
+git checkout 7ac0ad5a7fe0ec884faba1dc2916028d0268eeef
+Pop-Location
+
+Get-Date
+Write-Host "Cloning Vulkan and GL Conformance Tests"
+$deqp_source = "C:\src\VK-GL-CTS\"
+git clone --no-progress --single-branch https://github.com/lfrb/VK-GL-CTS.git -b windows-flush $deqp_source
+if (!$?) {
+ Write-Host "Failed to clone deqp repository"
+ Exit 1
+}
+
+Push-Location -Path $deqp_source
+# --insecure is due to SSL cert failures hitting sourceforge for zlib and
+# libpng (sigh). The archives get their checksums checked anyway, and git
+# always goes through ssh or https.
+py .\external\fetch_sources.py --insecure
+Pop-Location
+
+Get-Date
+$deqp_build = New-Item -ItemType Directory -Path "C:\deqp"
+Push-Location -Path $deqp_build.FullName
+Write-Host "Compiling deqp"
+cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake -S $($deqp_source) -B . -GNinja -DCMAKE_BUILD_TYPE=Release -DDEQP_TARGET=default && ninja -j32"
+$buildstatus = $?
+Pop-Location
+if (!$buildstatus) {
+    Write-Host "Failed to compile deqp"
+ Exit 1
+}
+
+# Copy test result templates
+Copy-Item -Path "$($deqp_source)\doc\testlog-stylesheet\testlog.css" -Destination $deqp_build
+Copy-Item -Path "$($deqp_source)\doc\testlog-stylesheet\testlog.xsl" -Destination $deqp_build
+
+# Copy Vulkan must-pass list
+$deqp_mustpass = New-Item -ItemType Directory -Path $deqp_build -Name "mustpass"
+$root_mustpass = Join-Path -Path $deqp_source -ChildPath "external\vulkancts\mustpass\master"
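+# vk-default.txt lists the individual mustpass files; concatenate them into a single vk-master.txt caselist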
+$files = Get-Content "$($root_mustpass)\vk-default.txt"
+foreach($file in $files) {
+ Get-Content "$($root_mustpass)\$($file)" | Add-Content -Path "$($deqp_mustpass)\vk-master.txt"
+}
+Remove-Item -Force -Recurse $deqp_source
+
+Get-Date
+$url = 'https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe';
+Write-Host ('Downloading {0} ...' -f $url);
+Invoke-WebRequest -Uri $url -OutFile 'rustup-init.exe';
+Write-Host "Installing rust toolchain"
+C:\rustup-init.exe -y;
+Remove-Item C:\rustup-init.exe;
+
+Get-Date
+Write-Host "Installing deqp-runner"
+$env:Path += ";$($env:USERPROFILE)\.cargo\bin"
+cargo install --git https://gitlab.freedesktop.org/anholt/deqp-runner.git
+
+Get-Date
+Write-Host "Complete"
diff --git a/lib/mesa/.gitlab-ci/windows/piglit_run.ps1 b/lib/mesa/.gitlab-ci/windows/piglit_run.ps1
index 3414fc2dc..2205b7ba0 100644
--- a/lib/mesa/.gitlab-ci/windows/piglit_run.ps1
+++ b/lib/mesa/.gitlab-ci/windows/piglit_run.ps1
@@ -9,7 +9,7 @@ cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd6
py -3 C:\Piglit\bin\piglit.py summary console .\results | Select -SkipLast 1 | Select-String -NotMatch -Pattern ': pass' | Set-Content -Path .\result.txt
-$reference = Get-Content ".\_install\$env:PIGLIT_PROFILE.txt"
+$reference = Get-Content ".\_install\$env:PIGLIT_RESULTS.txt"
$result = Get-Content .\result.txt
if (-Not ($reference -And $result)) {
Exit 1
diff --git a/lib/mesa/.gitlab-ci/windows/spirv2dxil_check.ps1 b/lib/mesa/.gitlab-ci/windows/spirv2dxil_check.ps1
new file mode 100644
index 000000000..bfa9fdc3d
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/windows/spirv2dxil_check.ps1
@@ -0,0 +1,54 @@
+# Ensure that dxil.dll is on the %PATH%
+$dxil_dll = cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 -no_logo && where dxil.dll" 2>&1
+if ($dxil_dll -notmatch "dxil.dll$") {
+ Write-Output "Couldn't get path to dxil.dll"
+ exit 1
+}
+$env:Path = "$(Split-Path $dxil_dll);$env:Path"
+
+$exec_mode_to_stage = @{ Fragment = "fragment"; Vertex = "vertex"; GLCompute = "compute" }
+
+$spvasm_files = (Get-ChildItem C:\spirv-samples\spvasm\*.spvasm) | Sort-Object Name
+foreach ($spvasm in $spvasm_files) {
+ $test_name = "Test:$($spvasm.Name):"
+ $spvfile = ($spvasm -replace '\.spvasm$', '.spv')
+ $content = Get-Content $spvasm
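+    # Default to SPIR-V 1.0 unless the sample declares a "Version: X.Y" comment in its header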
+ $spv_version = "1.0"
+ if ($content | Where-Object { $_ -match 'Version:\s(\d+\.\d+)' }) {
+ $spv_version = $Matches[1]
+ }
+
+ $as_output = C:\spirv-tools\bin\spirv-as.exe --target-env spv$spv_version --preserve-numeric-ids -o $spvfile $spvasm 2>&1 | % { if ($_ -is [System.Management.Automation.ErrorRecord]) { $_.Exception.Message } else { $_ } } | Out-String
+ if ($LASTEXITCODE -ne 0) {
+ Write-Output "$test_name Skip: Unable to assemble shader"
+ Write-Output "$as_output`n"
+ continue
+ }
+
+ $entry_points = $content | Select-String -Pattern '^OpEntryPoint\s(\w+)[^"]+"(\w+)"' | Select-Object -ExpandProperty Matches -First 1
+ if ($entry_points.Count -eq 0) {
+ Write-Output "$test_name Skip"
+ Write-Output "No OpEntryPoint not found`n"
+ continue
+ }
+
+ foreach ($match in $entry_points) {
+ $exec_mode, $entry_point = $match.Groups[1].Value, $match.Groups[2].Value
+ $subtest = "$test_name$entry_point|${exec_mode}:"
+ $stage = $exec_mode_to_stage[$exec_mode]
+ if ($stage -eq '') {
+ Write-Output "$subtest Fail: Unknown shader type ($exec_mode)"
+ continue
+ }
+
+ $s2d_output = .\_install\bin\spirv2dxil.exe -v -e "$entry_point" -s "$stage" -o NUL $spvfile 2>&1 | ForEach-Object { if ($_ -is [System.Management.Automation.ErrorRecord]) { $_.Exception.Message } else { $_ } } | Out-String
+ if ($LASTEXITCODE -eq 0) {
+ Write-Output "$subtest Pass"
+ }
+ else {
+ Write-Output "$subtest Fail"
+ $sanitized_output = $s2d_output -replace ', file .+, line \d+' -replace ' In file .+:\d+'
+ Write-Output "$sanitized_output`n"
+ }
+ }
+}
diff --git a/lib/mesa/.gitlab-ci/windows/spirv2dxil_run.ps1 b/lib/mesa/.gitlab-ci/windows/spirv2dxil_run.ps1
new file mode 100644
index 000000000..ae6c1c2cd
--- /dev/null
+++ b/lib/mesa/.gitlab-ci/windows/spirv2dxil_run.ps1
@@ -0,0 +1,16 @@
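+# Run spirv2dxil_check.ps1, capture its output, and diff it against the checked-in reference list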
+. .\_install\spirv2dxil_check.ps1 2>&1 | Set-Content -Path .\spirv2dxil_results.txt
+$reference = Get-Content .\_install\spirv2dxil_reference.txt
+$result = Get-Content .\spirv2dxil_results.txt
+if (-Not ($reference -And $result)) {
+ Exit 1
+}
+
+$diff = Compare-Object -ReferenceObject $reference -DifferenceObject $result
+if (-Not $diff) {
+ Exit 0
+}
+
+Write-Host "Unexpected change in results:"
+Write-Output $diff | Format-Table -Property SideIndicator, InputObject -Wrap
+
+Exit 1