Diffstat (limited to 'lib/libdrm')
-rw-r--r--  lib/libdrm/Makefile                  |   16
-rw-r--r--  lib/libdrm/intel/Makefile            |   36
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.c      |  436
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.h      |  188
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_fake.c | 1522
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_gem.c  | 1301
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_priv.h |  175
-rw-r--r--  lib/libdrm/intel/mm.c                |  281
-rw-r--r--  lib/libdrm/intel/mm.h                |   96
-rw-r--r--  lib/libdrm/intel/shlib_version       |    2
-rw-r--r--  lib/libdrm/libdrm_lists.h            |   87
-rw-r--r--  lib/libdrm/shlib_version             |    2
-rw-r--r--  lib/libdrm/xf86drm.c                 |  232
-rw-r--r--  lib/libdrm/xf86drm.h                 |   13
-rw-r--r--  lib/libdrm/xf86drmMode.c             |  665
-rw-r--r--  lib/libdrm/xf86drmMode.h             |  251
16 files changed, 5203 insertions, 100 deletions
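
For orientation (not part of the commit): a minimal sketch of how the buffer-object API these new files expose is used, based on the declarations in intel_bufmgr.h in the patch below. The device path, batch-size hint, and error handling are illustrative assumptions, not taken from the commit.

    /* Sketch: allocate and fill a buffer object via the GEM bufmgr.
     * Assumes a DRM device node and an i915 GEM-capable kernel driver. */
    #include <fcntl.h>
    #include <string.h>
    #include <drm.h>              /* provides drm_clip_rect_t for intel_bufmgr.h */
    #include "intel_bufmgr.h"

    int
    main(void)
    {
    	drm_intel_bufmgr *bufmgr;
    	drm_intel_bo *bo;
    	char data[4096];
    	int fd;

    	fd = open("/dev/drm0", O_RDWR);       /* device path is illustrative */
    	if (fd < 0)
    		return 1;

    	/* 4096 is an assumed batch-size hint for drm_intel_bufmgr_gem_init. */
    	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    	if (bufmgr == NULL)
    		return 1;

    	bo = drm_intel_bo_alloc(bufmgr, "scratch", sizeof(data), 4096);
    	if (bo == NULL)
    		return 1;

    	/* drm_intel_bo_subdata falls back to map/memcpy/unmap when the
    	 * bufmgr provides no specialized hook (see intel_bufmgr.c below). */
    	memset(data, 0xa5, sizeof(data));
    	drm_intel_bo_subdata(bo, 0, sizeof(data), data);

    	drm_intel_bo_unreference(bo);
    	drm_intel_bufmgr_destroy(bufmgr);
    	return 0;
    }
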
diff --git a/lib/libdrm/Makefile b/lib/libdrm/Makefile index a4ce71c0b..10b0e7b55 100644 --- a/lib/libdrm/Makefile +++ b/lib/libdrm/Makefile @@ -1,26 +1,32 @@ -# $OpenBSD: Makefile,v 1.1 2009/01/10 16:29:26 oga Exp $ +# $OpenBSD: Makefile,v 1.2 2009/01/26 23:14:37 oga Exp $ .include <bsd.xconf.mk> +.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "amd64" +SUBDIR=intel +.endif + LIB= drm DRM_MAJOR= 2 -DRM_MINOR= 3 -DRM_TINY= 1 +DRM_MINOR= 4 +DRM_TINY= 4 INCSDIR= ${X11BASE}/include/ CPP= cpp -notraditional DEBUG?= -CPPFLAGS+= -I. \ +CPPFLAGS+= -I${.CURDIR} \ -I${X11BASE}/include \ -I/usr/include/dev/pci/drm \ -DX_PRIVSEP -INCS= xf86drm.h +INCS= xf86drm.h \ + xf86drmMode.h SRCS= xf86drm.c \ xf86drmHash.c \ + xf86drmMode.c \ xf86drmRandom.c \ xf86drmSL.c diff --git a/lib/libdrm/intel/Makefile b/lib/libdrm/intel/Makefile new file mode 100644 index 000000000..0c82559bc --- /dev/null +++ b/lib/libdrm/intel/Makefile @@ -0,0 +1,36 @@ +# $OpenBSD: Makefile,v 1.1 2009/01/26 23:14:37 oga Exp $ +.include <bsd.xconf.mk> + +LIB= drm_intel + +INCSDIR= ${X11BASE}/include/ + +CPPFLAGS+= -I${.CURDIR} \ + -I${.CURDIR}/.. \ + -I${X11BASE}/include \ + -I/usr/include/dev/pci/drm + +INCS= intel_bufmgr.h + +SRCS= intel_bufmgr.c \ + intel_bufmgr_fake.c \ + intel_bufmgr_gem.c \ + mm.c + +LDADD+= -L${X11BASE}/lib -lX11 + +includes: + cd ${.CURDIR}; for i in ${INCS}; do \ + j="cmp -s $$i ${DESTDIR}${INCSDIR}/$$i || \ + ${INSTALL} ${INSTALL_COPY} -o ${BINOWN} -g ${BINGRP} -m 444 \ + $$i ${DESTDIR}${INCSDIR}/"; \ + echo "\tinstalling $$i"; \ + eval "$$j"; \ + done + +NOPROFILE = + +obj: _xenocara_obj + +.include <bsd.lib.mk> +.include <bsd.xorg.mk> diff --git a/lib/libdrm/intel/intel_bufmgr.c b/lib/libdrm/intel/intel_bufmgr.c new file mode 100644 index 000000000..ac9b5f578 --- /dev/null +++ b/lib/libdrm/intel/intel_bufmgr.c @@ -0,0 +1,436 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <string.h> +#include <stdlib.h> +#include <stdint.h> +#include <assert.h> +#include <errno.h> +#include <drm.h> +#include <i915_drm.h> +#include "intel_bufmgr.h" +#include "intel_bufmgr_priv.h" + +/** @file intel_bufmgr.c + * + * Convenience functions for buffer management methods. 
+ */ + +drm_intel_bo * +drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment) +{ + return bufmgr->bo_alloc(bufmgr, name, size, alignment); +} + +void +drm_intel_bo_reference(drm_intel_bo *bo) +{ + bo->bufmgr->bo_reference(bo); +} + +void +drm_intel_bo_unreference(drm_intel_bo *bo) +{ + if (bo == NULL) + return; + + bo->bufmgr->bo_unreference(bo); +} + +int +drm_intel_bo_map(drm_intel_bo *buf, int write_enable) +{ + return buf->bufmgr->bo_map(buf, write_enable); +} + +int +drm_intel_bo_unmap(drm_intel_bo *buf) +{ + return buf->bufmgr->bo_unmap(buf); +} + +int +drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset, + unsigned long size, const void *data) +{ + int ret; + + if (bo->bufmgr->bo_subdata) + return bo->bufmgr->bo_subdata(bo, offset, size, data); + if (size == 0 || data == NULL) + return 0; + + ret = drm_intel_bo_map(bo, 1); + if (ret) + return ret; + memcpy((unsigned char *)bo->virtual + offset, data, size); + drm_intel_bo_unmap(bo); + return 0; +} + +int +drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset, + unsigned long size, void *data) +{ + int ret; + if (bo->bufmgr->bo_subdata) + return bo->bufmgr->bo_get_subdata(bo, offset, size, data); + + if (size == 0 || data == NULL) + return 0; + + ret = drm_intel_bo_map(bo, 0); + if (ret) + return ret; + memcpy(data, (unsigned char *)bo->virtual + offset, size); + drm_intel_bo_unmap(bo); + return 0; +} + +void +drm_intel_bo_wait_rendering(drm_intel_bo *bo) +{ + bo->bufmgr->bo_wait_rendering(bo); +} + +void +drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr) +{ + bufmgr->destroy(bufmgr); +} + +int +drm_intel_bo_exec(drm_intel_bo *bo, int used, + drm_clip_rect_t *cliprects, int num_cliprects, + int DR4) +{ + return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4); +} + +void +drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug) +{ + bufmgr->debug = enable_debug; +} + +int +drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count) +{ + return bo_array[0]->bufmgr->check_aperture_space(bo_array, count); +} + +int +drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name) +{ + if (bo->bufmgr->bo_flink) + return bo->bufmgr->bo_flink(bo, name); + + return -ENODEV; +} + +int +drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, + drm_intel_bo *target_bo, uint32_t target_offset, + uint32_t read_domains, uint32_t write_domain) +{ + return bo->bufmgr->bo_emit_reloc(bo, offset, + target_bo, target_offset, + read_domains, write_domain); +} + +int +drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment) +{ + if (bo->bufmgr->bo_pin) + return bo->bufmgr->bo_pin(bo, alignment); + + return -ENODEV; +} + +int +drm_intel_bo_unpin(drm_intel_bo *bo) +{ + if (bo->bufmgr->bo_unpin) + return bo->bufmgr->bo_unpin(bo); + + return -ENODEV; +} + +int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t stride) +{ + if (bo->bufmgr->bo_set_tiling) + return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride); + + *tiling_mode = I915_TILING_NONE; + return 0; +} + +int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t *swizzle_mode) +{ + if (bo->bufmgr->bo_get_tiling) + return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode); + + *tiling_mode = I915_TILING_NONE; + *swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + return 0; +} + +#if 0 +/* + * $XFree86: xc/lib/XThrStub/UIThrStubs.c,v 3.3 2001/11/18 21:13:26 herrb Exp $ + * + * Copyright (c) 1995 David E. Wexelblat. 
All rights reserved + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL DAVID E. WEXELBLAT BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Except as contained in this notice, the name of David E. Wexelblat shall + * not be used in advertising or otherwise to promote the sale, use or + * other dealings in this Software without prior written authorization + * from David E. Wexelblat. + * + */ + +/* + * Stubs for thread functions needed by the X library. Supports + * UnixWare 2.x threads; may support Solaris 2 threads as well, but not + * tested. Defining things this way removes the dependency of the X + * library on the threads library, but still supports threads if the user + * specificies the thread library on the link line. + */ + +/* + * Modifications by Carlos A M dos Santos, XFree86 Project, November 1999. + * + * Explanation from <X11/Xos_r.h>: + * The structure below is complicated, mostly because P1003.1c (the + * IEEE POSIX Threads spec) went through lots of drafts, and some + * vendors shipped systems based on draft API that were changed later. + * Unfortunately POSIX did not provide a feature-test macro for + * distinguishing each of the drafts. 
+ */ + +#include <stdlib.h> + +static int _Xthr_once_stub_(void *, void (*)(void)); +static int _Xthr_key_create_stub_(unsigned int *, void (*)(void *)); +static int _Xthr_setspecific_stub_(unsigned int, const void *); +static void *_Xthr_getspecific_stub_(unsigned int); + +#ifdef CTHREADS +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif +#include <cthreads.h> +typedef cthread_t xthread_t; +#define xthread_self cthread_self +#pragma weak cthread_self = _Xthr_self_stub_ +#define xmutex_init mutex_init +#pragma weak mutex_init = _Xthr_zero_stub_ +#pragma weak mutex_clear = _Xthr_zero_stub_ +#pragma weak mutex_lock = _Xthr_zero_stub_ +#pragma weak mutex_unlock = _Xthr_zero_stub_ +#pragma weak condition_init = _Xthr_zero_stub_ +#pragma weak condition_clear = _Xthr_zero_stub_ +#pragma weak condition_wait = _Xthr_zero_stub_ +#pragma weak condition_signal = _Xthr_zero_stub_ +#pragma weak condition_broadcast = _Xthr_zero_stub_ +#else /* !CTHREADS */ +#if defined(SVR4) && !defined(__sgi) +#include <thread.h> +typedef thread_t xthread_t; +#pragma weak thr_self = _Xthr_self_stub_ +#pragma weak mutex_init = _Xthr_zero_stub_ +#pragma weak mutex_destroy = _Xthr_zero_stub_ +#pragma weak mutex_lock = _Xthr_zero_stub_ +#pragma weak mutex_unlock = _Xthr_zero_stub_ +#pragma weak cond_init = _Xthr_zero_stub_ +#pragma weak cond_destroy = _Xthr_zero_stub_ +#pragma weak cond_wait = _Xthr_zero_stub_ +#pragma weak cond_signal = _Xthr_zero_stub_ +#pragma weak cond_broadcast = _Xthr_zero_stub_ +#else /* !SVR4 */ +#ifdef WIN32 + /* + * Don't know what to do here. Is there something do be done at all? + */ +#else /* !WIN32 */ +#ifdef USE_TIS_SUPPORT +#include <tis.h> +typedef pthread_t xthread_t; +#pragma weak tis_self = _Xthr_self_stub_ +#pragma weak tis_mutex_init = _Xthr_zero_stub_ +#pragma weak tis_mutex_destroy = _Xthr_zero_stub_ +#pragma weak tis_mutex_lock = _Xthr_zero_stub_ +#pragma weak tis_mutex_unlock = _Xthr_zero_stub_ +#pragma weak tis_cond_init = _Xthr_zero_stub_ +#pragma weak tis_cond_destroy = _Xthr_zero_stub_ +#pragma weak tis_cond_wait = _Xthr_zero_stub_ +#pragma weak tis_cond_signal = _Xthr_zero_stub_ +#pragma weak tis_cond_broadcast = _Xthr_zero_stub_ +#else +#include <pthread.h> +typedef pthread_t xthread_t; +#if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95) +xthread_t pthread_self() __attribute__ ((weak, alias ("_Xthr_self_stub_"))); +int pthread_mutex_init() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_mutex_destroy() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_mutex_lock() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_mutex_unlock() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_cond_init() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_cond_destroy() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_cond_wait() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_cond_signal() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_cond_broadcast() __attribute__ ((weak, alias ("_Xthr_zero_stub_"))); +int pthread_key_create() __attribute__ ((weak, alias ("_Xthr_key_create_stub_"))); +void *pthread_getspecific() __attribute__ ((weak, alias ("_Xthr_getspecific_stub_"))); +int pthread_setspecific() __attribute__ ((weak, alias ("_Xthr_setspecific_stub_"))); +int pthread_once() __attribute__ ((weak, alias ("_Xthr_once_stub_"))); +#else /* __GNUC__ */ +#pragma weak pthread_self = _Xthr_self_stub_ +#pragma weak pthread_mutex_init = _Xthr_zero_stub_ +#pragma 
weak pthread_mutex_destroy = _Xthr_zero_stub_ +#pragma weak pthread_mutex_lock = _Xthr_zero_stub_ +#pragma weak pthread_mutex_unlock = _Xthr_zero_stub_ +#pragma weak pthread_cond_init = _Xthr_zero_stub_ +#pragma weak pthread_cond_destroy = _Xthr_zero_stub_ +#pragma weak pthread_cond_wait = _Xthr_zero_stub_ +#pragma weak pthread_cond_signal = _Xthr_zero_stub_ +#pragma weak pthread_cond_broadcast = _Xthr_zero_stub_ +/* These are added for libGL */ +#pragma weak pthread_key_create = _Xthr_key_create_stub_ +#pragma weak pthread_getspecific = _Xthr_getspecific_stub_ +#pragma weak pthread_setspecific = _Xthr_setspecific_stub_ +#pragam weak pthread_once = _Xthr_once_stub_ +#endif /* __GNUC__ */ +#if defined(_DECTHREADS_) || defined(linux) +#pragma weak pthread_equal = _Xthr_equal_stub_ /* See Xthreads.h! */ +int +_Xthr_equal_stub_() +{ + return(1); +} +#endif /* _DECTHREADS_ || linux */ +#endif /* USE_TIS_SUPPORT */ +#endif /* WIN32 */ +#endif /* SVR4 */ +#endif /* CTHREADS */ + +static xthread_t +_Xthr_self_stub_() +{ + static xthread_t _X_no_thread_id; + + return(_X_no_thread_id); /* defined by <X11/Xthreads.h> */ +} + +static int +_Xthr_zero_stub_() +{ + return(0); +} + +static int +_Xthr_once_stub_(void *id, void (*routine)(void)) +{ + static int done = 0; + + if (!done) { + routine(); + done++; + } + return 0; +} + +#include <errno.h> + +#define XTHR_KEYS_CHUNK 100 + +static void **_Xthr_keys_ = NULL; +static unsigned int _Xthr_last_key_ = 0; + +static int +_Xthr_key_create_stub_(unsigned int *key, void (*destructor)(void *)) +{ + void **tmp; + unsigned int i; + + if ((_Xthr_last_key_ % XTHR_KEYS_CHUNK) == 0) { + tmp = realloc(_Xthr_keys_, + (_Xthr_last_key_ + XTHR_KEYS_CHUNK)*sizeof(void *)); + if (tmp == NULL) { + free(_Xthr_keys_); + return ENOMEM; + } + for (i = 0; i < XTHR_KEYS_CHUNK; i++) + tmp[_Xthr_last_key_ + i] = 0; + _Xthr_keys_ = tmp; + } + *key = _Xthr_last_key_++; + return 0; +} + +static int +_Xthr_setspecific_stub_(unsigned int key, const void *value) +{ + if (_Xthr_last_key_ == 0 || key >= _Xthr_last_key_) + return EINVAL; + _Xthr_keys_[key] = value; + return 0; +} + +static void * +_Xthr_getspecific_stub_(unsigned int key) +{ + if (_Xthr_last_key_ == 0 || key >= _Xthr_last_key_) + return NULL; + return(_Xthr_keys_[key]); +} +#endif diff --git a/lib/libdrm/intel/intel_bufmgr.h b/lib/libdrm/intel/intel_bufmgr.h new file mode 100644 index 000000000..e8c2e063c --- /dev/null +++ b/lib/libdrm/intel/intel_bufmgr.h @@ -0,0 +1,188 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * + */ + +/** + * @file intel_bufmgr.h + * + * Public definitions of Intel-specific bufmgr functions. + */ + +#ifndef INTEL_BUFMGR_H +#define INTEL_BUFMGR_H + +#include <stdint.h> + +typedef struct _drm_intel_bufmgr drm_intel_bufmgr; +typedef struct _drm_intel_bo drm_intel_bo; + +struct _drm_intel_bo { + /** + * Size in bytes of the buffer object. + * + * The size may be larger than the size originally requested for the + * allocation, such as being aligned to page size. + */ + unsigned long size; + /** + * Alignment requirement for object + * + * Used for GTT mapping & pinning the object. + */ + unsigned long align; + + /** + * Card virtual address (offset from the beginning of the aperture) for the + * object. Only valid while validated. + */ + unsigned long offset; + /** + * Virtual address for accessing the buffer data. Only valid while mapped. + */ + void *virtual; + + /** Buffer manager context associated with this buffer object */ + drm_intel_bufmgr *bufmgr; + + /** + * MM-specific handle for accessing object + */ + int handle; +}; + +drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment); +void drm_intel_bo_reference(drm_intel_bo *bo); +void drm_intel_bo_unreference(drm_intel_bo *bo); +int drm_intel_bo_map(drm_intel_bo *bo, int write_enable); +int drm_intel_bo_unmap(drm_intel_bo *bo); + +int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset, + unsigned long size, const void *data); +int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset, + unsigned long size, void *data); +void drm_intel_bo_wait_rendering(drm_intel_bo *bo); + +void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug); +void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr); +int drm_intel_bo_exec(drm_intel_bo *bo, int used, + drm_clip_rect_t *cliprects, int num_cliprects, + int DR4); +int drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count); + +int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, + drm_intel_bo *target_bo, uint32_t target_offset, + uint32_t read_domains, uint32_t write_domain); +int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment); +int drm_intel_bo_unpin(drm_intel_bo *bo); +int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t stride); +int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t *swizzle_mode); +int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name); + +/* drm_intel_bufmgr_gem.c */ +drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size); +drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, + const char *name, + unsigned int handle); +void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr); +int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo); +void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable); + +/* drm_intel_bufmgr_fake.c */ +drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd, + unsigned long low_offset, + void *low_virtual, + unsigned long size, + volatile unsigned int *last_dispatch); +void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr, + volatile unsigned int *last_dispatch); +void 
drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr, + int (*exec)(drm_intel_bo *bo, + unsigned int used, + void *priv), + void *priv); +void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr, + unsigned int (*emit)(void *priv), + void (*wait)(unsigned int fence, + void *priv), + void *priv); +drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, + const char *name, + unsigned long offset, unsigned long size, + void *virtual); +void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo, + void (*invalidate_cb)(drm_intel_bo *bo, + void *ptr), + void *ptr); + +void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr); +void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr); + +/** @{ Compatibility defines to keep old code building despite the symbol rename + * from dri_* to drm_intel_* + */ +#define dri_bo drm_intel_bo +#define dri_bufmgr drm_intel_bufmgr +#define dri_bo_alloc drm_intel_bo_alloc +#define dri_bo_reference drm_intel_bo_reference +#define dri_bo_unreference drm_intel_bo_unreference +#define dri_bo_map drm_intel_bo_map +#define dri_bo_unmap drm_intel_bo_unmap +#define dri_bo_subdata drm_intel_bo_subdata +#define dri_bo_get_subdata drm_intel_bo_get_subdata +#define dri_bo_wait_rendering drm_intel_bo_wait_rendering +#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug +#define dri_bufmgr_destroy drm_intel_bufmgr_destroy +#define dri_bo_exec drm_intel_bo_exec +#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space +#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \ + reloc_offset, target_bo) \ + drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \ + target_bo, target_offset, \ + read, write); +#define dri_bo_pin drm_intel_bo_pin +#define dri_bo_unpin drm_intel_bo_unpin +#define dri_bo_get_tiling drm_intel_bo_get_tiling +#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0) +#define dri_bo_flink drm_intel_bo_flink +#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init +#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name +#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse +#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init +#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch +#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback +#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback +#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static +#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store +#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take +#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all + +/** @{ */ + +#endif /* INTEL_BUFMGR_H */ + diff --git a/lib/libdrm/intel/intel_bufmgr_fake.c b/lib/libdrm/intel/intel_bufmgr_fake.c new file mode 100644 index 000000000..6c2162543 --- /dev/null +++ b/lib/libdrm/intel/intel_bufmgr_fake.c @@ -0,0 +1,1522 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/* Originally a fake version of the buffer manager so that we can + * prototype the changes in a driver fairly quickly, has been fleshed + * out to a fully functional interim solution. + * + * Basically wraps the old style memory management in the new + * programming interface, but is more expressive and avoids many of + * the bugs in the old texture manager. + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <stdlib.h> +#include <string.h> +#include <assert.h> +#include <errno.h> +#include <xf86drm.h> +#include <pthread.h> +#include "intel_bufmgr.h" +#include "intel_bufmgr_priv.h" +#include "drm.h" +#include "i915_drm.h" +#include "mm.h" +#include "libdrm_lists.h" + +#define ALIGN(value, alignment) ((value + alignment - 1) & ~(alignment - 1)) + +#define DBG(...) do { \ + if (bufmgr_fake->bufmgr.debug) \ + drmMsg(__VA_ARGS__); \ +} while (0) + +/* Internal flags: + */ +#define BM_NO_BACKING_STORE 0x00000001 +#define BM_NO_FENCE_SUBDATA 0x00000002 +#define BM_PINNED 0x00000004 + +/* Wrapper around mm.c's mem_block, which understands that you must + * wait for fences to expire before memory can be freed. This is + * specific to our use of memcpy for uploads - an upload that was + * processed through the command queue wouldn't need to care about + * fences. + */ +#define MAX_RELOCS 4096 + +struct fake_buffer_reloc +{ + /** Buffer object that the relocation points at. */ + drm_intel_bo *target_buf; + /** Offset of the relocation entry within reloc_buf. */ + uint32_t offset; + /** Cached value of the offset when we last performed this relocation. */ + uint32_t last_target_offset; + /** Value added to target_buf's offset to get the relocation entry. */ + uint32_t delta; + /** Cache domains the target buffer is read into. */ + uint32_t read_domains; + /** Cache domain the target buffer will have dirty cachelines in. */ + uint32_t write_domain; +}; + +struct block { + struct block *next, *prev; + struct mem_block *mem; /* BM_MEM_AGP */ + + /** + * Marks that the block is currently in the aperture and has yet to be + * fenced. + */ + unsigned on_hardware:1; + /** + * Marks that the block is currently fenced (being used by rendering) and + * can't be freed until @fence is passed. + */ + unsigned fenced:1; + + /** Fence cookie for the block. 
*/ + unsigned fence; /* Split to read_fence, write_fence */ + + drm_intel_bo *bo; + void *virtual; +}; + +typedef struct _bufmgr_fake { + drm_intel_bufmgr bufmgr; + + pthread_mutex_t lock; + + unsigned long low_offset; + unsigned long size; + void *virtual; + + struct mem_block *heap; + + unsigned buf_nr; /* for generating ids */ + + /** + * List of blocks which are currently in the GART but haven't been + * fenced yet. + */ + struct block on_hardware; + /** + * List of blocks which are in the GART and have an active fence on them. + */ + struct block fenced; + /** + * List of blocks which have an expired fence and are ready to be evicted. + */ + struct block lru; + + unsigned int last_fence; + + unsigned fail:1; + unsigned need_fence:1; + int thrashing; + + /** + * Driver callback to emit a fence, returning the cookie. + * + * This allows the driver to hook in a replacement for the DRM usage in + * bufmgr_fake. + * + * Currently, this also requires that a write flush be emitted before + * emitting the fence, but this should change. + */ + unsigned int (*fence_emit)(void *private); + /** Driver callback to wait for a fence cookie to have passed. */ + void (*fence_wait)(unsigned int fence, void *private); + void *fence_priv; + + /** + * Driver callback to execute a buffer. + * + * This allows the driver to hook in a replacement for the DRM usage in + * bufmgr_fake. + */ + int (*exec)(drm_intel_bo *bo, unsigned int used, void *priv); + void *exec_priv; + + /** Driver-supplied argument to driver callbacks */ + void *driver_priv; + /* Pointer to kernel-updated sarea data for the last completed user irq */ + volatile int *last_dispatch; + + int fd; + + int debug; + + int performed_rendering; +} drm_intel_bufmgr_fake; + +typedef struct _drm_intel_bo_fake { + drm_intel_bo bo; + + unsigned id; /* debug only */ + const char *name; + + unsigned dirty:1; + /** has the card written to this buffer - we make need to copy it back */ + unsigned card_dirty:1; + unsigned int refcount; + /* Flags may consist of any of the DRM_BO flags, plus + * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two + * driver private flags. + */ + uint64_t flags; + /** Cache domains the target buffer is read into. */ + uint32_t read_domains; + /** Cache domain the target buffer will have dirty cachelines in. */ + uint32_t write_domain; + + unsigned int alignment; + int is_static, validated; + unsigned int map_count; + + /** relocation list */ + struct fake_buffer_reloc *relocs; + int nr_relocs; + /** + * Total size of the target_bos of this buffer. + * + * Used for estimation in check_aperture. 
+ */ + unsigned int child_size; + + struct block *block; + void *backing_store; + void (*invalidate_cb)(drm_intel_bo *bo, void *ptr); + void *invalidate_ptr; +} drm_intel_bo_fake; + +static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, + unsigned int fence_cookie); + +#define MAXFENCE 0x7fffffff + +static int FENCE_LTE( unsigned a, unsigned b ) +{ + if (a == b) + return 1; + + if (a < b && b - a < (1<<24)) + return 1; + + if (a > b && MAXFENCE - a + b < (1<<24)) + return 1; + + return 0; +} + +void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr, + unsigned int (*emit)(void *priv), + void (*wait)(unsigned int fence, + void *priv), + void *priv) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr; + + bufmgr_fake->fence_emit = emit; + bufmgr_fake->fence_wait = wait; + bufmgr_fake->fence_priv = priv; +} + +static unsigned int +_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake) +{ + struct drm_i915_irq_emit ie; + int ret, seq = 1; + + if (bufmgr_fake->fence_emit != NULL) { + seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv); + return seq; + } + + ie.irq_seq = &seq; + ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT, + &ie, sizeof(ie)); + if (ret) { + drmMsg("%s: drm_i915_irq_emit: %d\n", __FUNCTION__, ret); + abort(); + } + + DBG("emit 0x%08x\n", seq); + return seq; +} + +static void +_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq) +{ + struct drm_i915_irq_wait iw; + int hw_seq, busy_count = 0; + int ret; + int kernel_lied; + + if (bufmgr_fake->fence_wait != NULL) { + bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv); + clear_fenced(bufmgr_fake, seq); + return; + } + + DBG("wait 0x%08x\n", iw.irq_seq); + + iw.irq_seq = seq; + + /* The kernel IRQ_WAIT implementation is all sorts of broken. + * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit unsigned + * range. + * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit + * signed range. + * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit + * signed range. + * 4) It returns -EBUSY in 3 seconds even if the hardware is still + * successfully chewing through buffers. + * + * Assume that in userland we treat sequence numbers as ints, which makes + * some of the comparisons convenient, since the sequence numbers are + * all postive signed integers. + * + * From this we get several cases we need to handle. Here's a timeline. + * 0x2 0x7 0x7ffffff8 0x7ffffffd + * | | | | + * ------------------------------------------------------------------- + * + * A) Normal wait for hw to catch up + * hw_seq seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = 5. If we call IRQ_WAIT, it will wait for hw to catch up. + * + * B) Normal wait for a sequence number that's already passed. + * seq hw_seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = -5. If we call IRQ_WAIT, it returns 0 quickly. + * + * C) Hardware has already wrapped around ahead of us + * hw_seq seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = 0x80000000 - 5. If we called IRQ_WAIT, it would wait + * for hw_seq >= seq, which may never occur. Thus, we want to catch this + * in userland and return 0. + * + * D) We've wrapped around ahead of the hardware. + * seq hw_seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = -(0x80000000 - 5). 
If we called IRQ_WAIT, it would return + * 0 quickly because hw_seq >= seq, even though the hardware isn't caught up. + * Thus, we need to catch this early return in userland and bother the + * kernel until the hardware really does catch up. + * + * E) Hardware might wrap after we test in userland. + * hw_seq seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = 5. If we call IRQ_WAIT, it will likely see seq >= hw_seq + * and wait. However, suppose hw_seq wraps before we make it into the + * kernel. The kernel sees hw_seq >= seq and waits for 3 seconds then + * returns -EBUSY. This is case C). We should catch this and then return + * successfully. + * + * F) Hardware might take a long time on a buffer. + * hw_seq seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = 5. If we call IRQ_WAIT, if sequence 2 through 5 take too + * long, it will return -EBUSY. Batchbuffers in the gltestperf demo were + * seen to take up to 7 seconds. We should catch early -EBUSY return + * and keep trying. + */ + + do { + /* Keep a copy of last_dispatch so that if the wait -EBUSYs because the + * hardware didn't catch up in 3 seconds, we can see if it at least made + * progress and retry. + */ + hw_seq = *bufmgr_fake->last_dispatch; + + /* Catch case C */ + if (seq - hw_seq > 0x40000000) + return; + + ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT, + &iw, sizeof(iw)); + /* Catch case D */ + kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch < + -0x40000000); + + /* Catch case E */ + if (ret == -EBUSY && (seq - *bufmgr_fake->last_dispatch > 0x40000000)) + ret = 0; + + /* Catch case F: Allow up to 15 seconds chewing on one buffer. */ + if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch)) + busy_count = 0; + else + busy_count++; + } while (kernel_lied || ret == -EAGAIN || ret == -EINTR || + (ret == -EBUSY && busy_count < 5)); + + if (ret != 0) { + drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__, __LINE__, + strerror(-ret)); + abort(); + } + clear_fenced(bufmgr_fake, seq); +} + +static int +_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence) +{ + /* Slight problem with wrap-around: + */ + return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence); +} + +/** + * Allocate a memory manager block for the buffer. + */ +static int +alloc_block(drm_intel_bo *bo) +{ + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + drm_intel_bufmgr_fake *bufmgr_fake= (drm_intel_bufmgr_fake *)bo->bufmgr; + struct block *block = (struct block *)calloc(sizeof *block, 1); + unsigned int align_log2 = ffs(bo_fake->alignment) - 1; + unsigned int sz; + + if (!block) + return 1; + + sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1); + + block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0); + if (!block->mem) { + free(block); + return 0; + } + + DRMINITLISTHEAD(block); + + /* Insert at head or at tail??? 
+ */ + DRMLISTADDTAIL(block, &bufmgr_fake->lru); + + block->virtual = (uint8_t *)bufmgr_fake->virtual + + block->mem->ofs - bufmgr_fake->low_offset; + block->bo = bo; + + bo_fake->block = block; + + return 1; +} + +/* Release the card storage associated with buf: + */ +static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block) +{ + drm_intel_bo_fake *bo_fake; + DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced); + + if (!block) + return; + + bo_fake = (drm_intel_bo_fake *)block->bo; + if (!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)) && (bo_fake->card_dirty == 1)) { + memcpy(bo_fake->backing_store, block->virtual, block->bo->size); + bo_fake->card_dirty = 0; + bo_fake->dirty = 1; + } + + if (block->on_hardware) { + block->bo = NULL; + } + else if (block->fenced) { + block->bo = NULL; + } + else { + DBG(" - free immediately\n"); + DRMLISTDEL(block); + + mmFreeMem(block->mem); + free(block); + } +} + +static void +alloc_backing_store(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + assert(!bo_fake->backing_store); + assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE))); + + bo_fake->backing_store = malloc(bo->size); + + DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size); + assert(bo_fake->backing_store); +} + +static void +free_backing_store(drm_intel_bo *bo) +{ + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + if (bo_fake->backing_store) { + assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE))); + free(bo_fake->backing_store); + bo_fake->backing_store = NULL; + } +} + +static void +set_dirty(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL) + bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr); + + assert(!(bo_fake->flags & BM_PINNED)); + + DBG("set_dirty - buf %d\n", bo_fake->id); + bo_fake->dirty = 1; +} + +static int +evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence) +{ + struct block *block, *tmp; + + DBG("%s\n", __FUNCTION__); + + DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) { + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo; + + if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA)) + continue; + + if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence)) + return 0; + + set_dirty(&bo_fake->bo); + bo_fake->block = NULL; + + free_block(bufmgr_fake, block); + return 1; + } + + return 0; +} + +static int +evict_mru(drm_intel_bufmgr_fake *bufmgr_fake) +{ + struct block *block, *tmp; + + DBG("%s\n", __FUNCTION__); + + DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) { + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo; + + if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA)) + continue; + + set_dirty(&bo_fake->bo); + bo_fake->block = NULL; + + free_block(bufmgr_fake, block); + return 1; + } + + return 0; +} + +/** + * Removes all objects from the fenced list older than the given fence. 
+ */ +static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, + unsigned int fence_cookie) +{ + struct block *block, *tmp; + int ret = 0; + + bufmgr_fake->last_fence = fence_cookie; + DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) { + assert(block->fenced); + + if (_fence_test(bufmgr_fake, block->fence)) { + + block->fenced = 0; + + if (!block->bo) { + DBG("delayed free: offset %x sz %x\n", + block->mem->ofs, block->mem->size); + DRMLISTDEL(block); + mmFreeMem(block->mem); + free(block); + } + else { + DBG("return to lru: offset %x sz %x\n", + block->mem->ofs, block->mem->size); + DRMLISTDEL(block); + DRMLISTADDTAIL(block, &bufmgr_fake->lru); + } + + ret = 1; + } + else { + /* Blocks are ordered by fence, so if one fails, all from + * here will fail also: + */ + DBG("fence not passed: offset %x sz %x %d %d \n", + block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence); + break; + } + } + + DBG("%s: %d\n", __FUNCTION__, ret); + return ret; +} + +static void fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence) +{ + struct block *block, *tmp; + + DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) { + DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block, + block->mem->size, block->mem->ofs, block->bo, fence); + block->fence = fence; + + block->on_hardware = 0; + block->fenced = 1; + + /* Move to tail of pending list here + */ + DRMLISTDEL(block); + DRMLISTADDTAIL(block, &bufmgr_fake->fenced); + } + + assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware)); +} + +static int evict_and_alloc_block(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + assert(bo_fake->block == NULL); + + /* Search for already free memory: + */ + if (alloc_block(bo)) + return 1; + + /* If we're not thrashing, allow lru eviction to dig deeper into + * recently used textures. We'll probably be thrashing soon: + */ + if (!bufmgr_fake->thrashing) { + while (evict_lru(bufmgr_fake, 0)) + if (alloc_block(bo)) + return 1; + } + + /* Keep thrashing counter alive? + */ + if (bufmgr_fake->thrashing) + bufmgr_fake->thrashing = 20; + + /* Wait on any already pending fences - here we are waiting for any + * freed memory that has been submitted to hardware and fenced to + * become available: + */ + while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) { + uint32_t fence = bufmgr_fake->fenced.next->fence; + _fence_wait_internal(bufmgr_fake, fence); + + if (alloc_block(bo)) + return 1; + } + + if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) { + while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) { + uint32_t fence = bufmgr_fake->fenced.next->fence; + _fence_wait_internal(bufmgr_fake, fence); + } + + if (!bufmgr_fake->thrashing) { + DBG("thrashing\n"); + } + bufmgr_fake->thrashing = 20; + + if (alloc_block(bo)) + return 1; + } + + while (evict_mru(bufmgr_fake)) + if (alloc_block(bo)) + return 1; + + DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size); + + return 0; +} + +/*********************************************************************** + * Public functions + */ + +/** + * Wait for hardware idle by emitting a fence and waiting for it. + */ +static void +drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake) +{ + unsigned int cookie; + + cookie = _fence_emit_internal(bufmgr_fake); + _fence_wait_internal(bufmgr_fake, cookie); +} + +/** + * Wait for rendering to a buffer to complete. 
+ * + * It is assumed that the bathcbuffer which performed the rendering included + * the necessary flushing. + */ +static void +drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + if (bo_fake->block == NULL || !bo_fake->block->fenced) + return; + + _fence_wait_internal(bufmgr_fake, bo_fake->block->fence); +} + +static void +drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + + pthread_mutex_lock(&bufmgr_fake->lock); + drm_intel_fake_bo_wait_rendering_locked(bo); + pthread_mutex_unlock(&bufmgr_fake->lock); +} + +/* Specifically ignore texture memory sharing. + * -- just evict everything + * -- and wait for idle + */ +void +drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr; + struct block *block, *tmp; + + pthread_mutex_lock(&bufmgr_fake->lock); + + bufmgr_fake->need_fence = 1; + bufmgr_fake->fail = 0; + + /* Wait for hardware idle. We don't know where acceleration has been + * happening, so we'll need to wait anyway before letting anything get + * put on the card again. + */ + drm_intel_bufmgr_fake_wait_idle(bufmgr_fake); + + /* Check that we hadn't released the lock without having fenced the last + * set of buffers. + */ + assert(DRMLISTEMPTY(&bufmgr_fake->fenced)); + assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware)); + + DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) { + assert(_fence_test(bufmgr_fake, block->fence)); + set_dirty(block->bo); + } + + pthread_mutex_unlock(&bufmgr_fake->lock); +} + +static drm_intel_bo * +drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment) +{ + drm_intel_bufmgr_fake *bufmgr_fake; + drm_intel_bo_fake *bo_fake; + + bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr; + + assert(size != 0); + + bo_fake = calloc(1, sizeof(*bo_fake)); + if (!bo_fake) + return NULL; + + bo_fake->bo.size = size; + bo_fake->bo.offset = -1; + bo_fake->bo.virtual = NULL; + bo_fake->bo.bufmgr = bufmgr; + bo_fake->refcount = 1; + + /* Alignment must be a power of two */ + assert((alignment & (alignment - 1)) == 0); + if (alignment == 0) + alignment = 1; + bo_fake->alignment = alignment; + bo_fake->id = ++bufmgr_fake->buf_nr; + bo_fake->name = name; + bo_fake->flags = 0; + bo_fake->is_static = 0; + + DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, + bo_fake->bo.size / 1024); + + return &bo_fake->bo; +} + +drm_intel_bo * +drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name, + unsigned long offset, unsigned long size, + void *virtual) +{ + drm_intel_bufmgr_fake *bufmgr_fake; + drm_intel_bo_fake *bo_fake; + + bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr; + + assert(size != 0); + + bo_fake = calloc(1, sizeof(*bo_fake)); + if (!bo_fake) + return NULL; + + bo_fake->bo.size = size; + bo_fake->bo.offset = offset; + bo_fake->bo.virtual = virtual; + bo_fake->bo.bufmgr = bufmgr; + bo_fake->refcount = 1; + bo_fake->id = ++bufmgr_fake->buf_nr; + bo_fake->name = name; + bo_fake->flags = BM_PINNED; + bo_fake->is_static = 1; + + DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, + bo_fake->bo.size / 1024); + + return &bo_fake->bo; +} + +static void +drm_intel_fake_bo_reference(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake 
*)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + pthread_mutex_lock(&bufmgr_fake->lock); + bo_fake->refcount++; + pthread_mutex_unlock(&bufmgr_fake->lock); +} + +static void +drm_intel_fake_bo_reference_locked(drm_intel_bo *bo) +{ + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + bo_fake->refcount++; +} + +static void +drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + int i; + + if (--bo_fake->refcount == 0) { + assert(bo_fake->map_count == 0); + /* No remaining references, so free it */ + if (bo_fake->block) + free_block(bufmgr_fake, bo_fake->block); + free_backing_store(bo); + + for (i = 0; i < bo_fake->nr_relocs; i++) + drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf); + + DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name); + + free(bo_fake->relocs); + free(bo); + } +} + +static void +drm_intel_fake_bo_unreference(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + + pthread_mutex_lock(&bufmgr_fake->lock); + drm_intel_fake_bo_unreference_locked(bo); + pthread_mutex_unlock(&bufmgr_fake->lock); +} + +/** + * Set the buffer as not requiring backing store, and instead get the callback + * invoked whenever it would be set dirty. + */ +void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo, + void (*invalidate_cb)(drm_intel_bo *bo, + void *ptr), + void *ptr) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + pthread_mutex_lock(&bufmgr_fake->lock); + + if (bo_fake->backing_store) + free_backing_store(bo); + + bo_fake->flags |= BM_NO_BACKING_STORE; + + DBG("disable_backing_store set buf %d dirty\n", bo_fake->id); + bo_fake->dirty = 1; + bo_fake->invalidate_cb = invalidate_cb; + bo_fake->invalidate_ptr = ptr; + + /* Note that it is invalid right from the start. Also note + * invalidate_cb is called with the bufmgr locked, so cannot + * itself make bufmgr calls. + */ + if (invalidate_cb != NULL) + invalidate_cb(bo, ptr); + + pthread_mutex_unlock(&bufmgr_fake->lock); +} + +/** + * Map a buffer into bo->virtual, allocating either card memory space (If + * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary. + */ +static int +drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + /* Static buffers are always mapped. */ + if (bo_fake->is_static) { + if (bo_fake->card_dirty) { + drm_intel_bufmgr_fake_wait_idle(bufmgr_fake); + bo_fake->card_dirty = 0; + } + return 0; + } + + /* Allow recursive mapping. Mesa may recursively map buffers with + * nested display loops, and it is used internally in bufmgr_fake + * for relocation. 
+ */ + if (bo_fake->map_count++ != 0) + return 0; + + { + DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, + bo_fake->bo.size / 1024); + + if (bo->virtual != NULL) { + drmMsg("%s: already mapped\n", __FUNCTION__); + abort(); + } + else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) { + + if (!bo_fake->block && !evict_and_alloc_block(bo)) { + DBG("%s: alloc failed\n", __FUNCTION__); + bufmgr_fake->fail = 1; + return 1; + } + else { + assert(bo_fake->block); + bo_fake->dirty = 0; + + if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) && + bo_fake->block->fenced) { + drm_intel_fake_bo_wait_rendering_locked(bo); + } + + bo->virtual = bo_fake->block->virtual; + } + } + else { + if (write_enable) + set_dirty(bo); + + if (bo_fake->backing_store == 0) + alloc_backing_store(bo); + + if ((bo_fake->card_dirty == 1) && bo_fake->block) { + if (bo_fake->block->fenced) + drm_intel_fake_bo_wait_rendering_locked(bo); + + memcpy(bo_fake->backing_store, bo_fake->block->virtual, bo_fake->block->bo->size); + bo_fake->card_dirty = 0; + } + + bo->virtual = bo_fake->backing_store; + } + } + + return 0; +} + +static int +drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + int ret; + + pthread_mutex_lock(&bufmgr_fake->lock); + ret = drm_intel_fake_bo_map_locked(bo, write_enable); + pthread_mutex_unlock(&bufmgr_fake->lock); + + return ret; +} + +static int +drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + /* Static buffers are always mapped. */ + if (bo_fake->is_static) + return 0; + + assert(bo_fake->map_count != 0); + if (--bo_fake->map_count != 0) + return 0; + + DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, + bo_fake->bo.size / 1024); + + bo->virtual = NULL; + + return 0; +} + +static int +drm_intel_fake_bo_unmap(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + int ret; + + pthread_mutex_lock(&bufmgr_fake->lock); + ret = drm_intel_fake_bo_unmap_locked(bo); + pthread_mutex_unlock(&bufmgr_fake->lock); + + return ret; +} + +static void +drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake) +{ + struct block *block, *tmp; + + bufmgr_fake->performed_rendering = 0; + /* okay for ever BO that is on the HW kick it off. + seriously not afraid of the POLICE right now */ + DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) { + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo; + + block->on_hardware = 0; + free_block(bufmgr_fake, block); + bo_fake->block = NULL; + bo_fake->validated = 0; + if (!(bo_fake->flags & BM_NO_BACKING_STORE)) + bo_fake->dirty = 1; + } + +} + +static int +drm_intel_fake_bo_validate(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + + bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + + DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, + bo_fake->bo.size / 1024); + + /* Sanity check: Buffers should be unmapped before being validated. + * This is not so much of a problem for bufmgr_fake, but TTM refuses, + * and the problem is harder to debug there. 
+ */ + assert(bo_fake->map_count == 0); + + if (bo_fake->is_static) { + /* Add it to the needs-fence list */ + bufmgr_fake->need_fence = 1; + return 0; + } + + /* Allocate the card memory */ + if (!bo_fake->block && !evict_and_alloc_block(bo)) { + bufmgr_fake->fail = 1; + DBG("Failed to validate buf %d:%s\n", bo_fake->id, bo_fake->name); + return -1; + } + + assert(bo_fake->block); + assert(bo_fake->block->bo == &bo_fake->bo); + + bo->offset = bo_fake->block->mem->ofs; + + /* Upload the buffer contents if necessary */ + if (bo_fake->dirty) { + DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id, + bo_fake->name, bo->size, bo_fake->block->mem->ofs); + + assert(!(bo_fake->flags & + (BM_NO_BACKING_STORE|BM_PINNED))); + + /* Actually, should be able to just wait for a fence on the memory, + * which we would be tracking when we free it. Waiting for idle is + * a sufficiently large hammer for now. + */ + drm_intel_bufmgr_fake_wait_idle(bufmgr_fake); + + /* we may never have mapped this BO so it might not have any backing + * store if this happens it should be rare, but 0 the card memory + * in any case */ + if (bo_fake->backing_store) + memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size); + else + memset(bo_fake->block->virtual, 0, bo->size); + + bo_fake->dirty = 0; + } + + bo_fake->block->fenced = 0; + bo_fake->block->on_hardware = 1; + DRMLISTDEL(bo_fake->block); + DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware); + + bo_fake->validated = 1; + bufmgr_fake->need_fence = 1; + + return 0; +} + +static void +drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr; + unsigned int cookie; + + cookie = _fence_emit_internal(bufmgr_fake); + fence_blocks(bufmgr_fake, cookie); + + DBG("drm_fence_validated: 0x%08x cookie\n", cookie); +} + +static void +drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr; + + pthread_mutex_destroy(&bufmgr_fake->lock); + mmDestroy(bufmgr_fake->heap); + free(bufmgr); +} + +static int +drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset, + drm_intel_bo *target_bo, uint32_t target_offset, + uint32_t read_domains, uint32_t write_domain) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + struct fake_buffer_reloc *r; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)target_bo; + int i; + + pthread_mutex_lock(&bufmgr_fake->lock); + + assert(bo); + assert(target_bo); + + if (bo_fake->relocs == NULL) { + bo_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS); + } + + r = &bo_fake->relocs[bo_fake->nr_relocs++]; + + assert(bo_fake->nr_relocs <= MAX_RELOCS); + + drm_intel_fake_bo_reference_locked(target_bo); + + if (!target_fake->is_static) { + bo_fake->child_size += ALIGN(target_bo->size, target_fake->alignment); + bo_fake->child_size += target_fake->child_size; + } + r->target_buf = target_bo; + r->offset = offset; + r->last_target_offset = target_bo->offset; + r->delta = target_offset; + r->read_domains = read_domains; + r->write_domain = write_domain; + + if (bufmgr_fake->debug) { + /* Check that a conflicting relocation hasn't already been emitted. 
*/ + for (i = 0; i < bo_fake->nr_relocs - 1; i++) { + struct fake_buffer_reloc *r2 = &bo_fake->relocs[i]; + + assert(r->offset != r2->offset); + } + } + + pthread_mutex_unlock(&bufmgr_fake->lock); + + return 0; +} + +/** + * Incorporates the validation flags associated with each relocation into + * the combined validation flags for the buffer on this batchbuffer submission. + */ +static void +drm_intel_fake_calculate_domains(drm_intel_bo *bo) +{ + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + int i; + + for (i = 0; i < bo_fake->nr_relocs; i++) { + struct fake_buffer_reloc *r = &bo_fake->relocs[i]; + drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf; + + /* Do the same for the tree of buffers we depend on */ + drm_intel_fake_calculate_domains(r->target_buf); + + target_fake->read_domains |= r->read_domains; + target_fake->write_domain |= r->write_domain; + } +} + + +static int +drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + int i, ret; + + assert(bo_fake->map_count == 0); + + for (i = 0; i < bo_fake->nr_relocs; i++) { + struct fake_buffer_reloc *r = &bo_fake->relocs[i]; + drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf; + uint32_t reloc_data; + + /* Validate the target buffer if that hasn't been done. */ + if (!target_fake->validated) { + ret = drm_intel_fake_reloc_and_validate_buffer(r->target_buf); + if (ret != 0) { + if (bo->virtual != NULL) + drm_intel_fake_bo_unmap_locked(bo); + return ret; + } + } + + /* Calculate the value of the relocation entry. */ + if (r->target_buf->offset != r->last_target_offset) { + reloc_data = r->target_buf->offset + r->delta; + + if (bo->virtual == NULL) + drm_intel_fake_bo_map_locked(bo, 1); + + *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data; + + r->last_target_offset = r->target_buf->offset; + } + } + + if (bo->virtual != NULL) + drm_intel_fake_bo_unmap_locked(bo); + + if (bo_fake->write_domain != 0) { + if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) { + if (bo_fake->backing_store == 0) + alloc_backing_store(bo); + } + bo_fake->card_dirty = 1; + bufmgr_fake->performed_rendering = 1; + } + + return drm_intel_fake_bo_validate(bo); +} + +static void +drm_intel_bo_fake_post_submit(drm_intel_bo *bo) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr; + drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo; + int i; + + for (i = 0; i < bo_fake->nr_relocs; i++) { + struct fake_buffer_reloc *r = &bo_fake->relocs[i]; + drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf; + + if (target_fake->validated) + drm_intel_bo_fake_post_submit(r->target_buf); + + DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n", + bo_fake->name, (uint32_t)bo->offset, r->offset, + target_fake->name, (uint32_t)r->target_buf->offset, r->delta); + } + + assert(bo_fake->map_count == 0); + bo_fake->validated = 0; + bo_fake->read_domains = 0; + bo_fake->write_domain = 0; +} + + +void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr, + int (*exec)(drm_intel_bo *bo, + unsigned int used, + void *priv), + void *priv) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr; + + bufmgr_fake->exec = exec; + bufmgr_fake->exec_priv = priv; +} + +static int +drm_intel_fake_bo_exec(drm_intel_bo *bo, int used, + drm_clip_rect_t *cliprects, int num_cliprects, + int DR4) +{ + drm_intel_bufmgr_fake *bufmgr_fake = 
(drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *)bo;
+ struct drm_i915_batchbuffer batch;
+ int ret;
+ int retry_count = 0;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ bufmgr_fake->performed_rendering = 0;
+
+ drm_intel_fake_calculate_domains(bo);
+
+ batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
+
+ /* we've run out of RAM so blow the whole lot away and retry */
+ restart:
+ ret = drm_intel_fake_reloc_and_validate_buffer(bo);
+ if (bufmgr_fake->fail == 1) {
+ if (retry_count == 0) {
+ retry_count++;
+ drm_intel_fake_kick_all_locked(bufmgr_fake);
+ bufmgr_fake->fail = 0;
+ goto restart;
+ } else /* dump out the memory here */
+ mmDumpMemInfo(bufmgr_fake->heap);
+ }
+
+ assert(ret == 0);
+
+ if (bufmgr_fake->exec != NULL) {
+ int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
+ if (ret != 0) {
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+ return ret;
+ }
+ } else {
+ batch.start = bo->offset;
+ batch.used = used;
+ batch.cliprects = cliprects;
+ batch.num_cliprects = num_cliprects;
+ batch.DR1 = 0;
+ batch.DR4 = DR4;
+
+ if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
+ sizeof(batch))) {
+ drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+ return -errno;
+ }
+ }
+
+ drm_intel_fake_fence_validated(bo->bufmgr);
+
+ drm_intel_bo_fake_post_submit(bo);
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return 0;
+}
+
+/**
+ * Return an error if the list of BOs will exceed the aperture size.
+ *
+ * This is a rough guess and likely to fail, as during the validate sequence we
+ * may place a buffer in an inopportune spot early on and then fail to fit
+ * a set smaller than the aperture.
+ */
+static int
+drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo_array[0]->bufmgr;
+ unsigned int sz = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo_array[i];
+
+ if (bo_fake == NULL)
+ continue;
+
+ if (!bo_fake->is_static)
+ sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
+ sz += bo_fake->child_size;
+ }
+
+ if (sz > bufmgr_fake->size) {
+ DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n",
+ sz / 1024, bufmgr_fake->size / 1024);
+ return -1;
+ }
+
+ DBG("drm_check_space: sz %dkb vs bufmgr %dkb\n", sz / 1024,
+ bufmgr_fake->size / 1024);
+ return 0;
+}
+
+/**
+ * Evicts all buffers, waiting for fences to pass and copying contents out
+ * as necessary.
+ *
+ * Used by the X Server on LeaveVT, when the card memory is no longer our
+ * own.
+ */
+void
+drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
+ struct block *block, *tmp;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+ /* Check that we didn't release the lock without fencing the last
+ * set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ /* Releases the memory, and memcpys dirty contents out if necessary. 
*/ + free_block(bufmgr_fake, block); + } + + pthread_mutex_unlock(&bufmgr_fake->lock); +} +void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr, + volatile unsigned int *last_dispatch) +{ + drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr; + + bufmgr_fake->last_dispatch = (volatile int *)last_dispatch; +} + +drm_intel_bufmgr * +drm_intel_bufmgr_fake_init(int fd, + unsigned long low_offset, void *low_virtual, + unsigned long size, + volatile unsigned int *last_dispatch) +{ + drm_intel_bufmgr_fake *bufmgr_fake; + + bufmgr_fake = calloc(1, sizeof(*bufmgr_fake)); + + if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) { + free(bufmgr_fake); + return NULL; + } + + /* Initialize allocator */ + DRMINITLISTHEAD(&bufmgr_fake->fenced); + DRMINITLISTHEAD(&bufmgr_fake->on_hardware); + DRMINITLISTHEAD(&bufmgr_fake->lru); + + bufmgr_fake->low_offset = low_offset; + bufmgr_fake->virtual = low_virtual; + bufmgr_fake->size = size; + bufmgr_fake->heap = mmInit(low_offset, size); + + /* Hook in methods */ + bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc; + bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference; + bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference; + bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map; + bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap; + bufmgr_fake->bufmgr.bo_wait_rendering = drm_intel_fake_bo_wait_rendering; + bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc; + bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy; + bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec; + bufmgr_fake->bufmgr.check_aperture_space = drm_intel_fake_check_aperture_space; + bufmgr_fake->bufmgr.debug = 0; + + bufmgr_fake->fd = fd; + bufmgr_fake->last_dispatch = (volatile int *)last_dispatch; + + return &bufmgr_fake->bufmgr; +} + diff --git a/lib/libdrm/intel/intel_bufmgr_gem.c b/lib/libdrm/intel/intel_bufmgr_gem.c new file mode 100644 index 000000000..a06dca061 --- /dev/null +++ b/lib/libdrm/intel/intel_bufmgr_gem.c @@ -0,0 +1,1301 @@ +/************************************************************************** + * + * Copyright © 2007 Red Hat Inc. + * Copyright © 2007 Intel Corporation + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <xf86drm.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "errno.h"
+#include "intel_bufmgr.h"
+#include "intel_bufmgr_priv.h"
+#include "string.h"
+
+#include "i915_drm.h"
+
+#define DBG(...) do { \
+ if (bufmgr_gem->bufmgr.debug) \
+ fprintf(stderr, __VA_ARGS__); \
+} while (0)
+
+typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
+
+struct drm_intel_gem_bo_bucket {
+ drm_intel_bo_gem *head, **tail;
+ /**
+ * Limit on the number of entries in this bucket.
+ *
+ * 0 means that caching at this bucket size is disabled.
+ * -1 means that there is no limit to caching at this size.
+ */
+ int max_entries;
+ int num_entries;
+};
+
+/* Arbitrarily chosen, 16 means that the maximum size we'll cache for reuse
+ * is 1 << 16 pages, or 256MB.
+ */
+#define DRM_INTEL_GEM_BO_BUCKETS 16
+typedef struct _drm_intel_bufmgr_gem {
+ drm_intel_bufmgr bufmgr;
+
+ int fd;
+
+ int max_relocs;
+
+ pthread_mutex_t lock;
+
+ struct drm_i915_gem_exec_object *exec_objects;
+ drm_intel_bo **exec_bos;
+ int exec_size;
+ int exec_count;
+
+ /** Array of lists of cached gem objects of power-of-two sizes */
+ struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
+
+ uint64_t gtt_size;
+} drm_intel_bufmgr_gem;
+
+struct _drm_intel_bo_gem {
+ drm_intel_bo bo;
+
+ int refcount;
+ /** Boolean whether the mmap ioctl has been called for this buffer yet. */
+ uint32_t gem_handle;
+ const char *name;
+
+ /**
+ * Kernel-assigned global name for this object
+ */
+ unsigned int global_name;
+
+ /**
+ * Index of the buffer within the validation list while preparing a
+ * batchbuffer execution.
+ */
+ int validate_index;
+
+ /**
+ * Boolean whether we've started swrast
+ * Set when the buffer has been mapped
+ * Cleared when the buffer is unmapped
+ */
+ int swrast;
+
+ /**
+ * Current tiling mode
+ */
+ uint32_t tiling_mode;
+ uint32_t swizzle_mode;
+
+ /** Array passed to the DRM containing relocation information. */
+ struct drm_i915_gem_relocation_entry *relocs;
+ /** Array of bos corresponding to relocs[i].target_handle */
+ drm_intel_bo **reloc_target_bo;
+ /** Number of entries in relocs */
+ int reloc_count;
+ /** Mapped address for the buffer, saved across map/unmap cycles */
+ void *virtual;
+
+ /** free list */
+ drm_intel_bo_gem *next;
+
+ /**
+ * Boolean of whether this BO and its children have been included in
+ * the current drm_intel_bufmgr_check_aperture_space() total.
+ */
+ char included_in_check_aperture;
+
+ /**
+ * Boolean of whether this buffer has been used as a relocation
+ * target and had its size accounted for, and thus can't have any
+ * further relocations added to it.
+ */
+ char used_as_reloc_target;
+
+ /**
+ * Size in bytes of this buffer and its relocation descendants.
+ *
+ * Used to avoid costly tree walking in drm_intel_bufmgr_check_aperture in
+ * the common case. 
+ */ + int reloc_tree_size; +}; + +static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo); + +static unsigned int +drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count); + +static unsigned int +drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count); + +static int +drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t *swizzle_mode); + +static int +drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t stride); + +static void +drm_intel_gem_bo_unreference(drm_intel_bo *bo); + +static int +logbase2(int n) +{ + int i = 1; + int log2 = 0; + + while (n > i) { + i *= 2; + log2++; + } + + return log2; +} + +static struct drm_intel_gem_bo_bucket * +drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem, + unsigned long size) +{ + int i; + + /* We only do buckets in power of two increments */ + if ((size & (size - 1)) != 0) + return NULL; + + /* We should only see sizes rounded to pages. */ + assert((size % 4096) == 0); + + /* We always allocate in units of pages */ + i = ffs(size / 4096) - 1; + if (i >= DRM_INTEL_GEM_BO_BUCKETS) + return NULL; + + return &bufmgr_gem->cache_bucket[i]; +} + + +static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem) +{ + int i, j; + + for (i = 0; i < bufmgr_gem->exec_count; i++) { + drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + + if (bo_gem->relocs == NULL) { + DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name); + continue; + } + + for (j = 0; j < bo_gem->reloc_count; j++) { + drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j]; + drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *)target_bo; + + DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n", + i, + bo_gem->gem_handle, bo_gem->name, + (unsigned long long)bo_gem->relocs[j].offset, + target_gem->gem_handle, target_gem->name, target_bo->offset, + bo_gem->relocs[j].delta); + } + } +} + +/** + * Adds the given buffer to the list of buffers to be validated (moved into the + * appropriate memory type) with the next batch submission. + * + * If a buffer is validated multiple times in a batch submission, it ends up + * with the intersection of the memory type flags and the union of the + * access flags. + */ +static void +drm_intel_add_validate_buffer(drm_intel_bo *bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + int index; + + if (bo_gem->validate_index != -1) + return; + + /* Extend the array of validation entries as necessary. 
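
The exec-list growth that follows uses the standard doubling strategy, so appending N buffers costs O(N) reallocation work in total. The same idea in isolation (an illustrative editor's helper, not part of the patch):

    #include <stdlib.h>

    /* Double *capacity (seeding it with the patch's arbitrary 5) and
     * grow the array to match; returns NULL on allocation failure. */
    static void *
    grow_array(void *array, size_t elem_size, int *capacity)
    {
        int new_size = *capacity * 2;
        void *tmp;

        if (new_size == 0)
            new_size = 5;
        tmp = realloc(array, elem_size * new_size);
        if (tmp != NULL)
            *capacity = new_size;
        return tmp;
    }
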
*/ + if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) { + int new_size = bufmgr_gem->exec_size * 2; + + if (new_size == 0) + new_size = 5; + + bufmgr_gem->exec_objects = + realloc(bufmgr_gem->exec_objects, + sizeof(*bufmgr_gem->exec_objects) * new_size); + bufmgr_gem->exec_bos = + realloc(bufmgr_gem->exec_bos, + sizeof(*bufmgr_gem->exec_bos) * new_size); + bufmgr_gem->exec_size = new_size; + } + + index = bufmgr_gem->exec_count; + bo_gem->validate_index = index; + /* Fill in array entry */ + bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle; + bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count; + bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs; + bufmgr_gem->exec_objects[index].alignment = 0; + bufmgr_gem->exec_objects[index].offset = 0; + bufmgr_gem->exec_bos[index] = bo; + drm_intel_gem_bo_reference_locked(bo); + bufmgr_gem->exec_count++; +} + + +#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \ + sizeof(uint32_t)) + +static int +drm_intel_setup_reloc_list(drm_intel_bo *bo) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + + bo_gem->relocs = malloc(bufmgr_gem->max_relocs * + sizeof(struct drm_i915_gem_relocation_entry)); + bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * + sizeof(drm_intel_bo *)); + + return 0; +} + +static drm_intel_bo * +drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; + drm_intel_bo_gem *bo_gem; + unsigned int page_size = getpagesize(); + int ret; + struct drm_intel_gem_bo_bucket *bucket; + int alloc_from_cache = 0; + unsigned long bo_size; + + /* Round the allocated size up to a power of two number of pages. */ + bo_size = 1 << logbase2(size); + if (bo_size < page_size) + bo_size = page_size; + bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo_size); + + /* If we don't have caching at this size, don't actually round the + * allocation up. 
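
Bucket lookup relies on sizes being whole pages and powers of two, so the bucket index is just the bit position of the page count. A condensed restatement of drm_intel_gem_bo_bucket_for_size()'s arithmetic (illustrative helper):

    #include <strings.h>    /* ffs() */

    /* 4096 -> 0, 8192 -> 1, 16384 -> 2, ...; -1 if the size is not a
     * power-of-two number of 4096-byte pages. */
    static int
    bucket_index_for_size(unsigned long size)
    {
        if (size == 0 || (size & (size - 1)) != 0 || size % 4096 != 0)
            return -1;
        return ffs(size / 4096) - 1;
    }
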
+ */ + if (bucket == NULL || bucket->max_entries == 0) { + bo_size = size; + if (bo_size < page_size) + bo_size = page_size; + } + + pthread_mutex_lock(&bufmgr_gem->lock); + /* Get a buffer out of the cache if available */ + if (bucket != NULL && bucket->num_entries > 0) { + struct drm_i915_gem_busy busy; + + bo_gem = bucket->head; + busy.handle = bo_gem->gem_handle; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); + alloc_from_cache = (ret == 0 && busy.busy == 0); + + if (alloc_from_cache) { + bucket->head = bo_gem->next; + if (bo_gem->next == NULL) + bucket->tail = &bucket->head; + bucket->num_entries--; + } + } + pthread_mutex_unlock(&bufmgr_gem->lock); + + if (!alloc_from_cache) { + struct drm_i915_gem_create create; + + bo_gem = calloc(1, sizeof(*bo_gem)); + if (!bo_gem) + return NULL; + + bo_gem->bo.size = bo_size; + memset(&create, 0, sizeof(create)); + create.size = bo_size; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create); + bo_gem->gem_handle = create.handle; + bo_gem->bo.handle = bo_gem->gem_handle; + if (ret != 0) { + free(bo_gem); + return NULL; + } + bo_gem->bo.bufmgr = bufmgr; + } + + bo_gem->name = name; + bo_gem->refcount = 1; + bo_gem->validate_index = -1; + bo_gem->reloc_tree_size = bo_gem->bo.size; + bo_gem->used_as_reloc_target = 0; + bo_gem->tiling_mode = I915_TILING_NONE; + bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + + DBG("bo_create: buf %d (%s) %ldb\n", + bo_gem->gem_handle, bo_gem->name, size); + + return &bo_gem->bo; +} + +/** + * Returns a drm_intel_bo wrapping the given buffer object handle. + * + * This can be used when one application needs to pass a buffer object + * to another. + */ +drm_intel_bo * +drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name, + unsigned int handle) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; + drm_intel_bo_gem *bo_gem; + int ret; + struct drm_gem_open open_arg; + struct drm_i915_gem_get_tiling get_tiling; + + bo_gem = calloc(1, sizeof(*bo_gem)); + if (!bo_gem) + return NULL; + + memset(&open_arg, 0, sizeof(open_arg)); + open_arg.name = handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg); + if (ret != 0) { + fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n", + name, handle, strerror(errno)); + free(bo_gem); + return NULL; + } + bo_gem->bo.size = open_arg.size; + bo_gem->bo.offset = 0; + bo_gem->bo.virtual = NULL; + bo_gem->bo.bufmgr = bufmgr; + bo_gem->name = name; + bo_gem->refcount = 1; + bo_gem->validate_index = -1; + bo_gem->gem_handle = open_arg.handle; + bo_gem->global_name = handle; + + get_tiling.handle = bo_gem->gem_handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling); + if (ret != 0) { + drm_intel_gem_bo_unreference(&bo_gem->bo); + return NULL; + } + bo_gem->tiling_mode = get_tiling.tiling_mode; + bo_gem->swizzle_mode = get_tiling.swizzle_mode; + + DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name); + + return &bo_gem->bo; +} + +static void +drm_intel_gem_bo_reference(drm_intel_bo *bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + + pthread_mutex_lock(&bufmgr_gem->lock); + bo_gem->refcount++; + pthread_mutex_unlock(&bufmgr_gem->lock); +} + +static void +drm_intel_gem_bo_reference_locked(drm_intel_bo *bo) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + + bo_gem->refcount++; +} + +static void +drm_intel_gem_bo_free(drm_intel_bo *bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = 
(drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_gem_close close; + int ret; + + if (bo_gem->virtual) + munmap (bo_gem->virtual, bo_gem->bo.size); + + /* Close this object */ + close.handle = bo_gem->gem_handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); + if (ret != 0) { + fprintf(stderr, + "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", + bo_gem->gem_handle, bo_gem->name, strerror(errno)); + } + free(bo); +} + +static void +drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + + if (--bo_gem->refcount == 0) { + struct drm_intel_gem_bo_bucket *bucket; + uint32_t tiling_mode; + + if (bo_gem->relocs != NULL) { + int i; + + /* Unreference all the target buffers */ + for (i = 0; i < bo_gem->reloc_count; i++) + drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]); + free(bo_gem->reloc_target_bo); + free(bo_gem->relocs); + } + + DBG("bo_unreference final: %d (%s)\n", + bo_gem->gem_handle, bo_gem->name); + + bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size); + /* Put the buffer into our internal cache for reuse if we can. */ + tiling_mode = I915_TILING_NONE; + if (bo_gem->global_name == 0 && + bucket != NULL && + (bucket->max_entries == -1 || + (bucket->max_entries > 0 && + bucket->num_entries < bucket->max_entries)) && + drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0) + { + bo_gem->name = NULL; + bo_gem->validate_index = -1; + bo_gem->relocs = NULL; + bo_gem->reloc_target_bo = NULL; + bo_gem->reloc_count = 0; + + bo_gem->next = NULL; + *bucket->tail = bo_gem; + bucket->tail = &bo_gem->next; + bucket->num_entries++; + } else { + drm_intel_gem_bo_free(bo); + } + } +} + +static void +drm_intel_gem_bo_unreference(drm_intel_bo *bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + + pthread_mutex_lock(&bufmgr_gem->lock); + drm_intel_gem_bo_unreference_locked(bo); + pthread_mutex_unlock(&bufmgr_gem->lock); +} + +static int +drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_set_domain set_domain; + int ret; + + pthread_mutex_lock(&bufmgr_gem->lock); + + /* Allow recursive mapping. Mesa may recursively map buffers with + * nested display loops. 
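
From the caller's side the contract here is simple: map, touch bo->virtual, unmap, and the pairs may nest. A usage sketch through the public wrappers (bo is an already-allocated drm_intel_bo):

    /* write_enable = 1 tells the kernel the CPU will dirty the pages */
    if (drm_intel_bo_map(bo, 1) == 0) {
        memset(bo->virtual, 0, bo->size);   /* CPU access via the cached mapping */
        drm_intel_bo_unmap(bo);
    }
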
+ */ + if (!bo_gem->virtual) { + struct drm_i915_gem_mmap mmap_arg; + + DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); + + memset(&mmap_arg, 0, sizeof(mmap_arg)); + mmap_arg.handle = bo_gem->gem_handle; + mmap_arg.offset = 0; + mmap_arg.size = bo->size; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); + if (ret != 0) { + fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, bo_gem->name, strerror(errno)); + pthread_mutex_unlock(&bufmgr_gem->lock); + return ret; + } + bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr; + bo_gem->swrast = 0; + } + DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, + bo_gem->virtual); + bo->virtual = bo_gem->virtual; + + if (bo_gem->global_name != 0 || !bo_gem->swrast) { + set_domain.handle = bo_gem->gem_handle; + set_domain.read_domains = I915_GEM_DOMAIN_CPU; + if (write_enable) + set_domain.write_domain = I915_GEM_DOMAIN_CPU; + else + set_domain.write_domain = 0; + do { + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, + &set_domain); + } while (ret == -1 && errno == EINTR); + if (ret != 0) { + fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n", + __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno)); + pthread_mutex_unlock(&bufmgr_gem->lock); + return ret; + } + bo_gem->swrast = 1; + } + + pthread_mutex_unlock(&bufmgr_gem->lock); + + return 0; +} + +int +drm_intel_gem_bo_map_gtt(drm_intel_bo *bo) +{ +#ifdef __OpenBSD__ + /* + * OpenBSD gtt mapping will work differently, but isn't written yet, + * so just fail for now. It's only used in the modesetting paths + * anyway. + */ + return EINVAL; +#else + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_set_domain set_domain; + int ret; + + pthread_mutex_lock(&bufmgr_gem->lock); + + /* Get a mapping of the buffer if we haven't before. */ + if (bo_gem->virtual == NULL) { + struct drm_i915_gem_mmap_gtt mmap_arg; + + DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); + + memset(&mmap_arg, 0, sizeof(mmap_arg)); + mmap_arg.handle = bo_gem->gem_handle; + + /* Get the fake offset back... 
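
The GTT path is a two-step dance: the MMAP_GTT ioctl returns a "fake" offset, which is then used as the offset argument to an ordinary mmap(2) on the DRM file descriptor. Condensed (editor's sketch; fd, handle, size and ptr are assumed, error handling dropped):

    struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = handle };

    ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); /* 1: fake offset */
    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,     /* 2: map via GTT */
        MAP_SHARED, fd, mmap_arg.offset);
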
*/ + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); + if (ret != 0) { + fprintf(stderr, + "%s:%d: Error preparing buffer map %d (%s): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, bo_gem->name, + strerror(errno)); + pthread_mutex_unlock(&bufmgr_gem->lock); + return ret; + } + + /* and mmap it */ + bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE, + MAP_SHARED, bufmgr_gem->fd, + mmap_arg.offset); + if (bo_gem->virtual == MAP_FAILED) { + fprintf(stderr, + "%s:%d: Error mapping buffer %d (%s): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, bo_gem->name, + strerror(errno)); + pthread_mutex_unlock(&bufmgr_gem->lock); + return errno; + } + } + + bo->virtual = bo_gem->virtual; + + DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, + bo_gem->virtual); + + /* Now move it to the GTT domain so that the CPU caches are flushed */ + set_domain.handle = bo_gem->gem_handle; + set_domain.read_domains = I915_GEM_DOMAIN_GTT; + set_domain.write_domain = I915_GEM_DOMAIN_GTT; + do { + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, + &set_domain); + } while (ret == -1 && errno == EINTR); + + if (ret != 0) { + fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n", + __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno)); + } + + pthread_mutex_unlock(&bufmgr_gem->lock); + + return 0; +#endif +} + +static int +drm_intel_gem_bo_unmap(drm_intel_bo *bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_sw_finish sw_finish; + int ret; + + if (bo == NULL) + return 0; + + assert(bo_gem->virtual != NULL); + + pthread_mutex_lock(&bufmgr_gem->lock); + if (bo_gem->swrast) { + sw_finish.handle = bo_gem->gem_handle; + do { + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH, + &sw_finish); + } while (ret == -1 && errno == EINTR); + bo_gem->swrast = 0; + } + pthread_mutex_unlock(&bufmgr_gem->lock); + return 0; +} + +static int +drm_intel_gem_bo_subdata (drm_intel_bo *bo, unsigned long offset, + unsigned long size, const void *data) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_pwrite pwrite; + int ret; + + memset (&pwrite, 0, sizeof (pwrite)); + pwrite.handle = bo_gem->gem_handle; + pwrite.offset = offset; + pwrite.size = size; + pwrite.data_ptr = (uint64_t) (uintptr_t) data; + do { + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite); + } while (ret == -1 && errno == EINTR); + if (ret != 0) { + fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, (int) offset, (int) size, + strerror (errno)); + } + return 0; +} + +static int +drm_intel_gem_bo_get_subdata (drm_intel_bo *bo, unsigned long offset, + unsigned long size, void *data) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_pread pread; + int ret; + + memset (&pread, 0, sizeof (pread)); + pread.handle = bo_gem->gem_handle; + pread.offset = offset; + pread.size = size; + pread.data_ptr = (uint64_t) (uintptr_t) data; + do { + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread); + } while (ret == -1 && errno == EINTR); + if (ret != 0) { + fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, (int) offset, (int) size, + strerror (errno)); + 
} + return 0; +} + +/** Waits for all GPU rendering to the object to have completed. */ +static void +drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo) +{ + return drm_intel_gem_bo_start_gtt_access(bo, 0); +} + +/** + * Sets the object to the GTT read and possibly write domain, used by the X + * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt(). + * + * In combination with drm_intel_gem_bo_pin() and manual fence management, we + * can do tiled pixmaps this way. + */ +void +drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_set_domain set_domain; + int ret; + + set_domain.handle = bo_gem->gem_handle; + set_domain.read_domains = I915_GEM_DOMAIN_GTT; + set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0; + do { + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); + } while (ret == -1 && errno == EINTR); + if (ret != 0) { + fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain, + strerror (errno)); + } +} + +static void +drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; + int i; + + free(bufmgr_gem->exec_objects); + free(bufmgr_gem->exec_bos); + + pthread_mutex_destroy(&bufmgr_gem->lock); + + /* Free any cached buffer objects we were going to reuse */ + for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) { + struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i]; + drm_intel_bo_gem *bo_gem; + + while ((bo_gem = bucket->head) != NULL) { + bucket->head = bo_gem->next; + if (bo_gem->next == NULL) + bucket->tail = &bucket->head; + bucket->num_entries--; + + drm_intel_gem_bo_free(&bo_gem->bo); + } + } + + free(bufmgr); +} + +/** + * Adds the target buffer to the validation list and adds the relocation + * to the reloc_buffer's relocation list. + * + * The relocation entry at the given offset must already contain the + * precomputed relocation value, because the kernel will optimize out + * the relocation entry write when the buffer hasn't moved from the + * last known offset in target_bo. + */ +static int +drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, + drm_intel_bo *target_bo, uint32_t target_offset, + uint32_t read_domains, uint32_t write_domain) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo; + + pthread_mutex_lock(&bufmgr_gem->lock); + + /* Create a new relocation list if needed */ + if (bo_gem->relocs == NULL) + drm_intel_setup_reloc_list(bo); + + /* Check overflow */ + assert(bo_gem->reloc_count < bufmgr_gem->max_relocs); + + /* Check args */ + assert (offset <= bo->size - 4); + assert ((write_domain & (write_domain-1)) == 0); + + /* Make sure that we're not adding a reloc to something whose size has + * already been accounted for. + */ + assert(!bo_gem->used_as_reloc_target); + bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size; + + /* Flag the target to disallow further relocations in it. 
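
A caller-side sketch of the contract described above (batch_bo, batch_map and dst_bo are hypothetical): the presumed address is written into the batch first, then the relocation records how to patch it if dst_bo moves.

    uint32_t *batch_map = batch_bo->virtual;    /* batch already mapped */

    batch_map[4] = dst_bo->offset + 0;          /* presumed offset + delta */
    drm_intel_bo_emit_reloc(batch_bo, 4 * sizeof(uint32_t), dst_bo, 0,
        I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
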
*/ + target_bo_gem->used_as_reloc_target = 1; + + bo_gem->relocs[bo_gem->reloc_count].offset = offset; + bo_gem->relocs[bo_gem->reloc_count].delta = target_offset; + bo_gem->relocs[bo_gem->reloc_count].target_handle = + target_bo_gem->gem_handle; + bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; + bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; + bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset; + + bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo; + drm_intel_gem_bo_reference_locked(target_bo); + + bo_gem->reloc_count++; + + pthread_mutex_unlock(&bufmgr_gem->lock); + + return 0; +} + +/** + * Walk the tree of relocations rooted at BO and accumulate the list of + * validations to be performed and update the relocation buffers with + * index values into the validation list. + */ +static void +drm_intel_gem_bo_process_reloc(drm_intel_bo *bo) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + int i; + + if (bo_gem->relocs == NULL) + return; + + for (i = 0; i < bo_gem->reloc_count; i++) { + drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i]; + + /* Continue walking the tree depth-first. */ + drm_intel_gem_bo_process_reloc(target_bo); + + /* Add the target to the validate list */ + drm_intel_add_validate_buffer(target_bo); + } +} + +static void +drm_intel_update_buffer_offsets (drm_intel_bufmgr_gem *bufmgr_gem) +{ + int i; + + for (i = 0; i < bufmgr_gem->exec_count; i++) { + drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + + /* Update the buffer offset */ + if (bufmgr_gem->exec_objects[i].offset != bo->offset) { + DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", + bo_gem->gem_handle, bo_gem->name, bo->offset, + (unsigned long long)bufmgr_gem->exec_objects[i].offset); + bo->offset = bufmgr_gem->exec_objects[i].offset; + } + } +} + +static int +drm_intel_gem_bo_exec(drm_intel_bo *bo, int used, + drm_clip_rect_t *cliprects, int num_cliprects, + int DR4) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + struct drm_i915_gem_execbuffer execbuf; + int ret, i; + + pthread_mutex_lock(&bufmgr_gem->lock); + /* Update indices and set up the validate list. */ + drm_intel_gem_bo_process_reloc(bo); + + /* Add the batch buffer to the validation list. There are no relocations + * pointing to it. + */ + drm_intel_add_validate_buffer(bo); + + execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects; + execbuf.buffer_count = bufmgr_gem->exec_count; + execbuf.batch_start_offset = 0; + execbuf.batch_len = used; + execbuf.cliprects_ptr = (uintptr_t)cliprects; + execbuf.num_cliprects = num_cliprects; + execbuf.DR1 = 0; + execbuf.DR4 = DR4; + + do { + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf); + } while (ret != 0 && errno == EAGAIN); + + if (ret != 0 && errno == ENOMEM) { + fprintf(stderr, "Execbuffer fails to pin. Estimate: %u. Actual: %u. 
Available: %u\n", + drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos, + bufmgr_gem->exec_count), + drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos, + bufmgr_gem->exec_count), + (unsigned int) bufmgr_gem->gtt_size); + } + drm_intel_update_buffer_offsets (bufmgr_gem); + + if (bufmgr_gem->bufmgr.debug) + drm_intel_gem_dump_validation_list(bufmgr_gem); + + for (i = 0; i < bufmgr_gem->exec_count; i++) { + drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + + /* Need to call swrast on next bo_map */ + bo_gem->swrast = 0; + + /* Disconnect the buffer from the validate list */ + bo_gem->validate_index = -1; + drm_intel_gem_bo_unreference_locked(bo); + bufmgr_gem->exec_bos[i] = NULL; + } + bufmgr_gem->exec_count = 0; + pthread_mutex_unlock(&bufmgr_gem->lock); + + return 0; +} + +static int +drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_pin pin; + int ret; + + pin.handle = bo_gem->gem_handle; + pin.alignment = alignment; + + do { + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin); + } while (ret == -1 && errno == EINTR); + + if (ret != 0) + return -errno; + + bo->offset = pin.offset; + return 0; +} + +static int +drm_intel_gem_bo_unpin(drm_intel_bo *bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_unpin unpin; + int ret; + + unpin.handle = bo_gem->gem_handle; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin); + if (ret != 0) + return -errno; + + return 0; +} + +static int +drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t stride) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_i915_gem_set_tiling set_tiling; + int ret; + + if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode) + return 0; + + set_tiling.handle = bo_gem->gem_handle; + set_tiling.tiling_mode = *tiling_mode; + set_tiling.stride = stride; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling); + if (ret != 0) { + *tiling_mode = bo_gem->tiling_mode; + return -errno; + } + bo_gem->tiling_mode = set_tiling.tiling_mode; + bo_gem->swizzle_mode = set_tiling.swizzle_mode; + + *tiling_mode = bo_gem->tiling_mode; + return 0; +} + +static int +drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t *swizzle_mode) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + + *tiling_mode = bo_gem->tiling_mode; + *swizzle_mode = bo_gem->swizzle_mode; + return 0; +} + +static int +drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + struct drm_gem_flink flink; + int ret; + + if (!bo_gem->global_name) { + flink.handle = bo_gem->gem_handle; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); + if (ret != 0) + return -errno; + bo_gem->global_name = flink.name; + } + + *name = bo_gem->global_name; + return 0; +} + +/** + * Enables unlimited caching of buffer objects for reuse. + * + * This is potentially very memory expensive, as the cache at each bucket + * size is only bounded by how many buffers of that size we've managed to have + * in flight at once. 
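
Reuse is therefore opt-in. A typical setup sequence (fd is an open DRM device; the 16 kB batch size is illustrative):

    drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);

    /* every bucket's max_entries becomes -1 (unlimited), trading memory
     * for fewer GEM create/close round trips */
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);
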
+ */ +void +drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; + int i; + + for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) { + bufmgr_gem->cache_bucket[i].max_entries = -1; + } +} + +/** + * Return the additional aperture space required by the tree of buffer objects + * rooted at bo. + */ +static int +drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + int i; + int total = 0; + + if (bo == NULL || bo_gem->included_in_check_aperture) + return 0; + + total += bo->size; + bo_gem->included_in_check_aperture = 1; + + for (i = 0; i < bo_gem->reloc_count; i++) + total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_bo[i]); + + return total; +} + +/** + * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready + * for the next drm_intel_bufmgr_check_aperture_space() call. + */ +static void +drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; + int i; + + if (bo == NULL || !bo_gem->included_in_check_aperture) + return; + + bo_gem->included_in_check_aperture = 0; + + for (i = 0; i < bo_gem->reloc_count; i++) + drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_bo[i]); +} + +/** + * Return a conservative estimate for the amount of aperture required + * for a collection of buffers. This may double-count some buffers. + */ +static unsigned int +drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count) +{ + int i; + unsigned int total = 0; + + for (i = 0; i < count; i++) { + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i]; + if (bo_gem != NULL) + total += bo_gem->reloc_tree_size; + } + return total; +} + +/** + * Return the amount of aperture needed for a collection of buffers. + * This avoids double counting any buffers, at the cost of looking + * at every buffer in the set. + */ +static unsigned int +drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count) +{ + int i; + unsigned int total = 0; + + for (i = 0; i < count; i++) + total += drm_intel_gem_bo_get_aperture_space(bo_array[i]); + + for (i = 0; i < count; i++) + drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]); + return total; +} + +/** + * Return -1 if the batchbuffer should be flushed before attempting to + * emit rendering referencing the buffers pointed to by bo_array. + * + * This is required because if we try to emit a batchbuffer with relocations + * to a tree of buffers that won't simultaneously fit in the aperture, + * the rendering will return an error at a point where the software is not + * prepared to recover from it. + * + * However, we also want to emit the batchbuffer significantly before we reach + * the limit, as a series of batchbuffers each of which references buffers + * covering almost all of the aperture means that at each emit we end up + * waiting to evict a buffer from the last rendering, and we get synchronous + * performance. By emitting smaller batchbuffers, we eat some CPU overhead to + * get better parallelism. 
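
The intended driver loop, sketched (bo_array/count cover every BO the half-built batch references; intel_batch_flush() is a hypothetical driver hook):

    if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0) {
        /* Won't fit: submit what has been built so far and retry the
         * new rendering against an empty batch. */
        intel_batch_flush();
    }
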
+ */
+static int
+drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
+ unsigned int total = 0;
+ unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
+
+ total = drm_intel_gem_estimate_batch_space(bo_array, count);
+
+ if (total > threshold)
+ total = drm_intel_gem_compute_batch_space(bo_array, count);
+
+ if (total > threshold) {
+ DBG("check_space: overflowed available aperture, %dkb vs %dkb\n",
+ total / 1024, (int)bufmgr_gem->gtt_size / 1024);
+ return -1;
+ } else {
+ DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
+ (int)bufmgr_gem->gtt_size / 1024);
+ return 0;
+ }
+}
+
+/**
+ * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
+ * and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ */
+drm_intel_bufmgr *
+drm_intel_bufmgr_gem_init(int fd, int batch_size)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_gem_get_aperture aperture;
+ int ret, i;
+
+ bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+ bufmgr_gem->fd = fd;
+
+ if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+ free(bufmgr_gem);
+ return NULL;
+ }
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+
+ if (ret == 0)
+ bufmgr_gem->gtt_size = aperture.aper_available_size;
+ else {
+ fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
+ strerror(errno));
+ bufmgr_gem->gtt_size = 128 * 1024 * 1024;
+ fprintf(stderr, "Assuming %dkB available aperture size.\n"
+ "May lead to reduced performance or incorrect rendering.\n",
+ (int)bufmgr_gem->gtt_size / 1024);
+ }
+
+ /* Let's go with one relocation per every 2 dwords (but round down a bit
+ * since a power of two will mean an extra page allocation for the reloc
+ * buffer).
+ *
+ * Every 4 was too few for the blender benchmark.
+ */
+ bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
+
+ bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
+ bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
+ bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
+ bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
+ bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
+ bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
+ bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
+ bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
+ bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
+ bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
+ bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
+ bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
+ bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
+ bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+ bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+ bufmgr_gem->bufmgr.debug = 0;
+ bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
+ /* Initialize the linked lists for BO reuse cache. 
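
The buckets use the classic head/**tail singly linked queue: tail points at the last next-pointer (initially &head), so appending never walks the list. The same idiom, matching the push in bo_unreference above (editor's sketch):

    static void
    bucket_push_tail(struct drm_intel_gem_bo_bucket *bucket,
        drm_intel_bo_gem *bo_gem)
    {
        bo_gem->next = NULL;
        *bucket->tail = bo_gem;         /* works for the empty list too */
        bucket->tail = &bo_gem->next;
        bucket->num_entries++;
    }
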
*/
+ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
+ bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
+
+ return &bufmgr_gem->bufmgr;
+}
+
diff --git a/lib/libdrm/intel/intel_bufmgr_priv.h b/lib/libdrm/intel/intel_bufmgr_priv.h
new file mode 100644
index 000000000..76d31e478
--- /dev/null
+++ b/lib/libdrm/intel/intel_bufmgr_priv.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr_priv.h
+ *
+ * Private definitions of Intel-specific bufmgr functions and structures.
+ */
+
+#ifndef INTEL_BUFMGR_PRIV_H
+#define INTEL_BUFMGR_PRIV_H
+
+/**
+ * Context for a buffer manager instance.
+ *
+ * Contains public methods followed by private storage for the buffer manager.
+ */
+struct _drm_intel_bufmgr {
+ /**
+ * Allocate a buffer object.
+ *
+ * Buffer objects are not necessarily initially mapped into CPU virtual
+ * address space or graphics device aperture. They must be mapped using
+ * bo_map() to be used by the CPU, and validated for use using bo_validate()
+ * to be used from the graphics device.
+ */
+ drm_intel_bo *(*bo_alloc)(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+
+ /** Takes a reference on a buffer object */
+ void (*bo_reference)(drm_intel_bo *bo);
+
+ /**
+ * Releases a reference on a buffer object, freeing the data if
+ * no references remain.
+ */
+ void (*bo_unreference)(drm_intel_bo *bo);
+
+ /**
+ * Maps the buffer into userspace.
+ *
+ * This function will block waiting for any existing execution on the
+ * buffer to complete first. The resulting mapping is available at
+ * buf->virtual.
+ */
+ int (*bo_map)(drm_intel_bo *bo, int write_enable);
+
+ /** Reduces the refcount on the userspace mapping of the buffer object. */
+ int (*bo_unmap)(drm_intel_bo *bo);
+
+ /**
+ * Write data into an object.
+ *
+ * This is an optional function; if missing,
+ * drm_intel_bo will map/memcpy/unmap.
+ */
+ int (*bo_subdata)(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+
+ /**
+ * Read data from an object.
+ *
+ * This is an optional function; if missing,
+ * drm_intel_bo will map/memcpy/unmap.
+ */
+ int (*bo_get_subdata)(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+
+ /**
+ * Waits for rendering to an object by the GPU to have completed. 
+ * + * This is not required for any access to the BO by bo_map, bo_subdata, etc. + * It is merely a way for the driver to implement glFinish. + */ + void (*bo_wait_rendering)(drm_intel_bo *bo); + + /** + * Tears down the buffer manager instance. + */ + void (*destroy)(drm_intel_bufmgr *bufmgr); + + /** + * Add relocation entry in reloc_buf, which will be updated with the + * target buffer's real offset on on command submission. + * + * Relocations remain in place for the lifetime of the buffer object. + * + * \param bo Buffer to write the relocation into. + * \param offset Byte offset within reloc_bo of the pointer to target_bo. + * \param target_bo Buffer whose offset should be written into the + * relocation entry. + * \param target_offset Constant value to be added to target_bo's offset in + * relocation entry. + * \param read_domains GEM read domains which the buffer will be read into + * by the command that this relocation is part of. + * \param write_domains GEM read domains which the buffer will be dirtied + * in by the command that this relocation is part of. + */ + int (*bo_emit_reloc)(drm_intel_bo *bo, uint32_t offset, + drm_intel_bo *target_bo, uint32_t target_offset, + uint32_t read_domains, uint32_t write_domain); + + /** Executes the command buffer pointed to by bo. */ + int (*bo_exec)(drm_intel_bo *bo, int used, + drm_clip_rect_t *cliprects, int num_cliprects, + int DR4); + + /** + * Pin a buffer to the aperture and fix the offset until unpinned + * + * \param buf Buffer to pin + * \param alignment Required alignment for aperture, in bytes + */ + int (*bo_pin)(drm_intel_bo *bo, uint32_t alignment); + /** + * Unpin a buffer from the aperture, allowing it to be removed + * + * \param buf Buffer to unpin + */ + int (*bo_unpin)(drm_intel_bo *bo); + /** + * Ask that the buffer be placed in tiling mode + * + * \param buf Buffer to set tiling mode for + * \param tiling_mode desired, and returned tiling mode + */ + int (*bo_set_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t stride); + /** + * Get the current tiling (and resulting swizzling) mode for the bo. 
+ * + * \param buf Buffer to get tiling mode for + * \param tiling_mode returned tiling mode + * \param swizzle_mode returned swizzling mode + */ + int (*bo_get_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode, + uint32_t *swizzle_mode); + /** + * Create a visible name for a buffer which can be used by other apps + * + * \param buf Buffer to create a name for + * \param name Returned name + */ + int (*bo_flink)(drm_intel_bo *bo, uint32_t *name); + + int (*check_aperture_space)(drm_intel_bo **bo_array, int count); + int debug; /**< Enables verbose debugging printouts */ +}; + +#endif /* INTEL_BUFMGR_PRIV_H */ + diff --git a/lib/libdrm/intel/mm.c b/lib/libdrm/intel/mm.c new file mode 100644 index 000000000..98146405a --- /dev/null +++ b/lib/libdrm/intel/mm.c @@ -0,0 +1,281 @@ +/* + * GLX Hardware Device Driver common code + * Copyright (C) 1999 Wittawat Yamwong + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <stdlib.h> +#include <assert.h> + +#include "xf86drm.h" +#include "mm.h" + +void +mmDumpMemInfo(const struct mem_block *heap) +{ + drmMsg("Memory heap %p:\n", (void *)heap); + if (heap == 0) { + drmMsg(" heap == 0\n"); + } else { + const struct mem_block *p; + + for(p = heap->next; p != heap; p = p->next) { + drmMsg(" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size, + p->free ? 'F':'.', + p->reserved ? 'R':'.'); + } + + drmMsg("\nFree list:\n"); + + for(p = heap->next_free; p != heap; p = p->next_free) { + drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size, + p->free ? 'F':'.', + p->reserved ? 
'R':'.'); + } + + } + drmMsg("End of memory blocks\n"); +} + +struct mem_block * +mmInit(int ofs, int size) +{ + struct mem_block *heap, *block; + + if (size <= 0) + return NULL; + + heap = (struct mem_block *) calloc(1, sizeof(struct mem_block)); + if (!heap) + return NULL; + + block = (struct mem_block *) calloc(1, sizeof(struct mem_block)); + if (!block) { + free(heap); + return NULL; + } + + heap->next = block; + heap->prev = block; + heap->next_free = block; + heap->prev_free = block; + + block->heap = heap; + block->next = heap; + block->prev = heap; + block->next_free = heap; + block->prev_free = heap; + + block->ofs = ofs; + block->size = size; + block->free = 1; + + return heap; +} + + +static struct mem_block * +SliceBlock(struct mem_block *p, + int startofs, int size, + int reserved, int alignment) +{ + struct mem_block *newblock; + + /* break left [p, newblock, p->next], then p = newblock */ + if (startofs > p->ofs) { + newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block)); + if (!newblock) + return NULL; + newblock->ofs = startofs; + newblock->size = p->size - (startofs - p->ofs); + newblock->free = 1; + newblock->heap = p->heap; + + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + + newblock->next_free = p->next_free; + newblock->prev_free = p; + p->next_free->prev_free = newblock; + p->next_free = newblock; + + p->size -= newblock->size; + p = newblock; + } + + /* break right, also [p, newblock, p->next] */ + if (size < p->size) { + newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block)); + if (!newblock) + return NULL; + newblock->ofs = startofs + size; + newblock->size = p->size - size; + newblock->free = 1; + newblock->heap = p->heap; + + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + + newblock->next_free = p->next_free; + newblock->prev_free = p; + p->next_free->prev_free = newblock; + p->next_free = newblock; + + p->size = size; + } + + /* p = middle block */ + p->free = 0; + + /* Remove p from the free list: + */ + p->next_free->prev_free = p->prev_free; + p->prev_free->next_free = p->next_free; + + p->next_free = 0; + p->prev_free = 0; + + p->reserved = reserved; + return p; +} + + +struct mem_block * +mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch) +{ + struct mem_block *p; + const int mask = (1 << align2)-1; + int startofs = 0; + int endofs; + + if (!heap || align2 < 0 || size <= 0) + return NULL; + + for (p = heap->next_free; p != heap; p = p->next_free) { + assert(p->free); + + startofs = (p->ofs + mask) & ~mask; + if ( startofs < startSearch ) { + startofs = startSearch; + } + endofs = startofs+size; + if (endofs <= (p->ofs+p->size)) + break; + } + + if (p == heap) + return NULL; + + assert(p->free); + p = SliceBlock(p,startofs,size,0,mask+1); + + return p; +} + + +struct mem_block * +mmFindBlock(struct mem_block *heap, int start) +{ + struct mem_block *p; + + for (p = heap->next; p != heap; p = p->next) { + if (p->ofs == start) + return p; + } + + return NULL; +} + + +static int +Join2Blocks(struct mem_block *p) +{ + /* XXX there should be some assertions here */ + + /* NOTE: heap->free == 0 */ + + if (p->free && p->next->free) { + struct mem_block *q = p->next; + + assert(p->ofs + p->size == q->ofs); + p->size += q->size; + + p->next = q->next; + q->next->prev = p; + + q->next_free->prev_free = q->prev_free; + q->prev_free->next_free = q->next_free; + + free(q); + return 1; + } + return 0; +} + +int 
+mmFreeMem(struct mem_block *b) +{ + if (!b) + return 0; + + if (b->free) { + drmMsg("block already free\n"); + return -1; + } + if (b->reserved) { + drmMsg("block is reserved\n"); + return -1; + } + + b->free = 1; + b->next_free = b->heap->next_free; + b->prev_free = b->heap; + b->next_free->prev_free = b; + b->prev_free->next_free = b; + + Join2Blocks(b); + if (b->prev != b->heap) + Join2Blocks(b->prev); + + return 0; +} + + +void +mmDestroy(struct mem_block *heap) +{ + struct mem_block *p; + + if (!heap) + return; + + for (p = heap->next; p != heap; ) { + struct mem_block *next = p->next; + free(p); + p = next; + } + + free(heap); +} diff --git a/lib/libdrm/intel/mm.h b/lib/libdrm/intel/mm.h new file mode 100644 index 000000000..49e3eecc5 --- /dev/null +++ b/lib/libdrm/intel/mm.h @@ -0,0 +1,96 @@ +/* + * GLX Hardware Device Driver common code + * Copyright (C) 1999 Wittawat Yamwong + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + + +/** + * Memory manager code. Primarily used by device drivers to manage texture + * heaps, etc. + */ + + +#ifndef MM_H +#define MM_H + +struct mem_block { + struct mem_block *next, *prev; + struct mem_block *next_free, *prev_free; + struct mem_block *heap; + int ofs,size; + unsigned int free:1; + unsigned int reserved:1; +}; + +/* Rename the variables in the drm copy of this code so that it doesn't + * conflict with mesa or whoever else has copied it around. 
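
Putting the allocator together, a usage sketch (sizes and offsets are illustrative; note that mmAllocMem takes log2 of the alignment, so 12 means 4096 bytes):

    struct mem_block *heap = mmInit(0, 8 * 1024 * 1024);  /* 8 MB heap */
    struct mem_block *b = mmAllocMem(heap, 65536, 12, 0); /* 64 kB, 4 kB aligned */

    if (b != NULL) {
        /* the range [b->ofs, b->ofs + b->size) is now owned ... */
        mmFreeMem(b);   /* 0 on success; coalesces with free neighbours */
    }
    mmDestroy(heap);
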
+ */
+#define mmInit drm_mmInit
+#define mmAllocMem drm_mmAllocMem
+#define mmFreeMem drm_mmFreeMem
+#define mmFindBlock drm_mmFindBlock
+#define mmDestroy drm_mmDestroy
+#define mmDumpMemInfo drm_mmDumpMemInfo
+
+/**
+ * input: total size in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+extern struct mem_block *mmInit(int ofs, int size);
+
+/**
+ * Allocate 'size' bytes with 2^align2 bytes alignment,
+ * restricting the search to free memory after 'startSearch'.
+ * Depth and back buffers should be in different 4mb banks
+ * to get better page hits if possible.
+ * input: size = size of block
+ *        align2 = 2^align2 bytes alignment
+ *        startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, 0 if error
+ */
+extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
+				    int align2, int startSearch);
+
+/**
+ * Free a block.
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+extern int mmFreeMem(struct mem_block *b);
+
+/**
+ * Find the block that starts at the given offset.
+ * input: pointer to a heap, start offset
+ * return: pointer to a block
+ */
+extern struct mem_block *mmFindBlock(struct mem_block *heap, int start);
+
+/**
+ * Destroy the memory manager.
+ */
+extern void mmDestroy(struct mem_block *heap);
+
+/**
+ * For debugging purposes.
+ */
+extern void mmDumpMemInfo(const struct mem_block *heap);
+
+#endif
diff --git a/lib/libdrm/intel/shlib_version b/lib/libdrm/intel/shlib_version
new file mode 100644
index 000000000..1edea46de
--- /dev/null
+++ b/lib/libdrm/intel/shlib_version
@@ -0,0 +1,2 @@
+major=1
+minor=0
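A short usage sketch of the allocator API above (illustrative only, not part
of the diff; the heap range, block size and function name are invented):

	#include <assert.h>
	#include "mm.h"

	static void
	mm_example(void)
	{
		/* Manage a 1MB range of offsets starting at 0. */
		struct mem_block *heap = mmInit(0, 1024 * 1024);
		struct mem_block *a;

		assert(heap != NULL);

		/* 64KB block, 2^12 = 4096-byte aligned, search from offset 0. */
		a = mmAllocMem(heap, 64 * 1024, 12, 0);
		assert(a != NULL && (a->ofs & 4095) == 0);

		mmFreeMem(a);	/* back onto the free list, coalescing neighbours */
		mmDestroy(heap);
	}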
diff --git a/lib/libdrm/libdrm_lists.h b/lib/libdrm/libdrm_lists.h
new file mode 100644
index 000000000..8e23991f1
--- /dev/null
+++ b/lib/libdrm/libdrm_lists.h
@@ -0,0 +1,87 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+/*
+ * List macros heavily inspired by the Linux kernel
+ * list handling. No list looping yet.
+ */
+
+typedef struct _drmMMListHead
+{
+	struct _drmMMListHead *prev;
+	struct _drmMMListHead *next;
+} drmMMListHead;
+
+#define DRMINITLISTHEAD(__item)			\
+	do {					\
+		(__item)->prev = (__item);	\
+		(__item)->next = (__item);	\
+	} while (0)
+
+#define DRMLISTADD(__item, __list)			\
+	do {						\
+		(__item)->prev = (__list);		\
+		(__item)->next = (__list)->next;	\
+		(__list)->next->prev = (__item);	\
+		(__list)->next = (__item);		\
+	} while (0)
+
+#define DRMLISTADDTAIL(__item, __list)			\
+	do {						\
+		(__item)->next = (__list);		\
+		(__item)->prev = (__list)->prev;	\
+		(__list)->prev->next = (__item);	\
+		(__list)->prev = (__item);		\
+	} while(0)
+
+#define DRMLISTDEL(__item)					\
+	do {							\
+		(__item)->prev->next = (__item)->next;		\
+		(__item)->next->prev = (__item)->prev;		\
+	} while(0)
+
+#define DRMLISTDELINIT(__item)					\
+	do {							\
+		(__item)->prev->next = (__item)->next;		\
+		(__item)->next->prev = (__item)->prev;		\
+		(__item)->next = (__item);			\
+		(__item)->prev = (__item);			\
+	} while(0)
+
+#define DRMLISTENTRY(__type, __item, __field)   \
+    ((__type *)(((char *) (__item)) - offsetof(__type, __field)))
+
+#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
+
+#define DRMLISTFOREACHSAFE(__item, __temp, __list)			\
+	for ((__item) = (__list)->next, (__temp) = (__item)->next;	\
+	     (__item) != (__list);					\
+	     (__item) = (__temp), (__temp) = (__item)->next)
+
+#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list)		\
+	for ((__item) = (__list)->prev, (__temp) = (__item)->prev;	\
+	     (__item) != (__list);					\
+	     (__item) = (__temp), (__temp) = (__item)->prev)
diff --git a/lib/libdrm/shlib_version b/lib/libdrm/shlib_version
index b363be444..c87e1c60d 100644
--- a/lib/libdrm/shlib_version
+++ b/lib/libdrm/shlib_version
@@ -1,2 +1,2 @@
 major=2
-minor=3
+minor=4
diff --git a/lib/libdrm/xf86drm.c b/lib/libdrm/xf86drm.c
index 380254cc0..b96357cde 100644
--- a/lib/libdrm/xf86drm.c
+++ b/lib/libdrm/xf86drm.c
@@ -39,6 +39,7 @@
 #include <fcntl.h>
 #include <errno.h>
 #include <signal.h>
+#include <time.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #define stat_t struct stat
@@ -54,18 +55,6 @@
 
 #include "xf86drm.h"
 
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
-#define DRM_MAJOR 145
-#endif
-
-#ifdef __NetBSD__
-#define DRM_MAJOR 34
-#endif
-
-#ifndef DRM_MAJOR
-#define DRM_MAJOR 226 /* Linux */
-#endif
-
 #ifndef DRM_MAX_MINOR
 #define DRM_MAX_MINOR 16
 #endif
@@ -80,6 +69,9 @@
 
 #define DRM_MSG_VERBOSITY 3
 
+#define DRM_NODE_CONTROL 0
+#define DRM_NODE_RENDER 1
+
 static drmServerInfoPtr drm_server_info;
 
 void drmSetServerInfo(drmServerInfoPtr info)
@@ -103,7 +95,7 @@ static int drmDebugPrint(const char *format, va_list ap)
 
 static int (*drm_debug_print)(const char *format, va_list ap) = drmDebugPrint;
 
-static void
+void
 drmMsg(const char *format, ...)
 {
     va_list ap;
@@ -164,6 +156,19 @@ static char *drmStrdup(const char *s)
     return retval;
 }
 
+/**
+ * Call ioctl, restarting if it is interrupted
+ */
+int
+drmIoctl(int fd, unsigned long request, void *arg)
+{
+    int ret;
+
+    do {
+	ret = ioctl(fd, request, arg);
+    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+    return ret;
+}
 
 static unsigned long drmGetKeyFromFd(int fd)
 {
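The DRMLIST* macros added above implement the kernel-style intrusive list:
the node lives inside the owning struct and DRMLISTENTRY() recovers the
container from the node via offsetof(). A minimal sketch (illustrative only,
not part of the diff; struct buffer and the sizes are invented):

	#include <stddef.h>
	#include <stdio.h>
	#include "libdrm_lists.h"

	struct buffer {
		int size;
		drmMMListHead link;	/* embedded list node */
	};

	static void
	list_example(void)
	{
		drmMMListHead list, *entry, *temp;
		struct buffer a = { 4096 }, b = { 8192 };

		DRMINITLISTHEAD(&list);
		DRMLISTADDTAIL(&a.link, &list);
		DRMLISTADDTAIL(&b.link, &list);

		/* The SAFE variant tolerates unlinking while walking. */
		DRMLISTFOREACHSAFE(entry, temp, &list) {
			struct buffer *buf =
			    DRMLISTENTRY(struct buffer, entry, link);
			printf("%d\n", buf->size);
			DRMLISTDEL(entry);
		}
	}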
@@ -257,7 +262,7 @@ static int drmMatchBusID(const char *id1, const char *id2)
  * special file node with the major and minor numbers specified by \p dev and
  * parent directory if necessary and was called by root.
  */
-static int drmOpenDevice(long dev, int minor)
+static int drmOpenDevice(long dev, int minor, int type)
 {
     stat_t          st;
     char            buf[64];
@@ -267,13 +272,20 @@ static int drmOpenDevice(long dev, int minor)
     uid_t           user    = DRM_DEV_UID;
     gid_t           group   = DRM_DEV_GID, serv_group;
 
-    sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, minor);
+    sprintf(buf, type ? DRM_DEV_NAME : DRM_CONTROL_DEV_NAME, DRM_DIR_NAME, minor);
     drmMsg("drmOpenDevice: node name is %s\n", buf);
 
+    if (drm_server_info) {
+	drm_server_info->get_perms(&serv_group, &serv_mode);
+	devmode  = serv_mode ? serv_mode : DRM_DEV_MODE;
+	devmode &= ~(S_IXUSR|S_IXGRP|S_IXOTH);
+	group = (serv_group >= 0) ? serv_group : DRM_DEV_GID;
+    }
+
 #ifndef X_PRIVSEP
-    fd = open(buf, O_RDWR, 0);
+    fd = open(buf, O_RDWR, 0);
 #else
-    fd = priv_open_device(buf);
+    fd = priv_open_device(buf);
 #endif
     drmMsg("drmOpenDevice: open result is %d, (%s)\n",
	   fd, fd < 0 ? strerror(errno) : "OK");
@@ -298,22 +310,22 @@ static int drmOpenDevice(long dev, int minor)
  * Calls drmOpenDevice() if \p create is set, otherwise assembles the device
  * name from \p minor and opens it.
  */
-static int drmOpenMinor(int minor, int create)
+static int drmOpenMinor(int minor, int create, int type)
 {
     int  fd;
     char buf[64];
 
     if (create)
-	return drmOpenDevice(makedev(DRM_MAJOR, minor), minor);
+	return drmOpenDevice(makedev(DRM_MAJOR, minor), minor, type);
 
-    sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, minor);
+    sprintf(buf, type ? DRM_DEV_NAME : DRM_CONTROL_DEV_NAME, DRM_DIR_NAME, minor);
 #ifndef X_PRIVSEP
     fd = open(buf, O_RDWR, 0);
 #else
     fd = priv_open_device(buf);
 #endif
     if (fd >= 0)
-	return fd;
+	return fd;
     return -errno;
 }
 
@@ -334,7 +346,7 @@ int drmAvailable(void)
     int           retval = 0;
     int           fd;
 
-    if ((fd = drmOpenMinor(0, 1)) < 0) {
+    if ((fd = drmOpenMinor(0, 1, DRM_NODE_RENDER)) < 0) {
 #ifdef __linux__
 	/* Try proc for backward Linux compatibility */
 	if (!access("/proc/dri/0", R_OK))
@@ -375,7 +387,7 @@ static int drmOpenByBusid(const char *busid)
     drmMsg("drmOpenByBusid: Searching for BusID %s\n", busid);
 
     for (i = 0; i < DRM_MAX_MINOR; i++) {
-	fd = drmOpenMinor(i, 1);
+	fd = drmOpenMinor(i, 1, DRM_NODE_RENDER);
 	drmMsg("drmOpenByBusid: drmOpenMinor returns %d\n", fd);
 	if (fd >= 0) {
 	    sv.drm_di_major = 1;
@@ -437,7 +449,7 @@ static int drmOpenByName(const char *name)
      * already in use.  If it's in use it will have a busid assigned already.
      */
     for (i = 0; i < DRM_MAX_MINOR; i++) {
-	if ((fd = drmOpenMinor(i, 1)) >= 0) {
+	if ((fd = drmOpenMinor(i, 1, DRM_NODE_RENDER)) >= 0) {
	    if ((version = drmGetVersion(fd))) {
		if (!strcmp(version->name, name)) {
		    drmFreeVersion(version);
@@ -481,7 +493,7 @@ static int drmOpenByName(const char *name)
		if (*pt) { /* Found busid */
		    return drmOpenByBusid(++pt);
		} else { /* No busid */
-		    return drmOpenDevice(strtol(devstring, NULL, 0),i);
+		    return drmOpenDevice(strtol(devstring, NULL, 0),i, DRM_NODE_RENDER);
		}
	    }
	}
@@ -531,6 +543,10 @@ int drmOpen(const char *name, const char *busid)
     return -1;
 }
 
+int drmOpenControl(int minor)
+{
+    return drmOpenMinor(minor, 0, DRM_NODE_CONTROL);
+}
 
 /**
  * Free the version information returned by drmGetVersion().
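drmOpenControl() gives modesetting clients the control node without touching
the render nodes. A hedged sketch of a caller (illustrative only, not part of
the diff; minor 0 is an arbitrary example):

	#include <stdio.h>
	#include "xf86drm.h"

	static int
	control_example(void)
	{
		/* /dev/drmC0 on this platform; render nodes stay /dev/drm0. */
		int fd = drmOpenControl(0);

		if (fd < 0) {
			fprintf(stderr, "no control node: %d\n", fd);
			return fd;
		}
		/* Requests issued here through drmIoctl() are retried on
		 * EINTR/EAGAIN by the wrapper added above. */
		drmClose(fd);
		return 0;
	}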
@@ -623,7 +639,7 @@ drmVersionPtr drmGetVersion(int fd) version->desc_len = 0; version->desc = NULL; - if (ioctl(fd, DRM_IOCTL_VERSION, version)) { + if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) { drmFreeKernelVersion(version); return NULL; } @@ -635,7 +651,7 @@ drmVersionPtr drmGetVersion(int fd) if (version->desc_len) version->desc = drmMalloc(version->desc_len + 1); - if (ioctl(fd, DRM_IOCTL_VERSION, version)) { + if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) { drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno)); drmFreeKernelVersion(version); return NULL; @@ -721,10 +737,10 @@ char *drmGetBusid(int fd) u.unique_len = 0; u.unique = NULL; - if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) + if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) return NULL; u.unique = drmMalloc(u.unique_len + 1); - if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) + if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) return NULL; u.unique[u.unique_len] = '\0'; @@ -751,7 +767,7 @@ int drmSetBusid(int fd, const char *busid) u.unique = (char *)busid; u.unique_len = strlen(busid); - if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) { + if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) { return -errno; } return 0; @@ -762,7 +778,7 @@ int drmGetMagic(int fd, drm_magic_t * magic) drm_auth_t auth; *magic = 0; - if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth)) + if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth)) return -errno; *magic = auth.magic; return 0; @@ -773,7 +789,7 @@ int drmAuthMagic(int fd, drm_magic_t magic) drm_auth_t auth; auth.magic = magic; - if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth)) + if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth)) return -errno; return 0; } @@ -838,7 +854,7 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type, map.handle = 0; map.type = type; map.flags = flags; - if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map)) + if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map)) return -errno; if (handle) *handle = (drm_handle_t)map.handle; @@ -851,7 +867,7 @@ int drmRmMap(int fd, drm_handle_t handle) map.handle = (void *)handle; - if(ioctl(fd, DRM_IOCTL_RM_MAP, &map)) + if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map)) return -errno; return 0; } @@ -884,7 +900,7 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags, request.flags = flags; request.agp_start = agp_offset; - if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request)) + if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request)) return -errno; return request.count; } @@ -897,7 +913,7 @@ int drmMarkBufs(int fd, double low, double high) info.count = 0; info.list = NULL; - if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) + if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) return -EINVAL; if (!info.count) @@ -906,7 +922,7 @@ int drmMarkBufs(int fd, double low, double high) if (!(info.list = drmMalloc(info.count * sizeof(*info.list)))) return -ENOMEM; - if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { + if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { int retval = -errno; drmFree(info.list); return retval; @@ -915,7 +931,7 @@ int drmMarkBufs(int fd, double low, double high) for (i = 0; i < info.count; i++) { info.list[i].low_mark = low * info.list[i].count; info.list[i].high_mark = high * info.list[i].count; - if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) { + if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) { int retval = -errno; drmFree(info.list); return retval; @@ -947,7 +963,7 @@ int drmFreeBufs(int fd, int count, int *list) request.count = count; request.list = list; - if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request)) + if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request)) return -errno; return 0; } @@ -1036,14 
+1052,14 @@ drmBufInfoPtr drmGetBufInfo(int fd) info.count = 0; info.list = NULL; - if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) + if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) return NULL; if (info.count) { if (!(info.list = drmMalloc(info.count * sizeof(*info.list)))) return NULL; - if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { + if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { drmFree(info.list); return NULL; } @@ -1087,7 +1103,7 @@ drmBufMapPtr drmMapBufs(int fd) bufs.count = 0; bufs.list = NULL; bufs.virtual = NULL; - if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) + if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) return NULL; if (!bufs.count) @@ -1096,7 +1112,7 @@ drmBufMapPtr drmMapBufs(int fd) if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list)))) return NULL; - if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) { + if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) { drmFree(bufs.list); return NULL; } @@ -1211,7 +1227,7 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags) if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES; if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES; - while (ioctl(fd, DRM_IOCTL_LOCK, &lock)) + while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock)) ; return 0; } @@ -1234,7 +1250,7 @@ int drmUnlock(int fd, drm_context_t context) lock.context = context; lock.flags = 0; - return ioctl(fd, DRM_IOCTL_UNLOCK, &lock); + return drmIoctl(fd, DRM_IOCTL_UNLOCK, &lock); } drm_context_t *drmGetReservedContextList(int fd, int *count) @@ -1246,7 +1262,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count) res.count = 0; res.contexts = NULL; - if (ioctl(fd, DRM_IOCTL_RES_CTX, &res)) + if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res)) return NULL; if (!res.count) @@ -1260,7 +1276,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count) } res.contexts = list; - if (ioctl(fd, DRM_IOCTL_RES_CTX, &res)) + if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res)) return NULL; for (i = 0; i < res.count; i++) @@ -1299,7 +1315,7 @@ int drmCreateContext(int fd, drm_context_t *handle) drm_ctx_t ctx; ctx.flags = 0; /* Modified with functions below */ - if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx)) + if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx)) return -errno; *handle = ctx.handle; return 0; @@ -1310,7 +1326,7 @@ int drmSwitchToContext(int fd, drm_context_t context) drm_ctx_t ctx; ctx.handle = context; - if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx)) + if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx)) return -errno; return 0; } @@ -1331,7 +1347,7 @@ int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags) ctx.flags |= _DRM_CONTEXT_PRESERVED; if (flags & DRM_CONTEXT_2DONLY) ctx.flags |= _DRM_CONTEXT_2DONLY; - if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx)) + if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx)) return -errno; return 0; } @@ -1342,7 +1358,7 @@ int drmGetContextFlags(int fd, drm_context_t context, drm_ctx_t ctx; ctx.handle = context; - if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx)) + if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx)) return -errno; *flags = 0; if (ctx.flags & _DRM_CONTEXT_PRESERVED) @@ -1373,7 +1389,7 @@ int drmDestroyContext(int fd, drm_context_t handle) { drm_ctx_t ctx; ctx.handle = handle; - if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx)) + if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx)) return -errno; return 0; } @@ -1381,7 +1397,7 @@ int drmDestroyContext(int fd, drm_context_t handle) int drmCreateDrawable(int fd, drm_drawable_t *handle) { drm_draw_t draw; - if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw)) + if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw)) return -errno; *handle = 
draw.handle; return 0; @@ -1391,7 +1407,7 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle) { drm_draw_t draw; draw.handle = handle; - if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw)) + if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw)) return -errno; return 0; } @@ -1407,7 +1423,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle, update.num = num; update.data = (unsigned long long)(unsigned long)data; - if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update)) + if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update)) return -errno; return 0; @@ -1427,7 +1443,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle, */ int drmAgpAcquire(int fd) { - if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL)) + if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL)) return -errno; return 0; } @@ -1445,7 +1461,7 @@ int drmAgpAcquire(int fd) */ int drmAgpRelease(int fd) { - if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL)) + if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL)) return -errno; return 0; } @@ -1468,7 +1484,7 @@ int drmAgpEnable(int fd, unsigned long mode) drm_agp_mode_t m; m.mode = mode; - if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m)) + if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m)) return -errno; return 0; } @@ -1499,7 +1515,7 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type, b.size = size; b.handle = 0; b.type = type; - if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b)) + if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b)) return -errno; if (address != 0UL) *address = b.physical; @@ -1526,7 +1542,7 @@ int drmAgpFree(int fd, drm_handle_t handle) b.size = 0; b.handle = handle; - if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b)) + if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b)) return -errno; return 0; } @@ -1551,7 +1567,7 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset) b.handle = handle; b.offset = offset; - if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b)) + if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b)) return -errno; return 0; } @@ -1575,7 +1591,7 @@ int drmAgpUnbind(int fd, drm_handle_t handle) b.handle = handle; b.offset = 0; - if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b)) + if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b)) return -errno; return 0; } @@ -1596,7 +1612,7 @@ int drmAgpVersionMajor(int fd) { drm_agp_info_t i; - if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) + if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) return -errno; return i.agp_version_major; } @@ -1617,7 +1633,7 @@ int drmAgpVersionMinor(int fd) { drm_agp_info_t i; - if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) + if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) return -errno; return i.agp_version_minor; } @@ -1638,7 +1654,7 @@ unsigned long drmAgpGetMode(int fd) { drm_agp_info_t i; - if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) + if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.mode; } @@ -1659,7 +1675,7 @@ unsigned long drmAgpBase(int fd) { drm_agp_info_t i; - if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) + if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.aperture_base; } @@ -1680,7 +1696,7 @@ unsigned long drmAgpSize(int fd) { drm_agp_info_t i; - if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) + if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.aperture_size; } @@ -1701,7 +1717,7 @@ unsigned long drmAgpMemoryUsed(int fd) { drm_agp_info_t i; - if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) + if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.memory_used; } @@ -1722,7 +1738,7 @@ unsigned long drmAgpMemoryAvail(int fd) { drm_agp_info_t i; - if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) + if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.memory_allowed; } @@ -1743,7 +1759,7 @@ unsigned int 
 drmAgpVendorId(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
	return 0;
     return i.id_vendor;
 }
@@ -1764,7 +1780,7 @@ unsigned int drmAgpDeviceId(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
	return 0;
     return i.id_device;
 }
@@ -1776,7 +1792,7 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
     *handle = 0;
     sg.size   = size;
     sg.handle = 0;
-    if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
+    if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
	return -errno;
     *handle = sg.handle;
     return 0;
@@ -1788,7 +1804,7 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
 
     sg.size   = 0;
     sg.handle = handle;
-    if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg))
+    if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))
	return -errno;
     return 0;
 }
@@ -1806,13 +1822,33 @@
  */
 int drmWaitVBlank(int fd, drmVBlankPtr vbl)
 {
+    struct timespec timeout, cur;
     int ret;
 
+    ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
+    if (ret < 0) {
+	fprintf(stderr, "clock_gettime failed: %s\n", strerror(errno));
+	goto out;
+    }
+    timeout.tv_sec++;
+
     do {
        ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
        vbl->request.type &= ~DRM_VBLANK_RELATIVE;
+       if (ret && errno == EINTR) {
+	       clock_gettime(CLOCK_MONOTONIC, &cur);
+	       /* Timeout after 1s */
+	       if (cur.tv_sec > timeout.tv_sec ||
+		   (cur.tv_sec == timeout.tv_sec && cur.tv_nsec >=
+		    timeout.tv_nsec)) {
+		       errno = EBUSY;
+		       ret = -1;
+		       break;
+	       }
+       }
     } while (ret && errno == EINTR);
 
+out:
     return ret;
 }
 
@@ -1859,7 +1895,7 @@ int drmCtlInstHandler(int fd, int irq)
 
     ctl.func  = DRM_INST_HANDLER;
     ctl.irq   = irq;
-    if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
+    if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
	return -errno;
     return 0;
 }
@@ -1882,7 +1918,7 @@ int drmCtlUninstHandler(int fd)
 
     ctl.func  = DRM_UNINST_HANDLER;
     ctl.irq   = 0;
-    if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
+    if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
	return -errno;
     return 0;
 }
@@ -1899,7 +1935,7 @@ int drmFinish(int fd, int context, drmLockFlags flags)
     if (flags & DRM_LOCK_FLUSH_ALL)  lock.flags |= _DRM_LOCK_FLUSH_ALL;
     if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
     if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
-    if (ioctl(fd, DRM_IOCTL_FINISH, &lock))
+    if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))
	return -errno;
     return 0;
 }
@@ -1925,7 +1961,7 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
     p.busnum  = busnum;
     p.devnum  = devnum;
     p.funcnum = funcnum;
-    if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
+    if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
	return -errno;
     return p.irq;
 }
@@ -1967,7 +2003,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
     map.ctx_id = ctx_id;
     map.handle = (void *)handle;
 
-    if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
+    if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
	return -errno;
     return 0;
 }
@@ -1979,7 +2015,7 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
 
     map.ctx_id = ctx_id;
 
-    if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
+    if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
	return -errno;
     if (handle)
	*handle = (drm_handle_t)map.handle;
@@ -1994,7 +2030,7 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
     drm_map_t map;
 
     map.offset = idx;
-    if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
+    if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))
	return -errno;
     *offset = map.offset;
     *size   = map.size;
@@ -2011,7 +2047,7 @@ int drmGetClient(int fd, int idx, int *auth, int
*pid, int *uid, drm_client_t client; client.idx = idx; - if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client)) + if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client)) return -errno; *auth = client.auth; *pid = client.pid; @@ -2026,7 +2062,7 @@ int drmGetStats(int fd, drmStatsT *stats) drm_stats_t s; int i; - if (ioctl(fd, DRM_IOCTL_GET_STATS, &s)) + if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s)) return -errno; stats->count = 0; @@ -2168,7 +2204,7 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version) sv.drm_dd_major = version->drm_dd_major; sv.drm_dd_minor = version->drm_dd_minor; - if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv)) { + if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) { retcode = -errno; } @@ -2199,7 +2235,7 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex) request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex); - if (ioctl(fd, request, data)) { + if (drmIoctl(fd, request, data)) { return -errno; } return 0; @@ -2228,7 +2264,7 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data, request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE, DRM_COMMAND_BASE + drmCommandIndex, size); - if (ioctl(fd, request, data)) { + if (drmIoctl(fd, request, data)) { return -errno; } return 0; @@ -2257,7 +2293,7 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data, request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE, DRM_COMMAND_BASE + drmCommandIndex, size); - if (ioctl(fd, request, data)) { + if (drmIoctl(fd, request, data)) { return -errno; } return 0; @@ -2286,9 +2322,8 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data, request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE, DRM_COMMAND_BASE + drmCommandIndex, size); - if (ioctl(fd, request, data)) { + if (drmIoctl(fd, request, data)) return -errno; - } return 0; } @@ -2353,6 +2388,25 @@ void drmCloseOnce(int fd) } } +#ifdef NOTYET +int drmSetMaster(int fd) +{ + int ret; + + fprintf(stderr,"Setting master \n"); + ret = ioctl(fd, DRM_IOCTL_SET_MASTER, 0); + return ret; +} + +int drmDropMaster(int fd) +{ + int ret; + fprintf(stderr,"Dropping master \n"); + ret = ioctl(fd, DRM_IOCTL_DROP_MASTER, 0); + return ret; +} +#endif + #ifdef X_PRIVSEP static int _priv_open_device(const char *path) @@ -2361,6 +2415,6 @@ _priv_open_device(const char *path) return open(path, O_RDWR, 0); } -int priv_open_device(const char *) +int priv_open_device(const char *) __attribute__((weak, alias ("_priv_open_device"))); #endif diff --git a/lib/libdrm/xf86drm.h b/lib/libdrm/xf86drm.h index 9a943b00d..784896700 100644 --- a/lib/libdrm/xf86drm.h +++ b/lib/libdrm/xf86drm.h @@ -47,13 +47,10 @@ (S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) #define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP) -#ifdef __OpenBSD__ + #define DRM_DIR_NAME "/dev" #define DRM_DEV_NAME "%s/drm%d" -#else -#define DRM_DIR_NAME "/dev/dri" -#define DRM_DEV_NAME "%s/card%d" -#endif +#define DRM_CONTROL_DEV_NAME "%s/drmC%d" #define DRM_PROC_NAME "/proc/dri/" /* For backward Linux compatibility */ #define DRM_ERR_NO_DEVICE (-1001) @@ -79,6 +76,7 @@ typedef struct drmHashEntry { void *tagTable; } drmHashEntry; +extern int drmIoctl(int fd, unsigned long request, void *arg); extern void *drmGetHashTable(void); extern drmHashEntry *drmGetEntry(int fd); @@ -513,6 +511,7 @@ do { register unsigned int __old __asm("o0"); \ /* General user-level programmer's API: unprivileged */ extern int drmAvailable(void); extern int drmOpen(const char *name, const char *busid); +extern int drmOpenControl(int minor); extern int drmClose(int fd); extern 
 drmVersionPtr drmGetVersion(int fd);
 extern drmVersionPtr drmGetLibVersion(int fd);
@@ -662,5 +661,9 @@ extern int drmSLLookupNeighbors(void *l, unsigned long key,
 extern int  drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
 extern void drmCloseOnce(int fd);
 
+extern void drmMsg(const char *format, ...);
+
+extern int drmSetMaster(int fd);
+extern int drmDropMaster(int fd);
 
 #endif
diff --git a/lib/libdrm/xf86drmMode.c b/lib/libdrm/xf86drmMode.c
new file mode 100644
index 000000000..6ec7d59ee
--- /dev/null
+++ b/lib/libdrm/xf86drmMode.c
@@ -0,0 +1,665 @@
+/*
+ * \file xf86drmMode.c
+ * Implementation of the DRM modesetting interface.
+ *
+ * \author Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * \par Acknowledgements:
+ * Feb 2007, Dave Airlie <airlied@linux.ie>
+ */
+
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2007-2008 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * TODO: the types we are after are defined in different headers on different
+ * platforms; find which headers to include to get uint32_t
+ */
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <stdio.h>
+
+#include "xf86drmMode.h"
+#include "xf86drm.h"
+#include <drm.h>
+#include <string.h>
+#include <dirent.h>
+#include <errno.h>
+
+#define U642VOID(x) ((void *)(unsigned long)(x))
+#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
+
+/*
+ * Util functions
+ */
+
+void* drmAllocCpy(void *array, int count, int entry_size)
+{
+	char *r;
+	int i;
+
+	if (!count || !array || !entry_size)
+		return 0;
+
+	if (!(r = drmMalloc(count*entry_size)))
+		return 0;
+
+	for (i = 0; i < count; i++)
+		memcpy(r+(entry_size*i), array+(entry_size*i), entry_size);
+
+	return r;
+}
+
+/*
+ * A couple of free functions.
+ */
+
+void drmModeFreeModeInfo(struct drm_mode_modeinfo *ptr)
+{
+	if (!ptr)
+		return;
+
+	drmFree(ptr);
+}
+
+void drmModeFreeResources(drmModeResPtr ptr)
+{
+	if (!ptr)
+		return;
+
+	drmFree(ptr);
+
+}
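The U642VOID()/VOID2U64() macros above exist because the mode ioctls carry
userspace array pointers in fixed-width 64-bit fields, so the structs have the
same layout for 32-bit and 64-bit processes. A sketch of the round trip
(illustrative only, not part of the diff; struct fake_arg is invented):

	#include <stdint.h>

	#define U642VOID(x) ((void *)(unsigned long)(x))
	#define VOID2U64(x) ((uint64_t)(unsigned long)(x))

	/* Hypothetical ioctl argument: the kernel sees the same struct
	 * layout from any userland because the pointer travels in a
	 * 64-bit field. */
	struct fake_arg {
		uint64_t ids_ptr;
		uint32_t count;
	};

	static void
	u64_example(uint32_t *ids, uint32_t count)
	{
		struct fake_arg arg;
		uint32_t *p;

		arg.ids_ptr = VOID2U64(ids);	/* pointer -> u64 */
		arg.count = count;

		/* ...after the ioctl, recover the pointer: */
		p = U642VOID(arg.ids_ptr);
		(void)p;
	}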
+void drmModeFreeFB(drmModeFBPtr ptr)
+{
+	if (!ptr)
+		return;
+
+	/* we might add more frees later. */
+	drmFree(ptr);
+}
+
+void drmModeFreeCrtc(drmModeCrtcPtr ptr)
+{
+	if (!ptr)
+		return;
+
+	drmFree(ptr);
+
+}
+
+void drmModeFreeConnector(drmModeConnectorPtr ptr)
+{
+	if (!ptr)
+		return;
+
+	drmFree(ptr->modes);
+	drmFree(ptr);
+
+}
+
+void drmModeFreeEncoder(drmModeEncoderPtr ptr)
+{
+	drmFree(ptr);
+}
+
+/*
+ * ModeSetting functions.
+ */
+
+drmModeResPtr drmModeGetResources(int fd)
+{
+	struct drm_mode_card_res res;
+	drmModeResPtr r = 0;
+
+	memset(&res, 0, sizeof(struct drm_mode_card_res));
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
+		return 0;
+
+	if (res.count_fbs)
+		res.fb_id_ptr = VOID2U64(drmMalloc(res.count_fbs*sizeof(uint32_t)));
+	if (res.count_crtcs)
+		res.crtc_id_ptr = VOID2U64(drmMalloc(res.count_crtcs*sizeof(uint32_t)));
+	if (res.count_connectors)
+		res.connector_id_ptr = VOID2U64(drmMalloc(res.count_connectors*sizeof(uint32_t)));
+	if (res.count_encoders)
+		res.encoder_id_ptr = VOID2U64(drmMalloc(res.count_encoders*sizeof(uint32_t)));
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) {
+		r = NULL;
+		goto err_allocs;
+	}
+
+	/*
+	 * return
+	 */
+
+	if (!(r = drmMalloc(sizeof(*r))))
+		return 0;
+
+	r->min_width = res.min_width;
+	r->max_width = res.max_width;
+	r->min_height = res.min_height;
+	r->max_height = res.max_height;
+	r->count_fbs = res.count_fbs;
+	r->count_crtcs = res.count_crtcs;
+	r->count_connectors = res.count_connectors;
+	r->count_encoders = res.count_encoders;
+	/* TODO: we really should test whether these allocs fail. */
+	r->fbs = drmAllocCpy(U642VOID(res.fb_id_ptr), res.count_fbs, sizeof(uint32_t));
+	r->crtcs = drmAllocCpy(U642VOID(res.crtc_id_ptr), res.count_crtcs, sizeof(uint32_t));
+	r->connectors = drmAllocCpy(U642VOID(res.connector_id_ptr), res.count_connectors, sizeof(uint32_t));
+	r->encoders = drmAllocCpy(U642VOID(res.encoder_id_ptr), res.count_encoders, sizeof(uint32_t));
+
+err_allocs:
+	drmFree(U642VOID(res.fb_id_ptr));
+	drmFree(U642VOID(res.crtc_id_ptr));
+	drmFree(U642VOID(res.connector_id_ptr));
+	drmFree(U642VOID(res.encoder_id_ptr));
+
+	return r;
+}
+
+int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
+		 uint8_t bpp, uint32_t pitch, uint32_t bo_handle,
+		 uint32_t *buf_id)
+{
+	struct drm_mode_fb_cmd f;
+	int ret;
+
+	f.width  = width;
+	f.height = height;
+	f.pitch  = pitch;
+	f.bpp    = bpp;
+	f.depth  = depth;
+	f.handle = bo_handle;
+
+	if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_ADDFB, &f)))
+		return ret;
+
+	*buf_id = f.fb_id;
+	return 0;
+}
+
+int drmModeRmFB(int fd, uint32_t bufferId)
+{
+	return drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
+
+
+}
+
+drmModeFBPtr drmModeGetFB(int fd, uint32_t buf)
+{
+	struct drm_mode_fb_cmd info;
+	drmModeFBPtr r;
+
+	info.fb_id = buf;
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETFB, &info))
+		return NULL;
+
+	if (!(r = drmMalloc(sizeof(*r))))
+		return NULL;
+
+	r->fb_id = info.fb_id;
+	r->width = info.width;
+	r->height = info.height;
+	r->pitch = info.pitch;
+	r->bpp = info.bpp;
+	r->handle = info.handle;
+	r->depth = info.depth;
+
+	return r;
+}
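drmModeGetResources() above shows the two-pass ioctl pattern used throughout
this file: one call with zeroed pointers to learn the array sizes, then
allocations, then a second call to fill the arrays. From the caller's side it
reduces to (illustrative only, not part of the diff):

	#include <stdio.h>
	#include "xf86drmMode.h"

	static void
	list_connectors(int fd)
	{
		drmModeResPtr res = drmModeGetResources(fd);
		int i;

		if (res == NULL)
			return;
		/* res->connectors was filled by the second ioctl pass */
		for (i = 0; i < res->count_connectors; i++)
			printf("connector id %u\n", res->connectors[i]);
		drmModeFreeResources(res);
	}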
+
+/*
+ * Crtc functions
+ */
+
+drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId)
+{
+	struct drm_mode_crtc crtc;
+	drmModeCrtcPtr r;
+
+	crtc.crtc_id = crtcId;
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCRTC, &crtc))
+		return 0;
+
+	/*
+	 * return
+	 */
+
+	if (!(r = drmMalloc(sizeof(*r))))
+		return 0;
+
+	r->crtc_id = crtc.crtc_id;
+	r->x = crtc.x;
+	r->y = crtc.y;
+	r->mode_valid = crtc.mode_valid;
+	if (r->mode_valid)
+		memcpy(&r->mode, &crtc.mode, sizeof(struct drm_mode_modeinfo));
+	r->buffer_id = crtc.fb_id;
+	r->gamma_size = crtc.gamma_size;
+	return r;
+}
+
+
+int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
+		   uint32_t x, uint32_t y, uint32_t *connectors, int count,
+		   struct drm_mode_modeinfo *mode)
+{
+	struct drm_mode_crtc crtc;
+
+	crtc.x = x;
+	crtc.y = y;
+	crtc.crtc_id = crtcId;
+	crtc.fb_id = bufferId;
+	crtc.set_connectors_ptr = VOID2U64(connectors);
+	crtc.count_connectors = count;
+	if (mode) {
+		memcpy(&crtc.mode, mode, sizeof(struct drm_mode_modeinfo));
+		crtc.mode_valid = 1;
+	} else
+		crtc.mode_valid = 0;
+
+	return drmIoctl(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
+}
+
+/*
+ * Cursor manipulation
+ */
+
+int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height)
+{
+	struct drm_mode_cursor arg;
+
+	arg.flags = DRM_MODE_CURSOR_BO;
+	arg.crtc_id = crtcId;
+	arg.width = width;
+	arg.height = height;
+	arg.handle = bo_handle;
+
+	return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+}
+
+int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
+{
+	struct drm_mode_cursor arg;
+
+	arg.flags = DRM_MODE_CURSOR_MOVE;
+	arg.crtc_id = crtcId;
+	arg.x = x;
+	arg.y = y;
+
+	return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+}
+
+/*
+ * Encoder get
+ */
+drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id)
+{
+	struct drm_mode_get_encoder enc;
+	drmModeEncoderPtr r = NULL;
+
+	enc.encoder_id = encoder_id;
+	enc.encoder_type = 0;
+	enc.possible_crtcs = 0;
+	enc.possible_clones = 0;
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETENCODER, &enc))
+		return 0;
+
+	if (!(r = drmMalloc(sizeof(*r))))
+		return 0;
+
+	r->encoder_id = enc.encoder_id;
+	r->crtc_id = enc.crtc_id;
+	r->encoder_type = enc.encoder_type;
+	r->possible_crtcs = enc.possible_crtcs;
+	r->possible_clones = enc.possible_clones;
+
+	return r;
+}
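possible_crtcs and possible_clones above are bitmasks indexed by position in
the resources arrays, not by object id. A sketch of picking a CRTC for an
encoder (illustrative only, not part of the diff):

	#include "xf86drmMode.h"

	/* Return the id of the first CRTC the encoder can drive, or 0. */
	static uint32_t
	pick_crtc(drmModeResPtr res, drmModeEncoderPtr enc)
	{
		int i;

		for (i = 0; i < res->count_crtcs; i++) {
			/* bit i set means res->crtcs[i] is usable */
			if (enc->possible_crtcs & (1 << i))
				return res->crtcs[i];
		}
		return 0;
	}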
+
+/*
+ * Connector manipulation
+ */
+
+drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
+{
+	struct drm_mode_get_connector conn;
+	drmModeConnectorPtr r = NULL;
+
+	conn.connector_id = connector_id;
+	conn.connector_type_id = 0;
+	conn.connector_type = 0;
+	conn.count_modes = 0;
+	conn.modes_ptr = 0;
+	conn.count_props = 0;
+	conn.props_ptr = 0;
+	conn.prop_values_ptr = 0;
+	conn.count_encoders = 0;
+	conn.encoders_ptr = 0;
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
+		return 0;
+
+	if (conn.count_props) {
+		conn.props_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint32_t)));
+		conn.prop_values_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint64_t)));
+	}
+
+	if (conn.count_modes)
+		conn.modes_ptr = VOID2U64(drmMalloc(conn.count_modes*sizeof(struct drm_mode_modeinfo)));
+
+	if (conn.count_encoders)
+		conn.encoders_ptr = VOID2U64(drmMalloc(conn.count_encoders*sizeof(uint32_t)));
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
+		goto err_allocs;
+
+	if (!(r = drmMalloc(sizeof(*r)))) {
+		goto err_allocs;
+	}
+
+	r->connector_id = conn.connector_id;
+	r->encoder_id = conn.encoder_id;
+	r->connection = conn.connection;
+	r->mmWidth = conn.mm_width;
+	r->mmHeight = conn.mm_height;
+	r->subpixel = conn.subpixel;
+	r->count_modes = conn.count_modes;
+	/* TODO: we should test whether these allocs and copies fail. */
+	r->count_props = conn.count_props;
+	r->props = drmAllocCpy(U642VOID(conn.props_ptr), conn.count_props, sizeof(uint32_t));
+	r->prop_values = drmAllocCpy(U642VOID(conn.prop_values_ptr), conn.count_props, sizeof(uint64_t));
+	r->modes = drmAllocCpy(U642VOID(conn.modes_ptr), conn.count_modes, sizeof(struct drm_mode_modeinfo));
+	r->count_encoders = conn.count_encoders;
+	r->encoders = drmAllocCpy(U642VOID(conn.encoders_ptr), conn.count_encoders, sizeof(uint32_t));
+	r->connector_type = conn.connector_type;
+	r->connector_type_id = conn.connector_type_id;
+
+	if (!r->props || !r->prop_values || !r->modes || !r->encoders)
+		goto err_allocs;
+
+err_allocs:
+	drmFree(U642VOID(conn.prop_values_ptr));
+	drmFree(U642VOID(conn.props_ptr));
+	drmFree(U642VOID(conn.modes_ptr));
+	drmFree(U642VOID(conn.encoders_ptr));
+
+	return r;
+}
+
+int drmModeAttachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *mode_info)
+{
+	struct drm_mode_mode_cmd res;
+
+	memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
+	res.connector_id = connector_id;
+
+	return drmIoctl(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
+}
+
+int drmModeDetachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *mode_info)
+{
+	struct drm_mode_mode_cmd res;
+
+	memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
+	res.connector_id = connector_id;
+
+	return drmIoctl(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
+}
+
+
+drmModePropertyPtr drmModeGetProperty(int fd, uint32_t property_id)
+{
+	struct drm_mode_get_property prop;
+	drmModePropertyPtr r;
+
+	prop.prop_id = property_id;
+	prop.count_enum_blobs = 0;
+	prop.count_values = 0;
+	prop.flags = 0;
+	prop.enum_blob_ptr = 0;
+	prop.values_ptr = 0;
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
+		return 0;
+
+	if (prop.count_values)
+		prop.values_ptr = VOID2U64(drmMalloc(prop.count_values * sizeof(uint64_t)));
+
+	if (prop.count_enum_blobs && (prop.flags & DRM_MODE_PROP_ENUM))
+		prop.enum_blob_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(struct drm_mode_property_enum)));
+
+	if (prop.count_enum_blobs && (prop.flags & DRM_MODE_PROP_BLOB)) {
+		prop.values_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(uint32_t)));
+		prop.enum_blob_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(uint32_t)));
+	}
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop)) {
+		r = NULL;
+		goto err_allocs;
+	}
+
+	if (!(r = drmMalloc(sizeof(*r))))
+		return NULL;
+
+	r->prop_id = prop.prop_id;
+	r->count_values = prop.count_values;
+
+	r->flags = prop.flags;
+	if (prop.count_values)
+		r->values = drmAllocCpy(U642VOID(prop.values_ptr), prop.count_values, sizeof(uint64_t));
+	if (prop.flags & DRM_MODE_PROP_ENUM) {
+		r->count_enums = prop.count_enum_blobs;
+		r->enums = drmAllocCpy(U642VOID(prop.enum_blob_ptr), prop.count_enum_blobs, sizeof(struct drm_mode_property_enum));
+	} else if (prop.flags & DRM_MODE_PROP_BLOB) {
+		r->values = drmAllocCpy(U642VOID(prop.values_ptr), prop.count_enum_blobs, sizeof(uint32_t));
+		r->blob_ids = drmAllocCpy(U642VOID(prop.enum_blob_ptr), prop.count_enum_blobs, sizeof(uint32_t));
+		r->count_blobs = prop.count_enum_blobs;
+	}
+	strncpy(r->name, prop.name, DRM_PROP_NAME_LEN);
+	r->name[DRM_PROP_NAME_LEN-1] = 0;
+
+err_allocs:
+	drmFree(U642VOID(prop.values_ptr));
+	drmFree(U642VOID(prop.enum_blob_ptr));
+
+	return r;
+}
+
+void drmModeFreeProperty(drmModePropertyPtr ptr)
+{
+	if (!ptr)
+		return;
+
+	drmFree(ptr->values);
+	drmFree(ptr->enums);
+	drmFree(ptr);
+}
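A connector's props and prop_values arrays are parallel: props[i] names the
property, prop_values[i] holds its current value. A sketch of dumping them
(illustrative only, not part of the diff):

	#include <stdio.h>
	#include "xf86drmMode.h"

	static void
	dump_props(int fd, drmModeConnectorPtr conn)
	{
		int i;

		for (i = 0; i < conn->count_props; i++) {
			drmModePropertyPtr prop =
			    drmModeGetProperty(fd, conn->props[i]);
			if (prop == NULL)
				continue;
			/* conn->prop_values[i] is the current value */
			printf("%s = %llu\n", prop->name,
			    (unsigned long long)conn->prop_values[i]);
			drmModeFreeProperty(prop);
		}
	}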
+
+drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id)
+{
+	struct drm_mode_get_blob blob;
+	drmModePropertyBlobPtr r;
+
+	blob.length = 0;
+	blob.data = 0;
+	blob.blob_id = blob_id;
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob))
+		return NULL;
+
+	if (blob.length)
+		blob.data = VOID2U64(drmMalloc(blob.length));
+
+	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob)) {
+		r = NULL;
+		goto err_allocs;
+	}
+
+	if (!(r = drmMalloc(sizeof(*r))))
+		return NULL;
+
+	r->id = blob.blob_id;
+	r->length = blob.length;
+	r->data = drmAllocCpy(U642VOID(blob.data), 1, blob.length);
+
+err_allocs:
+	drmFree(U642VOID(blob.data));
+	return r;
+}
+
+void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr)
+{
+	if (!ptr)
+		return;
+
+	drmFree(ptr->data);
+	drmFree(ptr);
+}
+
+int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property_id,
+				uint64_t value)
+{
+	struct drm_mode_connector_set_property osp;
+	int ret;
+
+	osp.connector_id = connector_id;
+	osp.prop_id = property_id;
+	osp.value = value;
+
+	if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp)))
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Checks if a modesetting capable driver has attached to the pci id.
+ * Returns 0 if modesetting is supported,
+ * -EINVAL on an invalid bus id,
+ * -ENOSYS if there is no modesetting support.
+ */
+int drmCheckModesettingSupported(const char *busid)
+{
+#ifdef __linux__
+	char pci_dev_dir[1024];
+	int domain, bus, dev, func;
+	DIR *sysdir;
+	struct dirent *dent;
+	int found = 0, ret;
+
+	ret = sscanf(busid, "pci:%04x:%02x:%02x.%d", &domain, &bus, &dev, &func);
+	if (ret != 4)
+		return -EINVAL;
+
+	sprintf(pci_dev_dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/drm",
+		domain, bus, dev, func);
+
+	sysdir = opendir(pci_dev_dir);
+	if (sysdir) {
+		dent = readdir(sysdir);
+		while (dent) {
+			if (!strncmp(dent->d_name, "controlD", 8)) {
+				found = 1;
+				break;
+			}
+
+			dent = readdir(sysdir);
+		}
+		closedir(sysdir);
+		if (found)
+			return 0;
+	}
+
+	sprintf(pci_dev_dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/",
+		domain, bus, dev, func);
+
+	sysdir = opendir(pci_dev_dir);
+	if (!sysdir)
+		return -EINVAL;
+
+	dent = readdir(sysdir);
+	while (dent) {
+		if (!strncmp(dent->d_name, "drm:controlD", 12)) {
+			found = 1;
+			break;
+		}
+
+		dent = readdir(sysdir);
+	}
+
+	closedir(sysdir);
+	if (found)
+		return 0;
+#endif
+	return -ENOSYS;
+
+}
+
+int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
+			uint16_t *red, uint16_t *green, uint16_t *blue)
+{
+	int ret;
+	struct drm_mode_crtc_lut l;
+
+	l.crtc_id = crtc_id;
+	l.gamma_size = size;
+	l.red = VOID2U64(red);
+	l.green = VOID2U64(green);
+	l.blue = VOID2U64(blue);
+
+	if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_GETGAMMA, &l)))
+		return ret;
+
+	return 0;
+}
+
+int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
+			uint16_t *red, uint16_t *green, uint16_t *blue)
+{
+	int ret;
+	struct drm_mode_crtc_lut l;
+
+	l.crtc_id = crtc_id;
+	l.gamma_size = size;
+	l.red = VOID2U64(red);
+	l.green = VOID2U64(green);
+	l.blue = VOID2U64(blue);
+
+	if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_SETGAMMA, &l)))
+		return ret;
+
+	return 0;
+}
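A sketch of loading a linear gamma ramp with drmModeCrtcSetGamma(); the ramp
size should come from drmModeCrtc.gamma_size (illustrative only, not part of
the diff; assumes size > 1):

	#include <stdint.h>
	#include <stdlib.h>
	#include "xf86drmMode.h"

	static int
	set_linear_gamma(int fd, uint32_t crtc_id, uint32_t size)
	{
		uint16_t *red, *green, *blue;
		uint32_t i;
		int ret = -1;

		if (size < 2)
			return -1;
		red = calloc(size, sizeof(uint16_t));
		green = calloc(size, sizeof(uint16_t));
		blue = calloc(size, sizeof(uint16_t));
		if (red && green && blue) {
			/* ramp from 0 to 0xffff across the table */
			for (i = 0; i < size; i++)
				red[i] = green[i] = blue[i] =
				    (uint16_t)((i * 0xffffu) / (size - 1));
			ret = drmModeCrtcSetGamma(fd, crtc_id, size,
			    red, green, blue);
		}
		free(red);
		free(green);
		free(blue);
		return ret;
	}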
diff --git a/lib/libdrm/xf86drmMode.h b/lib/libdrm/xf86drmMode.h
new file mode 100644
index 000000000..378afe4ef
--- /dev/null
+++ b/lib/libdrm/xf86drmMode.h
@@ -0,0 +1,251 @@
+/*
+ * \file xf86drmMode.h
+ * Header for DRM modesetting interface.
+ *
+ * \author Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * \par Acknowledgements:
+ * Feb 2007, Dave Airlie <airlied@linux.ie>
+ */
+
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2007-2008 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm.h>
+
+/*
+ * This is the modesetting interface for drm.
+ *
+ * In order to use this interface you must include either <stdint.h> or another
+ * header defining uint32_t, int32_t and uint16_t.
+ *
+ * It aims to provide a randr1.2 compatible interface for modesetting in the
+ * kernel; the interface is also meant to be used by libraries like EGL.
+ *
+ * More information can be found in randrproto.txt, available here:
+ * http://gitweb.freedesktop.org/?p=xorg/proto/randrproto.git
+ *
+ * There are some major differences to be noted. Unlike the randr1.2 protocol,
+ * you need to create the memory object of the framebuffer yourself with the
+ * ttm buffer object interface. This object needs to be pinned.
+ */
+
+typedef struct _drmModeRes {
+
+	int count_fbs;
+	uint32_t *fbs;
+
+	int count_crtcs;
+	uint32_t *crtcs;
+
+	int count_connectors;
+	uint32_t *connectors;
+
+	int count_encoders;
+	uint32_t *encoders;
+
+	uint32_t min_width, max_width;
+	uint32_t min_height, max_height;
+} drmModeRes, *drmModeResPtr;
+
+typedef struct drm_mode_fb_cmd drmModeFB, *drmModeFBPtr;
+
+typedef struct _drmModePropertyBlob {
+	uint32_t id;
+	uint32_t length;
+	void *data;
+} drmModePropertyBlobRes, *drmModePropertyBlobPtr;
+
+typedef struct _drmModeProperty {
+	uint32_t prop_id;
+	uint32_t flags;
+	char name[DRM_PROP_NAME_LEN];
+	int count_values;
+	uint64_t *values; // store the blob lengths
+	int count_enums;
+	struct drm_mode_property_enum *enums;
+	int count_blobs;
+	uint32_t *blob_ids; // store the blob IDs
+} drmModePropertyRes, *drmModePropertyPtr;
+
+typedef struct _drmModeCrtc {
+	uint32_t crtc_id;
+	uint32_t buffer_id; /**< FB id to connect to, 0 = disconnect */
+
+	uint32_t x, y; /**< Position on the framebuffer */
+	uint32_t width, height;
+	int mode_valid;
+	struct drm_mode_modeinfo mode;
+
+	int gamma_size; /**< Number of gamma stops */
+
+} drmModeCrtc, *drmModeCrtcPtr;
+
+typedef struct _drmModeEncoder {
+	uint32_t encoder_id;
+	uint32_t encoder_type;
+	uint32_t crtc_id;
+	uint32_t possible_crtcs;
+	uint32_t possible_clones;
+} drmModeEncoder, *drmModeEncoderPtr;
+
+typedef enum {
+	DRM_MODE_CONNECTED         = 1,
+	DRM_MODE_DISCONNECTED      = 2,
+	DRM_MODE_UNKNOWNCONNECTION = 3
+} drmModeConnection;
+
+typedef enum {
+	DRM_MODE_SUBPIXEL_UNKNOWN        = 1,
+	DRM_MODE_SUBPIXEL_HORIZONTAL_RGB = 2,
+	DRM_MODE_SUBPIXEL_HORIZONTAL_BGR = 3,
+	DRM_MODE_SUBPIXEL_VERTICAL_RGB   = 4,
+	DRM_MODE_SUBPIXEL_VERTICAL_BGR   = 5,
+	DRM_MODE_SUBPIXEL_NONE           = 6
+} drmModeSubPixel;
+
+typedef struct _drmModeConnector {
+	uint32_t connector_id;
+	uint32_t encoder_id; /**< Encoder currently connected to */
+	uint32_t connector_type;
+	uint32_t connector_type_id;
+	drmModeConnection connection;
+	uint32_t mmWidth, mmHeight; /**< WxH in millimeters */
+	drmModeSubPixel subpixel;
+
+	int count_modes;
+	struct drm_mode_modeinfo *modes;
+
+	int count_props;
+	uint32_t *props; /**< List of property ids */
+	uint64_t *prop_values; /**< List of property values */
+
+	int count_encoders;
+	uint32_t *encoders; /**< List of encoder ids */
+} drmModeConnector, *drmModeConnectorPtr;
+
+
+
+extern void drmModeFreeModeInfo( struct drm_mode_modeinfo *ptr );
+extern void drmModeFreeResources( drmModeResPtr ptr );
+extern void drmModeFreeFB( drmModeFBPtr ptr );
+extern void drmModeFreeCrtc( drmModeCrtcPtr ptr );
+extern void drmModeFreeConnector( drmModeConnectorPtr ptr );
+extern void drmModeFreeEncoder( drmModeEncoderPtr ptr );
+
+/**
+ * Retrieves all of the resources associated with a card.
+ */
+extern drmModeResPtr drmModeGetResources(int fd);
+
+/*
+ * FrameBuffer manipulation.
+ */
+
+/**
+ * Retrieve information about framebuffer bufferId
+ */
+extern drmModeFBPtr drmModeGetFB(int fd, uint32_t bufferId);
+
+/**
+ * Creates a new framebuffer with a buffer object as its scanout buffer.
+ */
+extern int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
+			uint8_t bpp, uint32_t pitch, uint32_t bo_handle,
+			uint32_t *buf_id);
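A hedged sketch of wrapping drmModeAddFB() (illustrative only, not part of
the diff): bo_handle must come from a driver-specific buffer allocator, for
example a GEM handle from libdrm_intel, and the 1024x768, 32bpp/depth-24
layout here is just an example.

	#include <stdint.h>
	#include "xf86drmMode.h"

	static int
	create_fb(int fd, uint32_t bo_handle, uint32_t *fb_id)
	{
		uint32_t width = 1024, height = 768;
		uint32_t pitch = width * 4;	/* 32bpp, tightly packed */

		/* depth 24, bpp 32: x8r8g8b8-style scanout format */
		return drmModeAddFB(fd, width, height, 24, 32, pitch,
		    bo_handle, fb_id);
	}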
+/**
+ * Destroys the given framebuffer.
+ */
+extern int drmModeRmFB(int fd, uint32_t bufferId);
+
+/*
+ * Crtc functions
+ */
+
+/**
+ * Retrieve information about the crtc crtcId
+ */
+extern drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId);
+
+/**
+ * Set the given mode on the crtc crtcId.
+ */
+int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
+		   uint32_t x, uint32_t y, uint32_t *connectors, int count,
+		   struct drm_mode_modeinfo *mode);
+
+/*
+ * Cursor functions
+ */
+
+/**
+ * Set the cursor on crtc
+ */
+int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height);
+
+/**
+ * Move the cursor on crtc
+ */
+int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y);
+
+/**
+ * Encoder functions
+ */
+drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id);
+
+/*
+ * Connector manipulation
+ */
+
+/**
+ * Retrieve information about the connector connectorId.
+ */
+extern drmModeConnectorPtr drmModeGetConnector(int fd,
+		uint32_t connectorId);
+
+/**
+ * Attaches the given mode to a connector.
+ */
+extern int drmModeAttachMode(int fd, uint32_t connectorId, struct drm_mode_modeinfo *mode_info);
+
+/**
+ * Detaches a mode from a connector; the mode must be unused.
+ */
+extern int drmModeDetachMode(int fd, uint32_t connectorId, struct drm_mode_modeinfo *mode_info);
+
+extern drmModePropertyPtr drmModeGetProperty(int fd, uint32_t propertyId);
+extern void drmModeFreeProperty(drmModePropertyPtr ptr);
+
+extern drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id);
+extern void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr);
+extern int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property_id,
+				       uint64_t value);
+extern int drmCheckModesettingSupported(const char *busid);
+
+extern int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
+			       uint16_t *red, uint16_t *green, uint16_t *blue);
+extern int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
+			       uint16_t *red, uint16_t *green, uint16_t *blue);
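Putting the pieces together, an end-to-end modeset sketch (illustrative only,
not part of the diff; fb_id would come from drmModeAddFB() as above, and
error handling is trimmed). It finds the first connected connector, reuses
the CRTC its encoder is currently routed to, and programs the connector's
first mode:

	#include "xf86drmMode.h"

	static int
	modeset_first_connected(int fd, uint32_t fb_id)
	{
		drmModeResPtr res = drmModeGetResources(fd);
		int i, ret = -1;

		if (res == NULL)
			return -1;
		for (i = 0; i < res->count_connectors; i++) {
			drmModeConnectorPtr conn =
			    drmModeGetConnector(fd, res->connectors[i]);
			if (conn == NULL)
				continue;
			if (conn->connection == DRM_MODE_CONNECTED &&
			    conn->count_modes > 0) {
				drmModeEncoderPtr enc =
				    drmModeGetEncoder(fd, conn->encoder_id);
				if (enc != NULL && enc->crtc_id != 0) {
					/* Reuse the currently routed CRTC
					 * and the first (preferred) mode. */
					ret = drmModeSetCrtc(fd, enc->crtc_id,
					    fb_id, 0, 0,
					    &conn->connector_id, 1,
					    &conn->modes[0]);
				}
				drmModeFreeEncoder(enc);
				drmModeFreeConnector(conn);
				break;
			}
			drmModeFreeConnector(conn);
		}
		drmModeFreeResources(res);
		return ret;
	}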