| author    | Mark Kettenis <kettenis@cvs.openbsd.org> | 2013-08-07 19:49:08 +0000 |
|-----------|-------------------------------------------|---------------------------|
| committer | Mark Kettenis <kettenis@cvs.openbsd.org> | 2013-08-07 19:49:08 +0000 |
| commit    | b2ed49e46ee968bda56a0db4c27b54ede0b63ad1 (patch) | |
| tree      | 7e7e07c10abeb0f447758d4d851ff06507319b7e | |
| parent    | 36533235f0281dbe3635ed3f5eeedc9c7f0ccc5d (diff) | |
Another major overhaul of the inteldrm(4) GEM code, bringing us considerably
closer to the Linux 3.8.13 codebase. Almost certainly squashes a few more
bugs.
ok jsg@
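
Much of the mechanical churn in the diff below is a change of error-return convention: the Linux-derived GEM code returns negative errno values (`-EINVAL`, `-ENOENT`), while native OpenBSD interfaces such as copyin(9)/copyout(9) and rw_enter(9) return positive ones, hence the new negations at the boundary (`return -copyout(...)`, `ret = -rw_enter(...)`). A minimal sketch of that convention follows; the helper names (`bsd_copyout_stub`, `linux_style_ioctl`) are hypothetical stand-ins, not anything from the tree:

```c
/*
 * Hypothetical sketch of the errno sign convention this diff adopts:
 * BSD-style routines report failure as a positive errno value,
 * Linux-style DRM entry points as a negative one, so calls into
 * native interfaces are negated at the boundary.
 */
#include <errno.h>
#include <stdio.h>

/* Stand-in for a native routine like copyout(9): 0 or positive errno. */
static int
bsd_copyout_stub(int fail)
{
    return fail ? EFAULT : 0;
}

/* Linux-convention entry point: 0 on success, negative errno on failure. */
static int
linux_style_ioctl(int fail)
{
    return -bsd_copyout_stub(fail);
}

int
main(void)
{
    printf("success: %d, failure: %d\n",
        linux_style_ioctl(0), linux_style_ioctl(1));
    return 0;
}
```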
-rw-r--r-- | sys/dev/pci/drm/drmP.h                     |   8 |
-rw-r--r-- | sys/dev/pci/drm/drm_drv.c                  |  89 |
-rw-r--r-- | sys/dev/pci/drm/i915/i915_dma.c            |  42 |
-rw-r--r-- | sys/dev/pci/drm/i915/i915_drv.c            |  54 |
-rw-r--r-- | sys/dev/pci/drm/i915/i915_drv.h            |  48 |
-rw-r--r-- | sys/dev/pci/drm/i915/i915_gem.c            | 735 |
-rw-r--r-- | sys/dev/pci/drm/i915/i915_gem_evict.c      |  22 |
-rw-r--r-- | sys/dev/pci/drm/i915/i915_gem_execbuffer.c |  34 |
-rw-r--r-- | sys/dev/pci/drm/i915/i915_gem_tiling.c     |   6 |
-rw-r--r-- | sys/dev/pci/drm/i915/i915_trace.h          |  42 |
10 files changed, 607 insertions, 473 deletions
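
A recurring pattern in the new code is the split between GTT binding and backing-page lifetime: `i915_gem_object_get_pages()` gathers and wires the backing pages, `pages_pin_count` guards them while a binding needs them, and `i915_gem_object_put_pages()` refuses to release pinned pages with `-EBUSY`. A simplified, self-contained sketch of that refcount discipline (stand-in types and helpers, not the driver's own):

```c
/*
 * Simplified sketch of the pages_pin_count discipline introduced in
 * this diff: backing pages may only be released once every pinner has
 * dropped its reference; put_pages() refuses with -EBUSY otherwise.
 * All names here are illustrative stand-ins.
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct gem_obj {
    void *pages;          /* wired backing pages */
    int   pages_pin_count; /* outstanding pinners */
};

static int
get_pages(struct gem_obj *o, size_t size)
{
    if (o->pages != NULL)
        return 0;          /* already populated */
    o->pages = malloc(size);
    return o->pages ? 0 : -ENOMEM;
}

static void
pin_pages(struct gem_obj *o)
{
    assert(o->pages != NULL);
    o->pages_pin_count++;
}

static void
unpin_pages(struct gem_obj *o)
{
    assert(o->pages_pin_count > 0);
    o->pages_pin_count--;
}

static int
put_pages(struct gem_obj *o)
{
    if (o->pages == NULL)
        return 0;
    if (o->pages_pin_count)
        return -EBUSY;     /* still in use by a binding */
    free(o->pages);
    o->pages = NULL;
    return 0;
}

int
main(void)
{
    struct gem_obj o = { NULL, 0 };

    if (get_pages(&o, 4096) != 0)
        return 1;
    pin_pages(&o);
    printf("put while pinned: %d\n", put_pages(&o)); /* -EBUSY */
    unpin_pages(&o);
    printf("put after unpin: %d\n", put_pages(&o));  /* 0 */
    return 0;
}
```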
diff --git a/sys/dev/pci/drm/drmP.h b/sys/dev/pci/drm/drmP.h index 854f377190f..264e6cd0cea 100644 --- a/sys/dev/pci/drm/drmP.h +++ b/sys/dev/pci/drm/drmP.h @@ -1,4 +1,4 @@ -/* $OpenBSD: drmP.h,v 1.139 2013/07/08 09:43:18 jsg Exp $ */ +/* $OpenBSD: drmP.h,v 1.140 2013/08/07 19:49:04 kettenis Exp $ */ /* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com */ @@ -717,10 +717,6 @@ struct drm_device { atomic_t obj_count; u_int obj_name; atomic_t obj_memory; - atomic_t pin_count; - atomic_t pin_memory; - atomic_t gtt_count; - atomic_t gtt_memory; uint32_t gtt_total; SPLAY_HEAD(drm_name_tree, drm_obj) name_tree; struct pool objpl; @@ -950,8 +946,6 @@ struct drm_obj *drm_gem_object_lookup(struct drm_device *, int drm_gem_close_ioctl(struct drm_device *, void *, struct drm_file *); int drm_gem_flink_ioctl(struct drm_device *, void *, struct drm_file *); int drm_gem_open_ioctl(struct drm_device *, void *, struct drm_file *); -int drm_gem_load_uao(bus_dma_tag_t, bus_dmamap_t, struct uvm_object *, - bus_size_t, int, bus_dma_segment_t **); static __inline void drm_gem_object_reference(struct drm_obj *obj) diff --git a/sys/dev/pci/drm/drm_drv.c b/sys/dev/pci/drm/drm_drv.c index 65f0eb717cb..d502ab3f018 100644 --- a/sys/dev/pci/drm/drm_drv.c +++ b/sys/dev/pci/drm/drm_drv.c @@ -1,4 +1,4 @@ -/* $OpenBSD: drm_drv.c,v 1.108 2013/06/17 20:55:41 kettenis Exp $ */ +/* $OpenBSD: drm_drv.c,v 1.109 2013/08/07 19:49:04 kettenis Exp $ */ /*- * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org> * Copyright © 2008 Intel Corporation @@ -1674,93 +1674,6 @@ drm_handle_unref(struct drm_obj *obj) drm_unref(&obj->uobj); } -/* - * Helper function to load a uvm anonymous object into a dmamap, to be used - * for binding to a translation-table style sg mechanism (e.g. agp, or intel - * gtt). - * - * For now we ignore maxsegsz. - */ -int -drm_gem_load_uao(bus_dma_tag_t dmat, bus_dmamap_t map, struct uvm_object *uao, - bus_size_t size, int flags, bus_dma_segment_t **segp) -{ - bus_dma_segment_t *segs; - struct vm_page *pg; - struct pglist plist; - u_long npages = size >> PAGE_SHIFT, i = 0; - int ret; - - TAILQ_INIT(&plist); - - /* - * This is really quite ugly, but nothing else would need - * bus_dmamap_load_uao() yet. - */ - segs = malloc(npages * sizeof(*segs), M_DRM, - M_WAITOK | M_CANFAIL | M_ZERO); - if (segs == NULL) - return (ENOMEM); - - /* This may sleep, no choice in the matter */ - if (uvm_objwire(uao, 0, size, &plist) != 0) { - ret = ENOMEM; - goto free; - } - - TAILQ_FOREACH(pg, &plist, pageq) { - paddr_t pa = VM_PAGE_TO_PHYS(pg); - - if (i > 0 && pa == (segs[i - 1].ds_addr + - segs[i - 1].ds_len)) { - /* contiguous, yay */ - segs[i - 1].ds_len += PAGE_SIZE; - continue; - } - segs[i].ds_addr = pa; - segs[i].ds_len = PAGE_SIZE; - if (i++ > npages) - break; - } - /* this should be impossible */ - if (pg != TAILQ_END(&plist)) { - ret = EINVAL; - goto unwire; - } - - if ((ret = bus_dmamap_load_raw(dmat, map, segs, i, size, flags)) != 0) - goto unwire; - -#if defined(__amd64__) || defined(__i386__) - /* - * Create a mapping that wraps around once; the second half - * maps to the same set of physical pages as the first half. - * Used to implement fast vertical scrolling in inteldrm(4). - * - * XXX This is an ugly hack that wastes pages and abuses the - * internals of the scatter gather DMA code. 
- */ - if (flags & BUS_DMA_GTT_WRAPAROUND) { - struct sg_page_map *spm = map->_dm_cookie; - - for (i = spm->spm_pagecnt / 2; i < spm->spm_pagecnt; i++) - spm->spm_map[i].spe_pa = - spm->spm_map[i - spm->spm_pagecnt / 2].spe_pa; - agp_bus_dma_rebind(dmat, map, flags); - } -#endif - - *segp = segs; - - return (0); - -unwire: - uvm_objunwire(uao, 0, size); -free: - free(segs, M_DRM); - return (ret); -} - /** * drm_gem_free_mmap_offset - release a fake mmap offset for an object * @obj: obj in question diff --git a/sys/dev/pci/drm/i915/i915_dma.c b/sys/dev/pci/drm/i915/i915_dma.c index 8b13310db12..eadeb49e00a 100644 --- a/sys/dev/pci/drm/i915/i915_dma.c +++ b/sys/dev/pci/drm/i915/i915_dma.c @@ -1,4 +1,4 @@ -/* $OpenBSD: i915_dma.c,v 1.9 2013/08/07 00:04:27 jsg Exp $ */ +/* $OpenBSD: i915_dma.c,v 1.10 2013/08/07 19:49:05 kettenis Exp $ */ /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- */ /* @@ -68,11 +68,16 @@ i915_kernel_lost_context(struct drm_device * dev) int -i915_getparam(struct inteldrm_softc *dev_priv, void *data) +i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_getparam_t *param = data; - struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; - int value; + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_getparam_t *param = data; + int value; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } switch (param->param) { case I915_PARAM_CHIPSET_ID: @@ -103,7 +108,7 @@ i915_getparam(struct inteldrm_softc *dev_priv, void *data) #ifdef notyet value = 1; #else - return EINVAL; + return -EINVAL; #endif break; case I915_PARAM_HAS_COHERENT_RINGS: @@ -131,28 +136,37 @@ i915_getparam(struct inteldrm_softc *dev_priv, void *data) value = 1; break; default: - DRM_DEBUG("Unknown parameter %d\n", param->param); - return (EINVAL); + DRM_DEBUG_DRIVER("Unknown parameter %d\n", + param->param); + return -EINVAL; } - return (copyout(&value, param->value, sizeof(int))); + + return -copyout(&value, param->value, sizeof(int)); } int -i915_setparam(struct inteldrm_softc *dev_priv, void *data) +i915_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_setparam_t *param = data; + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_setparam_t *param = data; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } switch (param->param) { case I915_SETPARAM_NUM_USED_FENCES: if (param->value > dev_priv->num_fence_regs || param->value < 0) - return EINVAL; + return -EINVAL; /* Userspace can use first N regs */ dev_priv->fence_reg_start = param->value; break; default: - DRM_DEBUG("unknown parameter %d\n", param->param); - return (EINVAL); + DRM_DEBUG_DRIVER("unknown parameter %d\n", + param->param); + return -EINVAL; } return 0; diff --git a/sys/dev/pci/drm/i915/i915_drv.c b/sys/dev/pci/drm/i915/i915_drv.c index a77f55403fc..518ddece748 100644 --- a/sys/dev/pci/drm/i915/i915_drv.c +++ b/sys/dev/pci/drm/i915/i915_drv.c @@ -1,4 +1,4 @@ -/* $OpenBSD: i915_drv.c,v 1.36 2013/08/07 00:04:28 jsg Exp $ */ +/* $OpenBSD: i915_drv.c,v 1.37 2013/08/07 19:49:05 kettenis Exp $ */ /* * Copyright (c) 2008-2009 Owain G. 
Ainsworth <oga@openbsd.org> * @@ -1336,7 +1336,7 @@ inteldrm_ioctl(struct drm_device *dev, u_long cmd, caddr_t data, dev_priv->entries++; - error = inteldrm_doioctl(dev, cmd, data, file_priv); + error = -inteldrm_doioctl(dev, cmd, data, file_priv); dev_priv->entries--; return (error); @@ -1346,12 +1346,10 @@ int inteldrm_doioctl(struct drm_device *dev, u_long cmd, caddr_t data, struct drm_file *file_priv) { - struct inteldrm_softc *dev_priv = dev->dev_private; - if (file_priv->authenticated == 1) { switch (cmd) { case DRM_IOCTL_I915_GETPARAM: - return (i915_getparam(dev_priv, data)); + return (i915_getparam(dev, data, file_priv)); case DRM_IOCTL_I915_GEM_EXECBUFFER2: return (i915_gem_execbuffer2(dev, data, file_priv)); case DRM_IOCTL_I915_GEM_BUSY: @@ -1404,7 +1402,7 @@ inteldrm_doioctl(struct drm_device *dev, u_long cmd, caddr_t data, if (file_priv->master == 1) { switch (cmd) { case DRM_IOCTL_I915_SETPARAM: - return (i915_setparam(dev_priv, data)); + return (i915_setparam(dev, data, file_priv)); case DRM_IOCTL_I915_GEM_INIT: return (i915_gem_init_ioctl(dev, data, file_priv)); case DRM_IOCTL_I915_GEM_ENTERVT: @@ -1425,7 +1423,7 @@ inteldrm_doioctl(struct drm_device *dev, u_long cmd, caddr_t data, return (intel_sprite_set_colorkey(dev, data, file_priv)); } } - return (EINVAL); + return -EINVAL; } void @@ -1528,20 +1526,6 @@ i915_gem_chipset_flush(struct drm_device *dev) } } -void -inteldrm_set_max_obj_size(struct inteldrm_softc *dev_priv) -{ - struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; - - /* - * Allow max obj size up to the size where ony 2 would fit the - * aperture, but some slop exists due to alignment etc - */ - dev_priv->max_gem_obj_size = (dev->gtt_total - - atomic_read(&dev->pin_memory)) * 3 / 4 / 2; - -} - /** * Pin an object to the GTT and evaluate the relocations landing in it. 
*/ @@ -1595,12 +1579,12 @@ i915_gem_object_pin_and_relocate(struct drm_obj *obj, /* object must have come before us in the list */ if (target_obj == NULL) { i915_gem_object_unpin(obj_priv); - return (ENOENT); + return -ENOENT; } if ((target_obj->do_flags & I915_IN_EXEC) == 0) { printf("%s: object not already in execbuffer\n", __func__); - ret = EBADF; + ret = -EBADF; goto err; } @@ -1612,13 +1596,13 @@ i915_gem_object_pin_and_relocate(struct drm_obj *obj, if (target_obj_priv->dmamap == 0) { DRM_ERROR("No GTT space found for object %d\n", reloc->target_handle); - ret = EINVAL; + ret = -EINVAL; goto err; } /* must be in one write domain and one only */ if (reloc->write_domain & (reloc->write_domain - 1)) { - ret = EINVAL; + ret = -EINVAL; goto err; } if (reloc->read_domains & I915_GEM_DOMAIN_CPU || @@ -1628,7 +1612,7 @@ i915_gem_object_pin_and_relocate(struct drm_obj *obj, "read %08x write %08x", obj, reloc->target_handle, (int)reloc->offset, reloc->read_domains, reloc->write_domain); - ret = EINVAL; + ret = -EINVAL; goto err; } @@ -1641,7 +1625,7 @@ i915_gem_object_pin_and_relocate(struct drm_obj *obj, (int) reloc->offset, reloc->write_domain, target_obj->pending_write_domain); - ret = EINVAL; + ret = -EINVAL; goto err; } @@ -1654,7 +1638,7 @@ i915_gem_object_pin_and_relocate(struct drm_obj *obj, "obj %p target %d offset %d size %d.\n", obj, reloc->target_handle, (int) reloc->offset, (int) obj->size); - ret = EINVAL; + ret = -EINVAL; goto err; } if (reloc->offset & 3) { @@ -1662,7 +1646,7 @@ i915_gem_object_pin_and_relocate(struct drm_obj *obj, "obj %p target %d offset %d.\n", obj, reloc->target_handle, (int) reloc->offset); - ret = EINVAL; + ret = -EINVAL; goto err; } @@ -1678,7 +1662,7 @@ i915_gem_object_pin_and_relocate(struct drm_obj *obj, } ret = i915_gem_object_set_to_gtt_domain(obj_priv, true); - if (ret != 0) + if (ret) goto err; if ((ret = agp_map_subregion(dev_priv->agph, @@ -1702,7 +1686,7 @@ err: /* we always jump to here mid-loop */ drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj_priv); - return (ret); + return ret; } int @@ -1771,10 +1755,12 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, void inteldrm_timeout(void *arg) { - struct inteldrm_softc *dev_priv = arg; + struct inteldrm_softc *dev_priv = arg; + int err; - if (workq_add_task(dev_priv->workq, 0, i915_gem_retire_work_handler, - dev_priv, NULL) == ENOMEM) + err = workq_add_task(dev_priv->workq, 0, i915_gem_retire_work_handler, + dev_priv, NULL); + if (err) DRM_ERROR("failed to run retire handler\n"); } diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h index 7012b11cb1f..fd1d73d93bd 100644 --- a/sys/dev/pci/drm/i915/i915_drv.h +++ b/sys/dev/pci/drm/i915/i915_drv.h @@ -1,4 +1,4 @@ -/* $OpenBSD: i915_drv.h,v 1.25 2013/08/07 00:04:28 jsg Exp $ */ +/* $OpenBSD: i915_drv.h,v 1.26 2013/08/07 19:49:06 kettenis Exp $ */ /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- */ /* @@ -560,7 +560,6 @@ struct inteldrm_softc { } ifp; struct workq *workq; struct vm_page *pgs; - size_t max_gem_obj_size; /* XXX */ /* Protects user_irq_refcount and irq_mask reg */ struct mutex irq_lock; @@ -739,6 +738,9 @@ struct inteldrm_softc { /* accounting, useful for userland debugging */ size_t gtt_total; + size_t mappable_gtt_total; + size_t object_memory; + u32 object_count; } mm; /* for hangcheck */ @@ -895,7 +897,6 @@ struct drm_i915_gem_object { struct list_head exec_list; /* GTT binding. 
*/ bus_dmamap_t dmamap; - bus_dma_segment_t *dma_segs; /* Current offset of the object in GTT space. */ bus_addr_t gtt_offset; struct intel_ring_buffer *ring; @@ -904,10 +905,6 @@ struct drm_i915_gem_object { int dma_flags; /* Fence register for this object. needed for tiling. */ int fence_reg; - /** refcount for times pinned this object in GTT space */ - int pin_count; - /* number of times pinned by pin ioctl. */ - u_int user_pin_count; /** Breadcrumb of last rendering to the buffer. */ u_int32_t last_read_seqno; @@ -945,6 +942,18 @@ struct drm_i915_gem_object { */ unsigned int fence_dirty:1; + /** How many users have pinned this object in GTT space. The following + * users can each hold at most one reference: pwrite/pread, pin_ioctl + * (via user_pin_count), execbuffer (objects are not allowed multiple + * times for the same batchbuffer), and the framebuffer code. When + * switching/pageflipping, the framebuffer code has at most two buffers + * pinned per crtc. + * + * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 + * bits with absolutely no headroom. So use 4 bits. */ + unsigned int pin_count:4; +#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf + /** * Is the object at the current location in the gtt mappable and * fenceable? Used to avoid costly recalculations. @@ -967,12 +976,19 @@ struct drm_i915_gem_object { unsigned int cache_level:2; + bus_dma_segment_t *pages; + int pages_pin_count; + /** * Used for performing relocations during execbuffer insertion. */ unsigned long exec_handle; struct drm_i915_gem_exec_object2 *exec_entry; + /** User space pin count and filp owning the pin */ + uint32_t user_pin_count; + struct drm_file *pin_filp; + /** for phy allocated objects */ struct drm_i915_gem_phys_object *phys_obj; @@ -1186,14 +1202,13 @@ int intel_setup_mchbar(struct inteldrm_softc *, struct pci_attach_args *); void intel_teardown_mchbar(struct inteldrm_softc *, struct pci_attach_args *, int); -int i915_getparam(struct inteldrm_softc *dev_priv, void *data); -int i915_setparam(struct inteldrm_softc *dev_priv, void *data); +int i915_getparam(struct drm_device *, void *, struct drm_file *); +int i915_setparam(struct drm_device *, void *, struct drm_file *); void i915_kernel_lost_context(struct drm_device *); int i915_driver_open(struct drm_device *, struct drm_file *); void i915_driver_close(struct drm_device *, struct drm_file *); /* i915_drv.c */ -void inteldrm_set_max_obj_size(struct inteldrm_softc *); void inteldrm_purge_obj(struct drm_obj *); void i915_gem_chipset_flush(struct drm_device *); int intel_gpu_reset(struct drm_device *); @@ -1277,6 +1292,19 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, int i915_gem_object_sync(struct drm_i915_gem_object *, struct intel_ring_buffer *); +int i915_gem_object_get_pages(struct drm_i915_gem_object *obj); + +static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pages == NULL); + obj->pages_pin_count++; +} +static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pages_pin_count == 0); + obj->pages_pin_count--; +} + int i915_mutex_lock_interruptible(struct drm_device *dev); /* intel_opregion.c */ diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c index b5fccc52d4f..0d83ee19abb 100644 --- a/sys/dev/pci/drm/i915/i915_gem.c +++ b/sys/dev/pci/drm/i915/i915_gem.c @@ -1,4 +1,4 @@ -/* $OpenBSD: i915_gem.c,v 1.30 2013/08/07 00:04:28 jsg Exp $ */ +/* $OpenBSD: i915_gem.c,v 1.31 2013/08/07 19:49:06 kettenis Exp 
$ */ /* * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org> * @@ -45,6 +45,7 @@ #include <dev/pci/drm/drm.h> #include <dev/pci/drm/i915_drm.h> #include "i915_drv.h" +#include "i915_trace.h" #include "intel_drv.h" #include <machine/pmap.h> @@ -55,6 +56,7 @@ int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj); int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj); void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj); +int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev, @@ -77,6 +79,7 @@ void i915_gem_request_remove_from_client(struct drm_i915_gem_request *); int i915_gem_object_flush_active(struct drm_i915_gem_object *); int i915_gem_check_olr(struct intel_ring_buffer *, u32); void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +void i915_gem_verify_gtt(struct drm_device *dev); int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable, bool nonblocking); @@ -88,8 +91,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *); extern int ticks; -static inline void -i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) +static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) { if (obj->tiling_mode) i915_gem_release_mmap(obj); @@ -101,8 +103,20 @@ i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) obj->fence_reg = I915_FENCE_REG_NONE; } -// i915_gem_info_add_obj -// i915_gem_info_remove_obj +/* some bookkeeping */ +static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.object_count++; + dev_priv->mm.object_memory += size; +} + +static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.object_count--; + dev_priv->mm.object_memory -= size; +} int i915_gem_wait_for_error(struct drm_device *dev) @@ -122,14 +136,19 @@ i915_gem_wait_for_error(struct drm_device *dev) while (dev_priv->error_completion == 0) { ret = -msleep(&dev_priv->error_completion, &dev_priv->error_completion_lock, PCATCH, "915wco", 10*hz); - if (ret != 0) { + if (ret) { mtx_leave(&dev_priv->error_completion_lock); - return (ret); + return ret; } } mtx_leave(&dev_priv->error_completion_lock); if (atomic_read(&dev_priv->mm.wedged)) { + /* GPU is hung, bump the completion count to account for + * the token we just consumed so that we never hit zero and + * end up waiting upon a subsequent completion event that + * will never happen. 
+ */ mtx_enter(&dev_priv->error_completion_lock); dev_priv->error_completion++; mtx_leave(&dev_priv->error_completion_lock); @@ -146,7 +165,7 @@ i915_mutex_lock_interruptible(struct drm_device *dev) if (ret) return ret; - ret = rw_enter(&dev->dev_lock, RW_WRITE | RW_INTR); + ret = -rw_enter(&dev->dev_lock, RW_WRITE | RW_INTR); if (ret) return ret; @@ -157,7 +176,7 @@ i915_mutex_lock_interruptible(struct drm_device *dev) static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return obj->dmamap && !obj->active && obj->pin_count == 0; + return obj->dmamap && !obj->active; } int @@ -168,7 +187,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_init *args = data; if (drm_core_check_feature(dev, DRIVER_MODESET)) - return ENODEV; + return -ENODEV; DRM_LOCK(); @@ -177,7 +196,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, (args->gtt_start & PAGE_MASK) != 0 || (args->gtt_end & PAGE_MASK) != 0) { DRM_UNLOCK(); - return (EINVAL); + return -EINVAL; } /* * putting stuff in the last page of the aperture can cause nasty @@ -192,11 +211,10 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, dev->agp->base + args->gtt_start, dev->agp->base + args->gtt_end, &dev_priv->agpdmat) != 0) { DRM_UNLOCK(); - return (ENOMEM); + return -ENOMEM; } dev->gtt_total = (uint32_t)(args->gtt_end - args->gtt_start); - inteldrm_set_max_obj_size(dev_priv); DRM_UNLOCK(); @@ -207,15 +225,21 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_get_aperture *args = data; + struct drm_i915_gem_object *obj; + size_t pinned; - /* we need a write lock here to make sure we get the right value */ + pinned = 0; DRM_LOCK(); - args->aper_size = dev->gtt_total; - args->aper_available_size = (args->aper_size - - atomic_read(&dev->pin_memory)); + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + if (obj->pin_count) + pinned += obj->dmamap->dm_segs[0].ds_len; DRM_UNLOCK(); + args->aper_size = dev_priv->mm.gtt_total; + args->aper_available_size = args->aper_size - pinned; + return 0; } @@ -238,11 +262,10 @@ i915_gem_create(struct drm_file *file, if (obj == NULL) return -ENOMEM; - handle = 0; - ret = drm_handle_create(file, &obj->base, &handle); + ret = -drm_handle_create(file, &obj->base, &handle); if (ret != 0) { drm_unref(&obj->base.uobj); - return (-ret); + return ret; } *handle_p = handle; @@ -265,9 +288,8 @@ int i915_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle) { - printf("%s stub\n", __func__); - return ENOSYS; + return -ENOSYS; // return (drm_gem_handle_delete(file, handle)); } @@ -278,35 +300,10 @@ int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct inteldrm_softc *dev_priv = dev->dev_private; - struct drm_i915_gem_create *args = data; - struct drm_i915_gem_object *obj; - int handle, ret; - - args->size = round_page(args->size); - /* - * XXX to avoid copying between 2 objs more than half the aperture size - * we don't allow allocations that are that big. This will be fixed - * eventually by intelligently falling back to cpu reads/writes in - * such cases. (linux allows this but does cpu maps in the ddx instead). 
- */ - if (args->size > dev_priv->max_gem_obj_size) - return (EFBIG); - - /* Allocate the new object */ - obj = i915_gem_alloc_object(dev, args->size); - if (obj == NULL) - return (ENOMEM); - - /* we give our reference to the handle */ - ret = drm_handle_create(file, &obj->base, &handle); + struct drm_i915_gem_create *args = data; - if (ret == 0) - args->handle = handle; - else - drm_unref(&obj->base.uobj); - - return (ret); + return i915_gem_create(file, dev, + args->size, &args->handle); } int @@ -338,7 +335,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) - return ENOENT; + return -ENOENT; DRM_READLOCK(); drm_hold_object(&obj->base); @@ -347,7 +344,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, */ if (args->offset > obj->base.size || args->size > obj->base.size || args->offset + args->size > obj->base.size) { - ret = EINVAL; + ret = -EINVAL; goto out; } @@ -367,11 +364,11 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto unpin; vaddr = bus_space_vaddr(dev->bst, bsh); if (vaddr == NULL) { - ret = EFAULT; + ret = -EFAULT; goto unmap; } - ret = copyout(vaddr + (offset & PAGE_MASK), + ret = -copyout(vaddr + (offset & PAGE_MASK), (char *)(uintptr_t)args->data_ptr, args->size); unmap: @@ -382,7 +379,7 @@ out: drm_unhold_and_unref(&obj->base); DRM_READUNLOCK(); - return (ret); + return ret; } /** @@ -405,14 +402,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) - return ENOENT; + return -ENOENT; DRM_READLOCK(); drm_hold_object(&obj->base); /* Bounds check destination. */ if (args->offset > obj->base.size || args->size > obj->base.size || args->offset + args->size > obj->base.size) { - ret = EINVAL; + ret = -EINVAL; goto out; } @@ -441,11 +438,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto unpin; vaddr = bus_space_vaddr(dev_priv->bst, bsh); if (vaddr == NULL) { - ret = EFAULT; + ret = -EFAULT; goto unmap; } - ret = copyin((char *)(uintptr_t)args->data_ptr, + ret = -copyin((char *)(uintptr_t)args->data_ptr, vaddr + (offset & PAGE_MASK), args->size); unmap: @@ -456,7 +453,7 @@ out: drm_unhold_and_unref(&obj->base); DRM_READUNLOCK(); - return (ret); + return ret; } int @@ -537,9 +534,9 @@ __wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno, } mtx_leave(&dev_priv->irq_lock); if (dev_priv->mm.wedged) - ret = EIO; + ret = -EIO; - return (ret); + return ret; } /** @@ -568,6 +565,10 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) return __wait_seqno(ring, seqno, interruptible, NULL); } +/** + * Ensures that all rendering to the object has completed and the object is + * safe to unbind from the GTT or access from the CPU. + */ int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool readonly) @@ -660,16 +661,16 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, /* Only handle setting domains to types used by the CPU. */ if (write_domain & I915_GEM_GPU_DOMAINS) - return EINVAL; + return -EINVAL; if (read_domains & I915_GEM_GPU_DOMAINS) - return EINVAL; + return -EINVAL; /* Having something in the write domain implies it's in the read * domain, and only that read domain. Enforce that in the request. 
*/ if (write_domain != 0 && read_domains != write_domain) - return EINVAL; + return -EINVAL; ret = i915_mutex_lock_interruptible(dev); if (ret) @@ -677,7 +678,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { - ret = ENOENT; + ret = -ENOENT; goto unlock; } @@ -696,7 +697,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, * to success, since the client was just asking us to * make sure everything was done. */ - if (ret == EINVAL) + if (ret == -EINVAL) ret = 0; } else { ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); @@ -726,7 +727,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { - ret = ENOENT; + ret = -ENOENT; goto unlock; } @@ -760,7 +761,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file, args->handle); if (obj == NULL) - return ENOENT; + return -ENOENT; /* Since we are doing purely uvm-related operations here we do * not need to hold the object, a reference alone is sufficient @@ -770,7 +771,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (args->size == 0 || args->offset > obj->size || args->size > obj->size || (args->offset + args->size) > obj->size || i915_gem_object_is_purgeable(to_intel_bo(obj))) { - ret = EINVAL; + ret = -EINVAL; goto done; } @@ -795,7 +796,7 @@ done: else drm_unref(&obj->uobj); - return (ret); + return ret; } int @@ -836,29 +837,18 @@ i915_gem_fault(struct drm_obj *gem_obj, struct uvm_faultinfo *ufi, */ drm_unlock_obj(&obj->base); - /* Now bind into the GTT if needed */ - if (!obj->map_and_fenceable) { - ret = i915_gem_object_unbind(obj); - if (ret) - goto error; - } - - if (obj->dmamap == NULL) { - ret = i915_gem_object_bind_to_gtt(obj, 0, true, false); - if (ret) - goto error; + /* Now bind it into the GTT if needed */ + ret = i915_gem_object_pin(obj, 0, true, false); + if (ret) + goto unlock; - ret = i915_gem_object_set_to_gtt_domain(obj, write); - if (ret) - goto error; - } + ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (ret) + goto unpin; ret = i915_gem_object_get_fence(obj); if (ret) - goto error; - - if (i915_gem_object_is_inactive(obj)) - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + goto unpin; obj->fault_mappable = true; @@ -884,6 +874,7 @@ i915_gem_fault(struct drm_obj *gem_obj, struct uvm_faultinfo *ufi, if (pmap_enter(ufi->orig_map->pmap, vaddr, paddr, mapprot, PMAP_CANFAIL | mapprot) != 0) { + i915_gem_object_unpin(obj); drm_unhold_object(&obj->base); uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL, NULL); @@ -893,13 +884,15 @@ i915_gem_fault(struct drm_obj *gem_obj, struct uvm_faultinfo *ufi, return (VM_PAGER_REFAULT); } } -error: +unpin: + i915_gem_object_unpin(obj); +unlock: drm_unhold_object(&obj->base); uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL, NULL); DRM_READUNLOCK(); dev_priv->entries--; pmap_update(ufi->orig_map->pmap); - if (ret == EIO) { + if (ret == -EIO) { /* * EIO means we're wedged, so upon resetting the gpu we'll * be alright and can refault. XXX only on resettable chips. 
@@ -910,7 +903,7 @@ error: } else { ret = VM_PAGER_OK; } - return (ret); + return ret; } /** @@ -1156,41 +1149,193 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) } // i915_gem_object_is_purgeable -// i915_gem_object_put_pages + +void +i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) +{ + int page_count = obj->base.size / PAGE_SIZE; +#if 0 + struct scatterlist *sg; +#endif + int ret, i; + + BUG_ON(obj->madv == __I915_MADV_PURGED); + + ret = i915_gem_object_set_to_cpu_domain(obj, true); + if (ret) { + /* In the event of a disaster, abandon all caches and + * hope for the best. + */ + WARN_ON(ret != -EIO); + i915_gem_clflush_object(obj); + obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + if (i915_gem_object_needs_bit17_swizzle(obj)) + i915_gem_object_save_bit_17_swizzle(obj); + + if (obj->madv == I915_MADV_DONTNEED) + obj->dirty = 0; + +#if 0 + for_each_sg(obj->pages->sgl, sg, page_count, i) { + struct page *page = sg_page(sg); + + if (obj->dirty) + set_page_dirty(page); + + if (obj->madv == I915_MADV_WILLNEED) + mark_page_accessed(page); + + page_cache_release(page); + } +#else + for (i = 0; i < page_count; i++) { + struct vm_page *pg = PHYS_TO_VM_PAGE(obj->pages[i].ds_addr); + + if (obj->dirty) + atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); + } + uvm_objunwire(obj->base.uao, 0, obj->base.size); +#endif + obj->dirty = 0; + +#if 0 + sg_free_table(obj->pages); + kfree(obj->pages); +#else + drm_free(obj->pages); +#endif +} + +int +i915_gem_object_put_pages(struct drm_i915_gem_object *obj) +{ +#if 0 + const struct drm_i915_gem_object_ops *ops = obj->ops; +#endif + + if (obj->pages == NULL) + return 0; + + BUG_ON(obj->dmamap); + + if (obj->pages_pin_count) + return -EBUSY; + + /* ->put_pages might need to allocate memory for the bit17 swizzle + * array, hence protect them from being reaped by removing them from gtt + * lists early. */ + list_del(&obj->gtt_list); + +#if 0 + ops->put_pages(obj); +#else + i915_gem_object_put_pages_gtt(obj); +#endif + obj->pages = NULL; + + if (i915_gem_object_is_purgeable(obj)) + i915_gem_object_truncate(obj); + + return 0; +} + // __i915_gem_shrink // i915_gem_purge // i915_gem_shrink_all -// i915_gem_object_get_pages int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) { #if 0 + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; int page_count, i; struct address_space *mapping; - struct inode *inode; + struct sg_table *st; + struct scatterlist *sg; struct page *page; + gfp_t gfp; +#else + int page_count, i; + bus_dma_segment_t *segs; + struct pglist plist; + struct vm_page *pg; +#endif + + /* Assert that the object is not currently in any GPU domain. As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); + +#if 0 + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return -ENOMEM; + + page_count = obj->base.size / PAGE_SIZE; + if (sg_alloc_table(st, page_count, GFP_KERNEL)) { + sg_free_table(st); + kfree(st); + return -ENOMEM; + } /* Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. 
+ * + * Fail silently without starting the shrinker */ + mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; + gfp = mapping_gfp_mask(mapping); + gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; + gfp &= ~(__GFP_IO | __GFP_WAIT); + for_each_sg(st->sgl, sg, page_count, i) { + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + if (IS_ERR(page)) { + i915_gem_purge(dev_priv, page_count); + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + } + if (IS_ERR(page)) { + /* We've tried hard to allocate the memory by reaping + * our own buffer, now let the real VM do its job and + * go down in flames if truly OOM. + */ + gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD); + gfp |= __GFP_IO | __GFP_WAIT; + + i915_gem_shrink_all(dev_priv); + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + if (IS_ERR(page)) + goto err_pages; + + gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; + gfp &= ~(__GFP_IO | __GFP_WAIT); + } + + sg_set_page(sg, page, PAGE_SIZE, 0); + } + + obj->pages = st; +#else page_count = obj->base.size / PAGE_SIZE; - BUG_ON(obj->pages != NULL); - obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); - if (obj->pages == NULL) + segs = malloc(page_count * sizeof(*segs), M_DRM, + M_WAITOK | M_CANFAIL | M_ZERO); + if (segs == NULL) return -ENOMEM; - inode = obj->base.filp->f_path.dentry->d_inode; - mapping = inode->i_mapping; - gfpmask |= mapping_gfp_mask(mapping); + TAILQ_INIT(&plist); + if (uvm_objwire(obj->base.uao, 0, obj->base.size, &plist)) + goto err_pages; - for (i = 0; i < page_count; i++) { - page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); - if (IS_ERR(page)) - goto err_pages; - - obj->pages[i] = page; + i = 0; + TAILQ_FOREACH(pg, &plist, pageq) { + segs[i].ds_addr = VM_PAGE_TO_PHYS(pg); + segs[i].ds_len = PAGE_SIZE; + i++; } + obj->pages = segs; #endif if (i915_gem_object_needs_bit17_swizzle(obj)) @@ -1200,48 +1345,49 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) #if 0 err_pages: - while (i--) - page_cache_release(obj->pages[i]); - - drm_free_large(obj->pages); - obj->pages = NULL; + for_each_sg(st->sgl, sg, i, page_count) + page_cache_release(sg_page(sg)); + sg_free_table(st); + kfree(st); return PTR_ERR(page); +#else +err_pages: + drm_free(segs); + return -ENOMEM; #endif } -void -i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) +/* Ensure that the associated pages are gathered from the backing storage + * and pinned into our object. i915_gem_object_get_pages() may be called + * multiple times before they are released by a single call to + * i915_gem_object_put_pages() - once the pages are no longer referenced + * either as a result of memory pressure (reaping pages under the shrinker) + * or as the object is itself released. 
+ */ +int +i915_gem_object_get_pages(struct drm_i915_gem_object *obj) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; #if 0 - int page_count = obj->base.size / PAGE_SIZE; - int i; + const struct drm_i915_gem_object_ops *ops = obj->ops; #endif + int ret; - BUG_ON(obj->madv == __I915_MADV_PURGED); - - if (i915_gem_object_needs_bit17_swizzle(obj)) - i915_gem_object_save_bit_17_swizzle(obj); + if (obj->pages) + return 0; - if (obj->madv == I915_MADV_DONTNEED) - obj->dirty = 0; + BUG_ON(obj->pages_pin_count); #if 0 - for (i = 0; i < page_count; i++) { - if (obj->dirty) - set_page_dirty(obj->pages[i]); - - if (obj->madv == I915_MADV_WILLNEED) - mark_page_accessed(obj->pages[i]); - - page_cache_release(obj->pages[i]); - } + ret = ops->get_pages(obj); +#else + ret = i915_gem_object_get_pages_gtt(obj); #endif - obj->dirty = 0; + if (ret) + return ret; -#if 0 - drm_free_large(obj->pages); - obj->pages = NULL; -#endif + list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); + return 0; } void @@ -1290,10 +1436,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); BUG_ON(!obj->active); - if (obj->pin_count != 0) - list_del_init(&obj->mm_list); - else - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); list_del_init(&obj->ring_list); obj->ring = NULL; @@ -1419,6 +1562,7 @@ i915_add_request(struct intel_ring_buffer *ring, mtx_leave(&file_priv->mm.lock); } + trace_i915_gem_request_add(ring, request->seqno); ring->outstanding_lazy_request = 0; if (!dev_priv->mm.suspended) { @@ -1466,7 +1610,7 @@ i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, list_del(&request->list); i915_gem_request_remove_from_client(request); - free(request, M_DRM); + drm_free(request); } while (!list_empty(&ring->active_list)) { @@ -1538,6 +1682,8 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) if (list_empty(&ring->request_list)) return; + WARN_ON(i915_verify_lists(ring->dev)); + seqno = ring->get_seqno(ring, true); while (!list_empty(&ring->request_list)) { @@ -1550,7 +1696,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) if (!i915_seqno_passed(seqno, request->seqno)) break; -// trace_i915_gem_request_retire(ring, request->seqno); + trace_i915_gem_request_retire(ring, request->seqno); /* We know the GPU must have read the request to have * sent us the seqno + interrupt, so use the position * of tail of the request to update the last known position @@ -1578,6 +1724,14 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) i915_gem_object_move_to_inactive(obj); } + + if (unlikely(ring->trace_irq_seqno && + i915_seqno_passed(seqno, ring->trace_irq_seqno))) { + ring->irq_put(ring); + ring->trace_irq_seqno = 0; + } + + WARN_ON(i915_verify_lists(ring->dev)); } void @@ -1719,23 +1873,18 @@ i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; -#if 0 trace_i915_gem_object_change_domain(obj, old_read_domains, old_write_domain); -#endif } /** * Unbinds an object from the GTT aperture. - * - * XXX track dirty and pass down to uvm (note, DONTNEED buffers are clean). 
*/ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = obj->base.dev->dev_private; - struct drm_device *dev = obj->base.dev; int ret = 0; DRM_ASSERT_HELD(&obj->base); @@ -1747,10 +1896,12 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) return 0; if (obj->pin_count) - return EBUSY; + return -EBUSY; + + BUG_ON(obj->pages == NULL); ret = i915_gem_object_finish_gpu(obj); - if (ret == ERESTART || ret == EINTR) + if (ret) return ret; /* Continue on if we fail due to EIO, the GPU is hung so we * should be safe and we need to cleanup or else we might @@ -1761,35 +1912,30 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) /* release the fence reg _after_ flushing */ ret = i915_gem_object_put_fence(obj); - if (ret == ERESTART || ret == EINTR) + if (ret) return ret; - i915_gem_object_put_pages_gtt(obj); + trace_i915_gem_object_unbind(obj); /* * unload the map, then unwire the backing object. */ bus_dmamap_unload(dev_priv->agpdmat, obj->dmamap); - uvm_objunwire(obj->base.uao, 0, obj->base.size); - /* XXX persistent dmamap worth the memory? */ - bus_dmamap_destroy(dev_priv->agpdmat, obj->dmamap); - obj->dmamap = NULL; - free(obj->dma_segs, M_DRM); - obj->dma_segs = NULL; - list_del_init(&obj->gtt_list); - list_del_init(&obj->mm_list); + list_del(&obj->mm_list); + list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); /* Avoid an unnecessary call to unbind on rebind. */ obj->map_and_fenceable = true; + /* XXX persistent dmamap worth the memory? */ + bus_dmamap_destroy(dev_priv->agpdmat, obj->dmamap); + obj->dmamap = NULL; obj->gtt_offset = 0; - atomic_dec(&dev->gtt_count); - atomic_sub(obj->base.size, &dev->gtt_memory); - if (i915_gem_object_is_purgeable(obj)) - i915_gem_object_truncate(obj); + /* XXX Until we've hooked up the shrinking functions. */ + i915_gem_object_put_pages(obj); - return ret; + return 0; } int @@ -2147,7 +2293,11 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) } // i915_gem_valid_gtt_space -// i915_gem_verify_gtt + +void +i915_gem_verify_gtt(struct drm_device *dev) +{ +} /** * Finds free space in the GTT aperture and binds the object there. @@ -2169,7 +2319,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to bind a purgeable object\n"); - return EINVAL; + return -EINVAL; } fence_size = i915_gem_get_gtt_size(dev, @@ -2188,7 +2338,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unfenced_alignment; if (map_and_fenceable && alignment & (fence_alignment - 1)) { DRM_ERROR("Invalid object alignment requested %u\n", alignment); - return EINVAL; + return -EINVAL; } size = map_and_fenceable ? fence_size : obj->base.size; @@ -2202,10 +2352,20 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, return -E2BIG; } - if ((ret = bus_dmamap_create(dev_priv->agpdmat, size, 1, - size, 0, BUS_DMA_WAITOK, &obj->dmamap)) != 0) { + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + ret = -bus_dmamap_create(dev_priv->agpdmat, size, 1, + size, 0, BUS_DMA_WAITOK, &obj->dmamap); + if (ret) { DRM_ERROR("Failed to create dmamap\n"); - return (ret); + i915_gem_object_unpin_pages(obj); + /* XXX Until we've hooked up the shrinking functions. 
*/ + i915_gem_object_put_pages(obj); + return ret; } agp_bus_dma_set_alignment(dev_priv->agpdmat, obj->dmamap, alignment); @@ -2224,40 +2384,36 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, default: BUG(); } - /* - * the helper function wires the uao then binds it to the aperture for - * us, so all we have to do is set up the dmamap then load it. - */ - ret = drm_gem_load_uao(dev_priv->agpdmat, obj->dmamap, obj->base.uao, - obj->base.size, BUS_DMA_WAITOK | obj->dma_flags | flags, - &obj->dma_segs); - /* XXX NOWAIT? */ - if (ret != 0) { - /* If the gtt is empty and we're still having trouble - * fitting our object in, we're out of memory. - */ - if (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.active_list)) { - DRM_ERROR("GTT full, but LRU list empty\n"); - goto error; - } - + ret = -bus_dmamap_load_raw(dev_priv->agpdmat, obj->dmamap, obj->pages, + obj->base.size / PAGE_SIZE, obj->base.size, + BUS_DMA_WAITOK | obj->dma_flags | flags); + if (ret) { ret = i915_gem_evict_something(dev_priv, obj->base.size); - if (ret != 0) + if (ret) goto error; goto search_free; } - i915_gem_object_get_pages_gtt(obj); + /* + * Create a mapping that wraps around once; the second half + * maps to the same set of physical pages as the first half. + * Used to implement fast vertical scrolling in inteldrm(4). + * + * XXX This is an ugly hack that wastes pages and abuses the + * internals of the scatter gather DMA code. + */ + if (obj->dma_flags & BUS_DMA_GTT_WRAPAROUND) { + struct sg_page_map *spm = obj->dmamap->_dm_cookie; + int i; - list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); + for (i = spm->spm_pagecnt / 2; i < spm->spm_pagecnt; i++) + spm->spm_map[i].spe_pa = + spm->spm_map[i - spm->spm_pagecnt / 2].spe_pa; + agp_bus_dma_rebind(dev_priv->agpdmat, obj->dmamap, flags); + } - /* Assert that the object is not currently in any GPU domain. As it - * wasn't in the GTT, there shouldn't be any way it could have been in - * a GPU cache - */ - BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); - BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); + list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); + list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); obj->gtt_offset = obj->dmamap->dm_segs[0].ds_addr - dev->agp->base; @@ -2270,16 +2426,19 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, obj->map_and_fenceable = mappable && fenceable; - atomic_inc(&dev->gtt_count); - atomic_add(obj->base.size, &dev->gtt_memory); - - return (0); + i915_gem_object_unpin_pages(obj); + trace_i915_gem_object_bind(obj, map_and_fenceable); + i915_gem_verify_gtt(dev); + return 0; error: + i915_gem_object_unpin_pages(obj); + /* XXX Until we've hooked up the shrinking functions. */ + i915_gem_object_put_pages(obj); bus_dmamap_destroy(dev_priv->agpdmat, obj->dmamap); obj->dmamap = NULL; obj->gtt_offset = 0; - return (ret); + return ret; } void @@ -2291,6 +2450,10 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. + * + * XXX On OpenBSD we check if we have a DMA mapping instead, + * as the bus_dmamap_sync(9) call below needs one. If we're + * not pinned to the GPU, we don't have a DMA mapping either. 
*/ if (obj->dmamap == NULL) return; @@ -2332,11 +2495,9 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; -#if 0 trace_i915_gem_object_change_domain(obj, obj->base.read_domains, old_write_domain); -#endif } /** Flushes the CPU write domain for the object if it's dirty. */ @@ -2353,11 +2514,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; -#if 0 trace_i915_gem_object_change_domain(obj, obj->base.read_domains, old_write_domain); -#endif } /** @@ -2370,14 +2529,14 @@ int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { drm_i915_private_t *dev_priv = obj->base.dev->dev_private; -// uint32_t old_write_domain, old_read_domains; + uint32_t old_write_domain, old_read_domains; int ret; DRM_ASSERT_HELD(&obj->base); /* Not valid to be called on unbound objects. */ if (obj->dmamap == NULL) - return (EINVAL); + return -EINVAL; if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) return 0; @@ -2388,8 +2547,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) i915_gem_object_flush_cpu_write_domain(obj); -// old_write_domain = obj->base.write_domain; -// old_read_domains = obj->base.read_domains; + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* It should now be out of any other write domains, and we can update * the domain values for our changes. @@ -2402,9 +2561,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) obj->dirty = 1; } -// trace_i915_gem_object_change_domain(obj, -// old_read_domains, -// old_write_domain); + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); /* And bump the LRU for this access */ if (i915_gem_object_is_inactive(obj)) @@ -2429,7 +2588,7 @@ i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return -EBUSY; } - if (obj->dmamap != NULL) { + if (obj->dmamap) { ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; @@ -2473,14 +2632,13 @@ i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, obj->base.read_domains = I915_GEM_DOMAIN_CPU; obj->base.write_domain = I915_GEM_DOMAIN_CPU; -#if 0 trace_i915_gem_object_change_domain(obj, old_read_domains, old_write_domain); -#endif } obj->cache_level = cache_level; + i915_gem_verify_gtt(dev); return 0; } @@ -2498,7 +2656,7 @@ i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { - ret = ENOENT; + ret = -ENOENT; goto unlock; } @@ -2527,7 +2685,7 @@ i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, level = I915_CACHE_LLC; break; default: - return EINVAL; + return -EINVAL; } ret = i915_mutex_lock_interruptible(dev); @@ -2536,7 +2694,7 @@ i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { - ret = ENOENT; + ret = -ENOENT; goto unlock; } @@ -2558,7 +2716,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined) { -// u32 old_read_domains, old_write_domain; + u32 old_read_domains, old_write_domain; int ret; if (pipelined != obj->ring) { @@ -2590,8 +2748,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, i915_gem_object_flush_cpu_write_domain(obj); -// 
old_write_domain = obj->write_domain; -// old_read_domains = obj->read_domains; + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* It should now be out of any other write domains, and we can update * the domain values for our changes. @@ -2599,9 +2757,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, obj->base.write_domain = 0; obj->base.read_domains |= I915_GEM_DOMAIN_GTT; -// trace_i915_gem_object_change_domain(obj, -// old_read_domains, -// old_write_domain); + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); return 0; } @@ -2632,7 +2790,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) { -// uint32_t old_write_domain, old_read_domains; + uint32_t old_write_domain, old_read_domains; int ret; DRM_ASSERT_HELD(obj); @@ -2646,8 +2804,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) i915_gem_object_flush_gtt_write_domain(obj); -// old_write_domain = obj->base.write_domain; -// old_read_domains = obj->base.read_domains; + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* Flush the CPU cache if it's still invalid. */ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { @@ -2669,9 +2827,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) obj->base.write_domain = I915_GEM_DOMAIN_CPU; } -// trace_i915_gem_object_change_domain(obj, -// old_read_domains, -// old_write_domain); + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); return 0; } @@ -2698,7 +2856,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) int ret; if (atomic_read(&dev_priv->mm.wedged)) - return EIO; + return -EIO; mtx_enter(&file_priv->mm.lock); list_for_each_entry(request, &file_priv->mm.request_list, client_list) { @@ -2726,11 +2884,12 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, bool map_and_fenceable, bool nonblocking) { - struct drm_device *dev = obj->base.dev; int ret; DRM_ASSERT_HELD(&obj->base); - inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__); + + if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) + return -EBUSY; if (obj->dmamap != NULL) { if ((alignment && obj->gtt_offset & (alignment - 1)) || @@ -2756,13 +2915,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, return ret; } - if (obj->pin_count++ == 0) { - atomic_inc(&dev->pin_count); - atomic_add(obj->base.size, &dev->pin_memory); - if (!obj->active) - list_del_init(&obj->mm_list); - } - inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__); + obj->pin_count++; + obj->pin_mappable |= map_and_fenceable; return 0; } @@ -2770,30 +2924,17 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, void i915_gem_object_unpin(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; - - DRM_ASSERT_HELD(&obj->base); - inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__); - BUG_ON(obj->pin_count == 0); BUG_ON(obj->dmamap == NULL); - if (--obj->pin_count == 0) { - if (!obj->active) - list_move_tail(&obj->mm_list, - &dev_priv->mm.inactive_list); - atomic_dec(&dev->pin_count); - atomic_sub(obj->base.size, &dev->pin_memory); - } - inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__); + if (--obj->pin_count == 0) + obj->pin_mappable = false; } int i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct 
drm_file *file) { - struct inteldrm_softc *dev_priv = dev->dev_private; struct drm_i915_gem_pin *args = data; struct drm_i915_gem_object *obj; int ret; @@ -2804,7 +2945,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { - ret = ENOENT; + ret = -ENOENT; goto unlock; } @@ -2812,7 +2953,14 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); - ret = EINVAL; + ret = -EINVAL; + goto out; + } + + if (obj->pin_filp != NULL && obj->pin_filp != file) { + DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", + args->handle); + ret = -EINVAL; goto out; } @@ -2820,15 +2968,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_pin(obj, args->alignment, true, false); if (ret) goto out; - inteldrm_set_max_obj_size(dev_priv); } obj->user_pin_count++; + obj->pin_filp = file; /* XXX - flush the CPU caches for pinned objects * as the X server doesn't manage domains yet */ - i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_flush_cpu_write_domain(obj); args->offset = obj->gtt_offset; out: drm_unhold_and_unref(&obj->base); @@ -2841,7 +2989,6 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct inteldrm_softc *dev_priv = dev->dev_private; struct drm_i915_gem_pin *args = data; struct drm_i915_gem_object *obj; int ret; @@ -2852,21 +2999,22 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { - ret = ENOENT; + ret = -ENOENT; goto unlock; } drm_hold_object(&obj->base); - if (obj->user_pin_count == 0) { - ret = EINVAL; + if (obj->pin_filp != file) { + DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", + args->handle); + ret = -EINVAL; goto out; } - obj->user_pin_count--; if (obj->user_pin_count == 0) { + obj->pin_filp = NULL; i915_gem_object_unpin(obj); - inteldrm_set_max_obj_size(dev_priv); } out: @@ -2890,12 +3038,22 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { - ret = ENOENT; + ret = -ENOENT; goto unlock; } + /* Count all active objects as busy, even if they are currently not used + * by the gpu. Users of this interface expect objects to eventually + * become non-busy without any further actions, therefore emit any + * necessary flushes here. 
+ */ ret = i915_gem_object_flush_active(obj); + args->busy = obj->active; + if (obj->ring) { +// BUILD_BUG_ON(I915_NUM_RINGS > 16); + args->busy |= intel_ring_flag(obj->ring) << 16; + } drm_gem_object_unreference(&obj->base); unlock: @@ -2918,7 +3076,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, case I915_MADV_WILLNEED: break; default: - return EINVAL; + return -EINVAL; } ret = i915_mutex_lock_interruptible(dev); @@ -2927,15 +3085,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); if (&obj->base == NULL) { - ret = ENOENT; + ret = -ENOENT; goto unlock; } drm_hold_object(&obj->base); - /* invalid to madvise on a pinned BO */ if (obj->pin_count) { - ret = EINVAL; + ret = -EINVAL; goto out; } @@ -2943,7 +3100,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, obj->madv = args->madv; /* if the object is no longer attached, discard its backing storage */ - if (i915_gem_object_is_purgeable(obj) && obj->dmamap == NULL) + if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL) i915_gem_object_truncate(obj); args->retained = obj->madv != __I915_MADV_PURGED; @@ -2968,9 +3125,7 @@ i915_gem_object_init(struct drm_i915_gem_object *obj) /* Avoid an unnecessary call to unbind on the first bind. */ obj->map_and_fenceable = true; -#ifdef notyet i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); -#endif } struct drm_i915_gem_object * @@ -3024,22 +3179,25 @@ void i915_gem_free_object(struct drm_obj *gem_obj) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - struct drm_device *dev = gem_obj->dev; + struct drm_device *dev = obj->base.dev; + drm_i915_private_t *dev_priv = dev->dev_private; DRM_ASSERT_HELD(&obj->base); if (obj->phys_obj) i915_gem_detach_phys_object(dev, obj); - - while (obj->pin_count > 0) - i915_gem_object_unpin(obj); + obj->pin_count = 0; i915_gem_object_unbind(obj); + obj->pages_pin_count = 0; + i915_gem_object_put_pages(obj); + i915_gem_object_free_mmap_offset(obj); + + BUG_ON(obj->pages); + drm_gem_object_release(&obj->base); -#ifdef notyet i915_gem_info_remove_obj(dev_priv, obj->base.size); -#endif drm_free(obj->bit_17); pool_put(&dev->objpl, obj); @@ -3212,11 +3370,10 @@ i915_gem_init(struct drm_device *dev) dev->agp->base + gtt_start, dev->agp->base + gtt_end, &dev_priv->agpdmat) != 0) { DRM_UNLOCK(); - return (ENOMEM); + return -ENOMEM; } dev->gtt_total = (uint32_t)(gtt_end - gtt_start); - inteldrm_set_max_obj_size(dev_priv); dev_priv->mm.gtt_start = gtt_start; dev_priv->mm.gtt_mappable_end = gtt_end; @@ -3224,12 +3381,11 @@ i915_gem_init(struct drm_device *dev) dev_priv->mm.gtt_total = gtt_end - gtt_start; ret = i915_gem_init_hw(dev); - if (ret != 0) { - DRM_UNLOCK(); - return (ret); - } - DRM_UNLOCK(); + if (ret) { + i915_gem_cleanup_aliasing_ppgtt(dev); + return ret; + } return 0; } @@ -3503,7 +3659,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, void *vaddr = obj->phys_obj->handle->kva + args->offset; int ret; - ret = copyin((char *)(uintptr_t)args->data_ptr, + ret = -copyin((char *)(uintptr_t)args->data_ptr, vaddr, args->size); i915_gem_chipset_flush(dev); @@ -3533,6 +3689,5 @@ i915_gem_release(struct drm_device *dev, struct drm_file *file) mtx_leave(&file_priv->mm.lock); } -// i915_gem_release // mutex_is_locked_by // i915_gem_inactive_shrink diff --git a/sys/dev/pci/drm/i915/i915_gem_evict.c b/sys/dev/pci/drm/i915/i915_gem_evict.c index 9fda6128715..8ca17222997 100644 --- a/sys/dev/pci/drm/i915/i915_gem_evict.c +++ 
diff --git a/sys/dev/pci/drm/i915/i915_gem_evict.c b/sys/dev/pci/drm/i915/i915_gem_evict.c
index 9fda6128715..8ca17222997 100644
--- a/sys/dev/pci/drm/i915/i915_gem_evict.c
+++ b/sys/dev/pci/drm/i915/i915_gem_evict.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem_evict.c,v 1.3 2013/04/17 20:04:04 kettenis Exp $ */
+/* $OpenBSD: i915_gem_evict.c,v 1.4 2013/08/07 19:49:06 kettenis Exp $ */
 /*
  * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
  *
@@ -67,6 +67,8 @@ i915_gem_find_inactive_object(struct inteldrm_softc *dev_priv,
 	 */
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
 		obj = &obj_priv->base;
+		if (obj_priv->pin_count)
+			continue;
 		if (obj->size >= min_size) {
 			if ((!obj_priv->dirty ||
 			    i915_gem_object_is_purgeable(obj_priv)) &&
@@ -92,7 +94,7 @@ i915_gem_find_inactive_object(struct inteldrm_softc *dev_priv,
 			best = NULL;
 		}
 	}
-	return (best);
+	return best;
 }
 
 int
@@ -126,7 +128,7 @@ i915_gem_evict_something(struct inteldrm_softc *dev_priv, size_t min_size)
 		/* Wait on the rendering and unbind the buffer. */
 		ret = i915_gem_object_unbind(obj_priv);
 		drm_unhold_and_unref(obj);
-		return (ret);
+		return ret;
 	}
 
 	/* If we didn't get anything, but the ring is still processing
@@ -143,7 +145,7 @@ i915_gem_evict_something(struct inteldrm_softc *dev_priv, size_t min_size)
 
 			ret = i915_wait_seqno(request->ring, seqno);
 			if (ret)
-				return (ret);
+				return ret;
 
 			found = 1;
 			break;
@@ -158,9 +160,9 @@ i915_gem_evict_something(struct inteldrm_softc *dev_priv, size_t min_size)
 		 * everything and start again. (This should be rare.)
 		 */
 		if (!list_empty(&dev_priv->mm.inactive_list))
-			return (i915_gem_evict_inactive(dev_priv));
+			return i915_gem_evict_inactive(dev_priv);
 		else
-			return (i915_gem_evict_everything(dev));
+			return i915_gem_evict_everything(dev);
 	}
 	/* NOTREACHED */
 }
@@ -173,7 +175,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	if (list_empty(&dev_priv->mm.inactive_list) &&
 	    list_empty(&dev_priv->mm.active_list))
-		return (ENOSPC);
+		return -ENOSPC;
 
 	/* The gpu_idle will flush everything in the write domain to the
 	 * active list. Then we must move everything off the active list
@@ -194,7 +196,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	KASSERT(list_empty(&dev_priv->mm.inactive_list));
 	KASSERT(list_empty(&dev_priv->mm.active_list));
 
-	return (0);
+	return 0;
 }
 
 /* Clear out the inactive list and unbind everything in it. */
@@ -207,7 +209,7 @@ i915_gem_evict_inactive(struct inteldrm_softc *dev_priv)
 	list_for_each_entry_safe(obj_priv, next,
 	    &dev_priv->mm.inactive_list, mm_list) {
 		if (obj_priv->pin_count != 0) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			DRM_ERROR("Pinned object in unbind list\n");
 			break;
 		}
@@ -222,5 +224,5 @@ i915_gem_evict_inactive(struct inteldrm_softc *dev_priv)
 			break;
 	}
 
-	return (ret);
+	return ret;
 }
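Throughout these files the error returns flip from BSD-style positive errno values (return (EINVAL)) to the Linux convention of negative errno (return -EINVAL), matching the 3.8.13 codebase being tracked; the -copyin() change in i915_gem_phys_pwrite() above shows the sign flip at the BSD boundary. A sketch of that pattern, with a made-up helper name and assuming only the standard copyin(9) interface:

#include <sys/types.h>
#include <sys/systm.h>

static int
gem_copy_from_user(void *kaddr, const void *uaddr, size_t len)
{
	/* copyin(9) returns 0 or a positive errno; negate for GEM. */
	return -copyin(uaddr, kaddr, len);
}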
diff --git a/sys/dev/pci/drm/i915/i915_gem_execbuffer.c b/sys/dev/pci/drm/i915/i915_gem_execbuffer.c
index cfc81528992..fd35978918d 100644
--- a/sys/dev/pci/drm/i915/i915_gem_execbuffer.c
+++ b/sys/dev/pci/drm/i915/i915_gem_execbuffer.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem_execbuffer.c,v 1.8 2013/08/07 00:04:28 jsg Exp $ */
+/* $OpenBSD: i915_gem_execbuffer.c,v 1.9 2013/08/07 19:49:07 kettenis Exp $ */
 /*
  * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
  *
@@ -444,17 +444,17 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	    args->batch_start_offset + args->batch_len < args->batch_len ||
 	    args->batch_start_offset + args->batch_len <
 	    args->batch_start_offset)
-		return (EINVAL);
+		return -EINVAL;
 
 	if (args->buffer_count < 1) {
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
-		return (EINVAL);
+		return -EINVAL;
 	}
 
 	flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
 		if (!DRM_SUSER(curproc))
-			return (EPERM);
+			return -EPERM;
 
 		flags |= I915_DISPATCH_SECURE;
 	}
@@ -475,12 +475,12 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	default:
 		printf("unknown ring %d\n",
 		    (int)(args->flags & I915_EXEC_RING_MASK));
-		return (EINVAL);
+		return -EINVAL;
 	}
 	if (!intel_ring_initialized(ring)) {
 		DRM_DEBUG("execbuf with invalid ring: %d\n",
 		    (int)(args->flags & I915_EXEC_RING_MASK));
-		return (EINVAL);
+		return -EINVAL;
 	}
 
 	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -492,11 +492,11 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		if (ring == &dev_priv->ring[RCS] &&
 		    mode != dev_priv->relative_constants_mode) {
 			if (INTEL_INFO(dev)->gen < 4)
-				return EINVAL;
+				return -EINVAL;
 
 			if (INTEL_INFO(dev)->gen > 5 &&
 			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
-				return EINVAL;
+				return -EINVAL;
 
 			/* The HW changed the meaning on this bit on gen6 */
 			if (INTEL_INFO(dev)->gen >= 6)
@@ -505,17 +505,17 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		break;
 	default:
 		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
-		return EINVAL;
+		return -EINVAL;
 	}
 
 	/* Copy in the exec list from userland, check for overflow */
 	oflow = SIZE_MAX / args->buffer_count;
 	if (oflow < sizeof(*exec_list) || oflow < sizeof(*object_list))
-		return (EINVAL);
+		return -EINVAL;
 	exec_list = drm_alloc(sizeof(*exec_list) * args->buffer_count);
 	object_list = drm_alloc(sizeof(*object_list) * args->buffer_count);
 	if (exec_list == NULL || object_list == NULL) {
-		ret = ENOMEM;
+		ret = -ENOMEM;
 		goto pre_mutex_err;
 	}
 	ret = copyin((void *)(uintptr_t)args->buffers_ptr, exec_list,
@@ -535,12 +535,12 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 
 	/* XXX check these before we copyin... but we do need the lock */
 	if (dev_priv->mm.wedged) {
-		ret = EIO;
+		ret = -EIO;
 		goto unlock;
 	}
 
 	if (dev_priv->mm.suspended) {
-		ret = EBUSY;
+		ret = -EBUSY;
 		goto unlock;
 	}
 
@@ -552,13 +552,13 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		if (obj == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
 			    exec_list[i].handle, i);
-			ret = ENOENT;
+			ret = -ENOENT;
 			goto err;
 		}
 		if (obj->do_flags & I915_IN_EXEC) {
 			DRM_ERROR("Object %p appears more than once in object_list\n",
 			    object_list[i]);
-			ret = EINVAL;
+			ret = -EINVAL;
 			goto err;
 		}
 		atomic_setbits_int(&obj->do_flags, I915_IN_EXEC);
@@ -589,7 +589,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 			break;
 
 		/* error other than GTT full, or we've already tried again */
-		if (ret != ENOSPC || pin_tries >= 1)
+		if (ret != -ENOSPC || pin_tries >= 1)
 			goto err;
 
 		/*
@@ -621,7 +621,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	batch_obj_priv = to_intel_bo(batch_obj);
 	if (args->batch_start_offset + args->batch_len > batch_obj->size ||
 	    batch_obj->pending_write_domain) {
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto err;
 	}
 	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
diff --git a/sys/dev/pci/drm/i915/i915_gem_tiling.c b/sys/dev/pci/drm/i915/i915_gem_tiling.c
index 498bcd5ff70..77b6ae97eb7 100644
--- a/sys/dev/pci/drm/i915/i915_gem_tiling.c
+++ b/sys/dev/pci/drm/i915/i915_gem_tiling.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem_tiling.c,v 1.5 2013/05/18 21:43:42 kettenis Exp $ */
+/* $OpenBSD: i915_gem_tiling.c,v 1.6 2013/08/07 19:49:07 kettenis Exp $ */
 /*
  * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
  *
@@ -519,7 +519,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 	if (obj->bit_17 == NULL)
 		return;
 
-	segp = &obj->dma_segs[0];
+	segp = &obj->pages[0];
 	n = 0;
 	for (i = 0; i < page_count; i++) {
 		char new_bit_17 = (segp->ds_addr + n) >> 17;
@@ -565,7 +565,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	segp = &obj->dma_segs[0];
+	segp = &obj->pages[0];
 	n = 0;
 	for (i = 0; i < page_count; i++) {
 		if ((segp->ds_addr + n) & (1 << 17))
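In i915_gem_tiling.c the swizzle fixup now walks obj->pages, an array of bus_dma_segment_t, rather than the old obj->dma_segs; the per-page decision is still whether bit 17 of the physical address is set. A sketch of that test (page_has_bit_17 is a hypothetical helper, not in the tree):

static inline int
page_has_bit_17(bus_dma_segment_t *seg, bus_size_t off)
{
	/* off steps through the segment in PAGE_SIZE increments */
	return ((seg->ds_addr + off) & (1 << 17)) != 0;
}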
diff --git a/sys/dev/pci/drm/i915/i915_trace.h b/sys/dev/pci/drm/i915/i915_trace.h
new file mode 100644
index 00000000000..6b42b889088
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_trace.h
@@ -0,0 +1,42 @@
+/* $OpenBSD: i915_trace.h,v 1.1 2013/08/07 19:49:07 kettenis Exp $ */
+/*
+ * Copyright (c) 2013 Mark Kettenis <kettenis@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+static inline void
+trace_i915_gem_request_add(struct intel_ring_buffer *ring, u32 seqno)
+{
+}
+
+static inline void
+trace_i915_gem_request_retire(struct intel_ring_buffer *ring, u32 seqno)
+{
+}
+
+static inline void
+trace_i915_gem_object_change_domain(struct drm_i915_gem_object *obj,
+    u32 old_read, u32 old_write)
+{
+}
+
+static inline void
+trace_i915_gem_object_bind(struct drm_i915_gem_object *obj, bool mappable)
+{
+}
+
+static inline void
+trace_i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+}
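The new i915_trace.h supplies empty inline stubs for the tracepoints the Linux 3.8 code calls, so functions ported from Linux keep their trace calls and the compiler discards them. A hypothetical call site (not from this commit) would look like:

static inline void
example_notify_bind(struct drm_i915_gem_object *obj, bool mappable)
{
	/* expands to an empty inline function and compiles away */
	trace_i915_gem_object_bind(obj, mappable);
}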