path: root/sys/dev/pci/drm/i915/intel_ringbuffer.c
author	Jonathan Gray <jsg@cvs.openbsd.org>	2013-03-18 12:36:53 +0000
committer	Jonathan Gray <jsg@cvs.openbsd.org>	2013-03-18 12:36:53 +0000
commit	1e83ebc015e3e708142fa767510ae65e6892c20d (patch)
tree	d1f5f7f40202ef7b50c703a9c3ac746a96dd93ed /sys/dev/pci/drm/i915/intel_ringbuffer.c
parent	c48141c1d8bd41bf9e7298c54917eaa386e56689 (diff)
Significantly increase the wordlist for ddb hangman,
and update our device-independent DRM code and the Intel DRM code to be mostly in sync with Linux 3.8.3. Among other things this brings support for kernel modesetting and enables use of the rings on gen6+ Intel hardware. Based on some earlier work from matthieu@, with some hints from FreeBSD, and with lots of help from kettenis@ (including a beautiful accelerated wscons framebuffer console!). Thanks to M:Tier and the OpenBSD Foundation for sponsoring this work.
Diffstat (limited to 'sys/dev/pci/drm/i915/intel_ringbuffer.c')
-rw-r--r--	sys/dev/pci/drm/i915/intel_ringbuffer.c	2007
1 file changed, 2007 insertions, 0 deletions
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.c b/sys/dev/pci/drm/i915/intel_ringbuffer.c
new file mode 100644
index 00000000000..18628a76d2d
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,2007 @@
+/* $OpenBSD: intel_ringbuffer.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include "i915_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "intel_drv.h"
+
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
+int gen2_render_ring_flush(struct intel_ring_buffer *, u32, u32);
+int gen4_render_ring_flush(struct intel_ring_buffer *, u32, u32);
+int gen6_render_ring_flush(struct intel_ring_buffer *, u32, u32);
+int gen7_render_ring_flush(struct intel_ring_buffer *, u32, u32);
+int intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *);
+int gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *);
+void ring_write_tail(struct intel_ring_buffer *, u32);
+int init_pipe_control(struct intel_ring_buffer *);
+void cleanup_pipe_control(struct intel_ring_buffer *);
+int init_render_ring(struct intel_ring_buffer *);
+void render_ring_cleanup(struct intel_ring_buffer *);
+void update_mboxes(struct intel_ring_buffer *, u32);
+int gen6_add_request(struct intel_ring_buffer *);
+int gen6_ring_sync(struct intel_ring_buffer *, struct intel_ring_buffer *,
+ u32);
+int pc_render_add_request(struct intel_ring_buffer *);
+u32 gen6_ring_get_seqno(struct intel_ring_buffer *, bool);
+u32 ring_get_seqno(struct intel_ring_buffer *, bool);
+u32 pc_render_get_seqno(struct intel_ring_buffer *, bool);
+bool gen5_ring_get_irq(struct intel_ring_buffer *);
+void gen5_ring_put_irq(struct intel_ring_buffer *);
+bool i9xx_ring_get_irq(struct intel_ring_buffer *);
+void i9xx_ring_put_irq(struct intel_ring_buffer *);
+bool i8xx_ring_get_irq(struct intel_ring_buffer *);
+void i8xx_ring_put_irq(struct intel_ring_buffer *);
+int bsd_ring_flush(struct intel_ring_buffer *, u32, u32);
+int i9xx_add_request(struct intel_ring_buffer *);
+bool gen6_ring_get_irq(struct intel_ring_buffer *);
+void gen6_ring_put_irq(struct intel_ring_buffer *);
+int i830_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int i915_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int i965_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int init_status_page(struct intel_ring_buffer *);
+int init_phys_hws_pga(struct intel_ring_buffer *);
+int intel_ring_idle(struct intel_ring_buffer *);
+int intel_ring_wait_seqno(struct intel_ring_buffer *, u32);
+int intel_ring_wait_request(struct intel_ring_buffer *, int);
+int intel_wrap_ring_buffer(struct intel_ring_buffer *);
+int intel_ring_alloc_seqno(struct intel_ring_buffer *);
+void gen6_bsd_ring_write_tail(struct intel_ring_buffer *, u32);
+int gen6_ring_flush(struct intel_ring_buffer *, u32, u32);
+int gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int blt_ring_flush(struct intel_ring_buffer *, u32, u32);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *);
+
+extern int ticks;
+
+static inline int
+ring_space(struct intel_ring_buffer *ring)
+{
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+ if (space < 0)
+ space += ring->size;
+ return space;
+}
+
+int
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+ int ret;
+
+ cmd = MI_FLUSH;
+ if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+ cmd |= MI_NO_WRITE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ struct drm_device *dev = ring->dev;
+ u32 cmd;
+ int ret;
+
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+int
+intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0); /* low dword */
+ intel_ring_emit(ring, 0); /* high dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen6_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /* Force SNB workarounds for PIPE_CONTROL flushes */
+ ret = intel_emit_post_sync_nonzero_flush(ring);
+ if (ret)
+ return ret;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ /* Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set. */
+ gen7_render_ring_cs_stall_wa(ring);
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+void
+ring_write_tail(struct intel_ring_buffer *ring, u32 value)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ I915_WRITE_TAIL(ring, value);
+}
+
+u32
+intel_ring_get_active_head(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
+ RING_ACTHD(ring->mmio_base) : ACTHD;
+
+ return I915_READ(acthd_reg);
+}
+
+int
+init_ring_common(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
+ u32 head;
+ int retries;
+
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
+ /* Stop the ring if it's running. */
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(ring, 0);
+
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+
+ I915_WRITE_HEAD(ring, 0);
+
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
+ }
+
+ /* Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values. */
+ I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_CTL(ring,
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
+ else {
+ ring->head = I915_READ_HEAD(ring);
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring_space(ring);
+ ring->last_retired_head = -1;
+ }
+
+out:
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_put(dev_priv);
+
+ return ret;
+}
+
+int
+init_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (ring->private)
+ return 0;
+
+ pc = malloc(sizeof(*pc), M_DRM, M_WAITOK);
+ if (!pc)
+ return -ENOMEM;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ /*
+	 * We want a snooped gtt mapping.
+	 * Normally this flag only applies to dmamem_map, but it has been
+	 * overloaded for the agp mapping
+ */
+ obj->dma_flags = BUS_DMA_COHERENT | BUS_DMA_READ;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret)
+ goto err_unref;
+
+ pc->gtt_offset = obj->gtt_offset;
+ pc->cpu_page = (volatile u_int32_t *)vm_map_min(kernel_map);
+ obj->base.uao->pgops->pgo_reference(obj->base.uao);
+	ret = uvm_map(kernel_map, (vaddr_t *)&pc->cpu_page,
+	    PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
+	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+	if (ret != 0) {
+ DRM_ERROR("Failed to map status page.\n");
+ obj->base.uao->pgops->pgo_detach(obj->base.uao);
+ goto err_unpin;
+ }
+
+ pc->obj = obj;
+ ring->private = pc;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ free(pc, M_DRM);
+ return ret;
+}
+
+void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ struct drm_i915_gem_object *obj;
+
+ if (!ring->private)
+ return;
+
+ obj = pc->obj;
+
+ uvm_unmap(kernel_map, (vaddr_t)pc->cpu_page,
+ (vaddr_t)pc->cpu_page + PAGE_SIZE);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ free(pc, M_DRM);
+ ring->private = NULL;
+}
+
+int
+init_render_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ */
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ /* Required for the hardware to program scanline values for waiting */
+ if (INTEL_INFO(dev)->gen == 6)
+ I915_WRITE(GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+#ifdef notyet
+ if (IS_GEN6(dev)) {
+ /* From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ /* This is not explicitly set for GEN6, so read the register.
+ * see intel_ring_mi_set_context() for why we care.
+ * TODO: consider explicitly setting the bit for GEN5
+ */
+ ring->itlb_before_ctx_switch =
+ !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
+ }
+#endif
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+ if (HAS_L3_GPU_CACHE(dev))
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+
+ return ret;
+}
+
+void
+render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!ring->private)
+ return;
+
+ if (HAS_BROKEN_CS_TLB(dev))
+ drm_gem_object_unreference(to_gem_object(ring->private));
+
+ cleanup_pipe_control(ring);
+}
+
+void
+update_mboxes(struct intel_ring_buffer *ring,
+ u32 mmio_offset)
+{
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+}
+
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @ring - ring that is adding a request
+ * @seqno - return seqno stuck into the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+int
+gen6_add_request(struct intel_ring_buffer *ring)
+{
+ u32 mbox1_reg;
+ u32 mbox2_reg;
+ int ret;
+
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ mbox1_reg = ring->signal_mbox[0];
+ mbox2_reg = ring->signal_mbox[1];
+
+ update_mboxes(ring, mbox1_reg);
+ update_mboxes(ring, mbox2_reg);
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/**
+ * gen6_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter - ring that is waiting
+ * @signaller - ring which has, or will signal
+ * @seqno - seqno which the waiter will block on
+ */
+int
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ int ret;
+ u32 dw1 = MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER;
+
+ /* Throughout all of the GEM code, seqno passed implies our current
+ * seqno is >= the last seqno executed. However for hardware the
+ * comparison is strictly greater than.
+ */
+ seqno -= 1;
+
+ WARN_ON(signaller->semaphore_register[waiter->id] ==
+ MI_SEMAPHORE_SYNC_INVALID);
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(waiter,
+ dw1 | signaller->semaphore_register[waiter->id]);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_advance(waiter);
+
+ return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
+do { \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
+} while (0)
+
+int
+pc_render_add_request(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to workaround the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ /* Workaround to force correct ordering between irq and seqno writes on
+ * ivb (and maybe also on snb) by reading from a CS register (like
+ * ACTHD) before reading the status page. */
+ if (!lazy_coherency)
+ intel_ring_get_active_head(ring);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+u32
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+u32
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ struct pipe_control *pc = ring->private;
+ return pc->cpu_page[0];
+}
+
+bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ return true;
+}
+
+void
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+}
+
+bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ return true;
+}
+
+void
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+}
+
+bool
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ return true;
+}
+
+void
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+}
+
+void
+intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 mmio = 0;
+
+ /* The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
+}
+
+int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int
+i9xx_add_request(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ /* It looks like we need to prevent the gt from suspending while waiting
+	 * for a notify irq, otherwise irqs seem to get lost on at least the
+ * blt/bsd rings on ivb. */
+ gen6_gt_force_wake_get(dev_priv);
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
+ GEN6_RENDER_L3_PARITY_ERROR));
+ else
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ return true;
+}
+
+void
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+ else
+ I915_WRITE_IMR(ring, ~0);
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 length,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ MI_BATCH_GTT |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
+int
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ struct drm_i915_gem_object *obj = ring->private;
+ u32 cs_offset = obj->gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+		/* Blit the batch (which now has all relocs applied) to the stable batch
+ * scratch bo area (so that the CS never stumbles over its tlb
+ * invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (howmany(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
+
+int
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+void
+cleanup_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = ring->status_page.obj;
+ if (obj == NULL)
+ return;
+
+ uvm_unmap(kernel_map, (vaddr_t)ring->status_page.page_addr,
+ (vaddr_t)ring->status_page.page_addr + PAGE_SIZE);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ ring->status_page.obj = NULL;
+}
+
+int
+init_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ /*
+	 * We want a snooped gtt mapping.
+	 * Normally this flag only applies to dmamem_map, but it has been
+	 * overloaded for the agp mapping
+ */
+ obj->dma_flags = BUS_DMA_COHERENT | BUS_DMA_READ;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret != 0) {
+ goto err_unref;
+ }
+
+ ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.page_addr = (u_int32_t *)vm_map_min(kernel_map);
+ obj->base.uao->pgops->pgo_reference(obj->base.uao);
+	ret = uvm_map(kernel_map, (vaddr_t *)&ring->status_page.page_addr,
+	    PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
+	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+	if (ret != 0) {
+ obj->base.uao->pgops->pgo_detach(obj->base.uao);
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+ ring->status_page.obj = obj;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ intel_ring_setup_status_page(ring);
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ return ret;
+}
+
+int
+init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ if (!dev_priv->status_page_dmah) {
+ dev_priv->status_page_dmah = drm_dmamem_alloc(dev_priv->dmat,
+ PAGE_SIZE, PAGE_SIZE, 1, PAGE_SIZE, 0, BUS_DMA_READ);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+ }
+
+ addr = dev_priv->status_page_dmah->map->dm_segs[0].ds_addr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->map->dm_segs[0].ds_addr >> 28) & 0xf0;
+ bus_dmamap_sync(dev_priv->dmat, dev_priv->status_page_dmah->map, 0,
+ PAGE_SIZE, BUS_DMASYNC_PREREAD);
+ I915_WRITE(HWS_PGA, addr);
+
+ ring->status_page.page_addr = (u32 *)dev_priv->status_page_dmah->kva;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
+u32
+intel_read_status_page(struct intel_ring_buffer *ring, int reg)
+{
+ struct inteldrm_softc *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_gem_object *obj_priv;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ u32 val;
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ obj_priv = ring->status_page.obj;
+ map = obj_priv->dmamap;
+ tag = dev_priv->agpdmat;
+ } else {
+ map = dev_priv->status_page_dmah->map;
+ tag = dev->dmat;
+ }
+ /* Ensure that the compiler doesn't optimize away the load. */
+ bus_dmamap_sync(tag, map, 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
+ val = ((volatile u_int32_t *)(ring->status_page.page_addr))[reg];
+ bus_dmamap_sync(tag, map, 0, PAGE_SIZE, BUS_DMASYNC_PREREAD);
+
+ return (val);
+}
+
+int
+intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ ring->size = 32 * PAGE_SIZE;
+ memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
+
+// init_waitqueue_head(&ring->irq_queue);
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(ring);
+ if (ret)
+ return ret;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
+ }
+
+ obj = i915_gem_alloc_object(dev, ring->size);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto err_hws;
+ }
+
+ ring->obj = obj;
+
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+ if (ret)
+ goto err_unref;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+
+ if ((ret = agp_map_subregion(dev_priv->agph, obj->gtt_offset,
+ ring->size, &ring->bsh)) != 0) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ ret = ring->init(ring);
+ if (ret)
+ goto err_unmap;
+
+ /* Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ring->effective_size -= 128;
+
+ return 0;
+
+err_unmap:
+ agp_unmap_subregion(dev_priv->agph, ring->bsh, ring->size);
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ ring->obj = NULL;
+err_hws:
+ cleanup_status_page(ring);
+ return ret;
+}
+
+void
+intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv;
+ int ret;
+
+ if (ring->obj == NULL)
+ return;
+
+ /* Disable the ring buffer. The ring must be idle at this point */
+ dev_priv = ring->dev->dev_private;
+ ret = intel_ring_idle(ring);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
+ I915_WRITE_CTL(ring, 0);
+
+ agp_unmap_subregion(dev_priv->agph, ring->bsh, ring->size);
+
+ i915_gem_object_unpin(ring->obj);
+ drm_gem_object_unreference(&ring->obj->base);
+ ring->obj = NULL;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+ cleanup_status_page(ring);
+}
+
+int
+intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ bool was_interruptible;
+ int ret;
+
+ /* XXX As we have not yet audited all the paths to check that
+ * they are ready for ERESTARTSYS from intel_ring_begin, do not
+ * allow us to be interruptible by a signal.
+ */
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ ret = i915_wait_seqno(ring, seqno);
+
+ if (!ret)
+ i915_gem_retire_requests_ring(ring);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+int
+intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_i915_gem_request *request;
+ u32 seqno = 0;
+ int ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ if (ring->last_retired_head != -1) {
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ int space;
+
+ if (request->tail == -1)
+ continue;
+
+ space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
+ if (space < 0)
+ space += ring->size;
+ if (space >= n) {
+ seqno = request->seqno;
+ break;
+ }
+
+ /* Consume this request in case we need more space than
+ * is available and so need to prevent a race between
+ * updating last_retired_head and direct reads of
+ * I915_RING_HEAD. It also provides a nice sanity check.
+ */
+ request->tail = -1;
+ }
+
+ if (seqno == 0)
+ return -ENOSPC;
+
+ ret = intel_ring_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(ring->last_retired_head == -1))
+ return -ENOSPC;
+
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (WARN_ON(ring->space < n))
+ return -ENOSPC;
+
+ return 0;
+}
+
+int
+ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long end;
+ int ret;
+
+ ret = intel_ring_wait_request(ring, n);
+ if (ret != -ENOSPC)
+ return ret;
+
+// trace_i915_ring_wait_begin(ring);
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = ticks + 60 * hz;
+
+ do {
+ ring->head = I915_READ_HEAD(ring);
+ ring->space = ring_space(ring);
+ if (ring->space >= n) {
+// trace_i915_ring_wait_end(ring);
+ return 0;
+ }
+
+#if 0
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+#endif
+
+ drm_msleep(1, "915wfs");
+
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+ } while (!time_after(ticks, end));
+// trace_i915_ring_wait_end(ring);
+ return -EBUSY;
+}
+
+int
+intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+ struct inteldrm_softc *dev_priv = ring->dev->dev_private;
+ int rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = ring_wait_for_space(ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ bus_space_set_region_4(dev_priv->bst, ring->bsh,
+ ring->tail, MI_NOOP, rem / 4);
+
+ ring->tail = 0;
+ ring->space = ring_space(ring);
+
+ return 0;
+}
+
+int
+intel_ring_idle(struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ int ret;
+
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring, NULL, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ seqno = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list)->seqno;
+
+ return i915_wait_seqno(ring, seqno);
+}
+
+int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
+int
+intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int n = 4*num_dwords;
+ int ret;
+
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
+
+ if (unlikely(ring->tail + n > ring->effective_size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < n)) {
+ ret = ring_wait_for_space(ring, n);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ring->space -= n;
+ return 0;
+}
+
+void
+intel_ring_advance(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+
+ ring->tail &= ring->size - 1;
+ if (dev_priv->stop_rings & intel_ring_flag(ring))
+ return;
+ ring->write_tail(ring, ring->tail);
+}
+
+void
+gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, u32 value)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int retries;
+
+ /* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ /* Clear the context id. Here be magic! */
+ I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_INDICATOR) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+ /* Now that the ring is fully powered up, update the tail */
+ I915_WRITE_TAIL(ring, value);
+ POSTING_READ(RING_TAIL(ring->mmio_base));
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+}
+
+int
+gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate, u32 flush)
+{
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/* Blitter support (SandyBridge+) */
+
+int
+blt_ring_flush(struct intel_ring_buffer *ring, u32 invalidate, u32 flush)
+{
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int
+intel_init_render_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->flush = gen7_render_ring_flush;
+ if (INTEL_INFO(dev)->gen == 6)
+ ring->flush = gen6_render_ring_flush;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+ ring->signal_mbox[0] = GEN6_VRSYNC;
+ ring->signal_mbox[1] = GEN6_BRSYNC;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = pc_render_get_seqno;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+ } else {
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ }
+ ring->write_tail = ring_write_tail;
+ if (IS_HASWELL(dev))
+ ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
+ /* Workaround batchbuffer to combat CS tlb bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+			DRM_ERROR("Failed to pin batch bo\n");
+ return ret;
+ }
+
+ ring->private = obj;
+ }
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* non-kms not supported on gen6+ */
+ return -ENODEV;
+ }
+
+ /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+ * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+ * the special gen5 functions. */
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+
+ ring->size = size;
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ring->effective_size -= 128;
+
+ if ((ret = agp_map_subregion(dev_priv->agph, start,
+ size, &ring->bsh)) != 0) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ return -ENOMEM;
+ }
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
+
+ ring->name = "bsd ring";
+ ring->id = VCS;
+
+ ring->write_tail = ring_write_tail;
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ /* gen6 bsd needs a special wa for tail updates */
+ if (IS_GEN6(dev))
+ ring->write_tail = gen6_bsd_ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+ ring->signal_mbox[0] = GEN6_RVSYNC;
+ ring->signal_mbox[1] = GEN6_BVSYNC;
+ } else {
+ ring->mmio_base = BSD_RING_BASE;
+ ring->flush = bsd_ring_flush;
+ ring->add_request = i9xx_add_request;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN5(dev)) {
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ } else {
+ ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ }
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
+
+ ring->name = "blitter ring";
+ ring->id = BCS;
+
+ ring->mmio_base = BLT_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = blt_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->signal_mbox[0] = GEN6_RBSYNC;
+ ring->signal_mbox[1] = GEN6_VBSYNC;
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+// trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+// trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+void
+intel_ring_emit(struct intel_ring_buffer *ring, uint32_t data)
+{
+ struct drm_device *dev = ring->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ bus_space_write_4(dev_priv->bst, ring->bsh,
+ ring->tail, data);
+ ring->tail += 4;
+}