path: root/sys/dev/pci
author    Jonathan Gray <jsg@jsg.id.au>    2013-03-08 13:55:20 +1100
committer Jonathan Gray <jsg@jsg.id.au>    2013-03-08 13:55:20 +1100
commit    b436809e9a782907db2f1f00b45b4904a676ac10
tree      f3075b44c6bf89f270b3da8173967c8bac1aad75 /sys/dev/pci
parent    590918f52807f5325366ee632e1742c3e27a76d0
remove gpu_write_list
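
The diff below drops the per-ring gpu_write_list (and the per-object pending_gpu_write bit) in favour of seqno-based write tracking: each object keeps a last_write_seqno and each ring keeps a gpu_caches_dirty flag. As a rough illustration of that model only, here is a minimal C sketch using stand-in types, not the driver's real structures:

#include <stdint.h>

/* Illustrative stand-ins only; these are not the i915 structures. */
struct obj_state {
	uint32_t last_read_seqno;	/* last request that read the object */
	uint32_t last_write_seqno;	/* last request that wrote it, 0 if clean */
};

struct ring_state {
	int gpu_caches_dirty;		/* a flush must precede the next request */
	uint32_t completed_seqno;	/* highest seqno the GPU has retired */
};

/*
 * Waiting for rendering keys off seqnos instead of walking a write list:
 * read-only access only needs the last write to finish, a writer must
 * also wait for the last reader.
 */
static int
wait_rendering(struct obj_state *obj, struct ring_state *ring, int readonly)
{
	uint32_t seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;

	if (seqno == 0)
		return 0;			/* nothing outstanding */
	if (ring->completed_seqno < seqno)
		return -1;			/* the driver sleeps in i915_wait_seqno() */
	if (obj->last_write_seqno && ring->completed_seqno >= obj->last_write_seqno)
		obj->last_write_seqno = 0;	/* the write has retired */
	return 0;
}

In the real code the wait is i915_wait_seqno() on obj->ring, and the comparison uses i915_seqno_passed(), which also tolerates seqno wraparound.
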
Diffstat (limited to 'sys/dev/pci')
-rw-r--r--  sys/dev/pci/drm/i915_drv.c              5
-rw-r--r--  sys/dev/pci/drm/i915_drv.h              8
-rw-r--r--  sys/dev/pci/drm/i915_gem.c            102
-rw-r--r--  sys/dev/pci/drm/i915_gem_execbuffer.c   7
-rw-r--r--  sys/dev/pci/drm/intel_ringbuffer.c     21
-rw-r--r--  sys/dev/pci/drm/intel_ringbuffer.h      2
6 files changed, 63 insertions, 82 deletions
diff --git a/sys/dev/pci/drm/i915_drv.c b/sys/dev/pci/drm/i915_drv.c
index e6969803123..98c5c91e48a 100644
--- a/sys/dev/pci/drm/i915_drv.c
+++ b/sys/dev/pci/drm/i915_drv.c
@@ -1492,11 +1492,8 @@ i915_gem_retire_work_handler(void *arg1, void *unused)
i915_gem_retire_requests(dev_priv);
idle = true;
for_each_ring(ring, dev_priv, i) {
-
- if (!list_empty(&ring->gpu_write_list)) {
- i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ring->gpu_caches_dirty)
i915_add_request(ring, NULL, NULL);
- }
idle &= list_empty(&ring->request_list);
}
diff --git a/sys/dev/pci/drm/i915_drv.h b/sys/dev/pci/drm/i915_drv.h
index f2092c99b54..7267ac42eb4 100644
--- a/sys/dev/pci/drm/i915_drv.h
+++ b/sys/dev/pci/drm/i915_drv.h
@@ -856,7 +856,6 @@ struct drm_i915_gem_object {
/** This object's place on the active/flushing/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
- struct list_head gpu_write_list;
/* GTT binding. */
bus_dmamap_t dmamap;
bus_dma_segment_t *dma_segs;
@@ -896,12 +895,6 @@ struct drm_i915_gem_object {
unsigned int dirty:1;
/**
- * This is set if the object has been written to since the last
- * GPU flush.
- */
- unsigned int pending_gpu_write:1;
-
- /**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;
@@ -1062,7 +1055,6 @@ void i915_gem_object_move_off_active(struct drm_i915_gem_object *);
void i915_gem_object_move_to_inactive(struct drm_i915_gem_object *);
void i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *);
int i915_add_request(struct intel_ring_buffer *, struct drm_file *, u32 *);
-void i915_gem_process_flushing_list(struct intel_ring_buffer *, u_int32_t);
int init_pipe_control(struct intel_ring_buffer *);
void cleanup_status_page(struct intel_ring_buffer *);
void i915_gem_init_swizzling(struct drm_device *);
diff --git a/sys/dev/pci/drm/i915_gem.c b/sys/dev/pci/drm/i915_gem.c
index e4f34c3f50b..b820a8c155a 100644
--- a/sys/dev/pci/drm/i915_gem.c
+++ b/sys/dev/pci/drm/i915_gem.c
@@ -464,20 +464,27 @@ int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
int ret;
- /* This function only exists to support waiting for existing rendering,
- * not for emitting required flushes.
- */
- BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
- /* If there is rendering queued on the buffer being evicted, wait for
- * it.
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
*/
- if (obj->active) {
- ret = i915_wait_seqno(obj->ring, obj->last_read_seqno);
- if (ret)
- return ret;
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
return 0;
@@ -851,16 +858,24 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
obj->active = 1;
}
- if (obj->fenced_gpu_access) {
- obj->last_fenced_seqno = seqno;
- }
- if (obj->base.write_domain)
- obj->last_write_seqno = seqno;
-
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
+
obj->last_read_seqno = seqno;
+
+ if (obj->fenced_gpu_access) {
+ obj->last_fenced_seqno = seqno;
+
+ /* Bump MRU to take account of the delayed flush */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ }
+ }
}
void
@@ -871,8 +886,7 @@ i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
list_del_init(&obj->ring_list);
obj->last_read_seqno = 0;
obj->last_fenced_seqno = 0;
- if (obj->base.write_domain == 0)
- obj->last_write_seqno = 0;
+ obj->last_write_seqno = 0;
}
void
@@ -902,7 +916,6 @@ i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *obj)
else
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- BUG_ON(!list_empty(&obj->gpu_write_list));
BUG_ON(!obj->active);
obj->ring = NULL;
@@ -910,7 +923,6 @@ i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *obj)
obj->fenced_gpu_access = false;
obj->active = 0;
- obj->pending_gpu_write = false;
drm_gem_object_unreference(&obj->base);
inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
@@ -927,29 +939,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
i915_gem_object_move_to_inactive_locked(obj);
}
-void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
- uint32_t flush_domains)
-{
- struct drm_i915_gem_object *obj, *next;
-
- list_for_each_entry_safe(obj, next,
- &ring->gpu_write_list,
- gpu_write_list) {
- if (obj->base.write_domain & flush_domains) {
-// uint32_t old_write_domain = obj->base.write_domain;
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring);
-
-// trace_i915_gem_object_change_domain(obj,
-// obj->base.read_domains,
-// old_write_domain);
- }
- }
-}
-
int
i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
@@ -1030,6 +1019,17 @@ i915_add_request(struct intel_ring_buffer *ring,
u32 request_ring_position;
int was_empty, ret;
+ /*
+ * Emit any outstanding flushes - execbuf can fail to emit the flush
+ * after having emitted the batchbuffer command. Hence we need to fix
+ * things up similar to emitting the lazy request. The difference here
+ * is that the flush _must_ happen before the next request, no matter
+ * what.
+ */
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
+
request = drm_calloc(1, sizeof(*request));
if (request == NULL) {
printf("%s: failed to allocate request\n", __func__);
@@ -1125,7 +1125,6 @@ i915_gem_reset_ring_lists(drm_i915_private_t *dev_priv,
ring_list);
obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}
}
@@ -1172,7 +1171,6 @@ i915_gem_reset(struct drm_device *dev)
mm_list);
obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}
@@ -1398,9 +1396,6 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
if (ret)
return ret;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- i915_gem_process_flushing_list(ring, flush_domains);
-
return 0;
}
@@ -1990,11 +1985,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
if (ret)
return ret;
- if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj, false);
- if (ret)
- return ret;
- }
+ ret = i915_gem_object_wait_rendering(obj, !write);
+ if (ret)
+ return ret;
i915_gem_object_flush_cpu_write_domain(obj);
@@ -2015,7 +2008,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
@@ -2640,7 +2632,6 @@ i915_gem_init_object(struct drm_obj *obj)
INIT_LIST_HEAD(&obj_priv->mm_list);
INIT_LIST_HEAD(&obj_priv->ring_list);
- INIT_LIST_HEAD(&obj_priv->gpu_write_list);
return 0;
}
@@ -2936,7 +2927,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
}
// i915_gem_load
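
The comment added in i915_add_request() above states the invariant that replaces the flushing list: any dirty GPU caches must be flushed before the request's seqno is emitted, so a signalled seqno really covers the preceding writes. A toy model of that ordering (names are stand-ins, not the driver's functions):

#include <stdint.h>

/* Toy request emission modelling the ordering in i915_add_request() above. */
struct ring_rq {
	int gpu_caches_dirty;		/* emitted commands may still sit in caches */
	uint32_t next_seqno;		/* seqno the next request will carry */
};

static uint32_t
add_request_sketch(struct ring_rq *ring)
{
	/* Flush first: the seqno must not signal ahead of cached writes. */
	if (ring->gpu_caches_dirty) {
		/* stands in for intel_ring_flush_all_caches() */
		ring->gpu_caches_dirty = 0;
	}
	/* Then emit the seqno write; callers can wait on the returned value. */
	return ring->next_seqno++;
}
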
diff --git a/sys/dev/pci/drm/i915_gem_execbuffer.c b/sys/dev/pci/drm/i915_gem_execbuffer.c
index 4b4230e8372..e5a3d0ff2e7 100644
--- a/sys/dev/pci/drm/i915_gem_execbuffer.c
+++ b/sys/dev/pci/drm/i915_gem_execbuffer.c
@@ -437,9 +437,7 @@ i915_gem_execbuffer_move_to_active(struct drm_obj **object_list,
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->pending_gpu_write = true;
- list_move_tail(&obj->gpu_write_list,
- &ring->gpu_write_list);
+ obj->last_write_seqno = i915_gem_next_request_seqno(ring);
intel_mark_busy(ring->dev);
}
@@ -454,6 +452,9 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
{
u32 invalidate;
+ /* Unconditionally force add_request to emit a full flush. */
+ ring->gpu_caches_dirty = true;
+
/*
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires.
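
The execbuffer side now just stamps written objects with the upcoming request's seqno and sets gpu_caches_dirty, so the flush is emitted by i915_add_request() rather than here. A compact stand-in model of those two hunks combined into one step, illustrative only:

#include <stdint.h>

/* Stand-in state for the execbuffer path; not the driver's objects. */
struct eb_obj {
	int dirty;
	uint32_t last_write_seqno;
};

struct eb_ring {
	int gpu_caches_dirty;
	uint32_t next_seqno;		/* seqno i915_add_request() will assign */
};

/* After the batch is queued: stamp written buffers, defer the cache flush. */
static void
move_to_active_sketch(struct eb_obj *obj, struct eb_ring *ring, int gpu_wrote_it)
{
	if (gpu_wrote_it) {
		obj->dirty = 1;
		obj->last_write_seqno = ring->next_seqno;
	}
	/* Force the next i915_add_request() to emit a full flush. */
	ring->gpu_caches_dirty = 1;
}
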
diff --git a/sys/dev/pci/drm/intel_ringbuffer.c b/sys/dev/pci/drm/intel_ringbuffer.c
index e73a4fed21d..756a87513a6 100644
--- a/sys/dev/pci/drm/intel_ringbuffer.c
+++ b/sys/dev/pci/drm/intel_ringbuffer.c
@@ -1277,7 +1277,6 @@ intel_init_ring_buffer(struct drm_device *dev,
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = 32 * PAGE_SIZE;
memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
@@ -1526,19 +1525,24 @@ intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
int
intel_ring_idle(struct intel_ring_buffer *ring)
{
+ u32 seqno;
int ret;
- if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
- return 0;
-
- if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring, NULL, NULL);
if (ret)
return ret;
}
- return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ seqno = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list)->seqno;
+
+ return i915_wait_seqno(ring, seqno);
}
#if 0
@@ -1867,7 +1871,6 @@ intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = size;
ring->effective_size = ring->size;
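
intel_ring_idle() above is rewritten to mean: queue a request for any outstanding lazy work, then wait on the newest request in the ring's request list, instead of flushing the old gpu_write_list. A simplified model of that control flow, with stand-in fields rather than the real ring state:

#include <stdint.h>

/* Simplified ring state for the intel_ring_idle() flow; not the real struct. */
struct idle_ring {
	int outstanding_lazy_request;	/* commands emitted without a request yet */
	uint32_t next_seqno;		/* seqno the next request will get */
	uint32_t newest_request;	/* seqno of the last queued request, 0 if none */
	uint32_t completed_seqno;	/* highest seqno the GPU has finished */
};

static int
ring_idle_sketch(struct idle_ring *ring)
{
	if (ring->outstanding_lazy_request) {
		/* i915_add_request(): flush dirty caches, then emit a seqno */
		ring->newest_request = ring->next_seqno++;
		ring->outstanding_lazy_request = 0;
	}
	if (ring->newest_request == 0)
		return 0;			/* request list empty: already idle */
	/* The driver blocks in i915_wait_seqno(); the model just compares. */
	return ring->completed_seqno >= ring->newest_request ? 0 : -1;
}
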
diff --git a/sys/dev/pci/drm/intel_ringbuffer.h b/sys/dev/pci/drm/intel_ringbuffer.h
index 3226cd0b64c..c6dc0b9b985 100644
--- a/sys/dev/pci/drm/intel_ringbuffer.h
+++ b/sys/dev/pci/drm/intel_ringbuffer.h
@@ -124,8 +124,6 @@ struct intel_ring_buffer {
*/
struct list_head request_list;
- struct list_head gpu_write_list;
-
/**
* Do we have some not yet emitted requests outstanding?
*/