author    | Jonathan Gray <jsg@jsg.id.au>            | 2013-02-15 15:57:33 +1100
committer | Jonathan Gray <jsg@jsg.id.au>            | 2013-02-15 15:57:33 +1100
commit    | ceaa7c2701c921d65983e87c7923670d2bceee54 (patch)
tree      | 40cde43e07bda1c3a877d241908d0e1cbd3cfc83 /sys/dev/pci
parent    | 2d66ed1668973feeafdd2edc323f0159acb5fca9 (diff)
Revert "use the ring list when retiring requests"
Was causing breakage with multiple dri clients.
This reverts commit e95e1a96594914b25d1c49f806fcbae7673f12f0.
Conflicts:
sys/dev/pci/drm/i915_gem.c
Diffstat (limited to 'sys/dev/pci')
-rw-r--r-- | sys/dev/pci/drm/i915_drv.c | 28
-rw-r--r-- | sys/dev/pci/drm/i915_drv.h |  3
-rw-r--r-- | sys/dev/pci/drm/i915_gem.c |  5
3 files changed, 14 insertions, 22 deletions
diff --git a/sys/dev/pci/drm/i915_drv.c b/sys/dev/pci/drm/i915_drv.c
index 04c2ceda07a..9d3a6df77b3 100644
--- a/sys/dev/pci/drm/i915_drv.c
+++ b/sys/dev/pci/drm/i915_drv.c
@@ -1301,44 +1301,40 @@ inteldrm_process_flushing(struct inteldrm_softc *dev_priv,
  * called with and sleeps with the drm_lock.
  */
 void
-i915_gem_retire_request(struct intel_ring_buffer *ring,
+i915_gem_retire_request(struct inteldrm_softc *dev_priv,
     struct drm_i915_gem_request *request)
 {
-	struct inteldrm_softc *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
 
 	MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
 	mtx_enter(&dev_priv->list_lock);
 
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate. */
-	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&ring->active_list,
-		    struct drm_i915_gem_object,
-		    ring_list);
+	while ((obj_priv = TAILQ_FIRST(&dev_priv->mm.active_list)) != NULL) {
+		struct drm_obj *obj = &obj_priv->base;
 
 		/* If the seqno being retired doesn't match the oldest in the
 		 * list, then the oldest in the list must still be newer than
 		 * this seqno.
 		 */
-		if (obj->last_rendering_seqno != request->seqno)
+		if (obj_priv->last_rendering_seqno != request->seqno)
 			break;
-		drm_lock_obj(&obj->base);
+		drm_lock_obj(obj);
 		/*
 		 * If we're now clean and can be read from, move inactive,
 		 * else put on the flushing list to signify that we're not
 		 * available quite yet.
 		 */
-		if (obj->base.write_domain != 0) {
-			KASSERT(obj->active);
-			i915_move_to_tail(obj,
+		if (obj->write_domain != 0) {
+			KASSERT(obj_priv->active);
+			i915_move_to_tail(obj_priv,
 			    &dev_priv->mm.flushing_list);
-			i915_gem_object_move_off_active(obj);
-			drm_unlock_obj(&obj->base);
+			i915_gem_object_move_off_active(obj_priv);
+			drm_unlock_obj(obj);
 		} else {
 			/* unlocks object for us and drops ref */
-			i915_gem_object_move_to_inactive_locked(obj);
+			i915_gem_object_move_to_inactive_locked(obj_priv);
 			mtx_enter(&dev_priv->list_lock);
 		}
 	}
diff --git a/sys/dev/pci/drm/i915_drv.h b/sys/dev/pci/drm/i915_drv.h
index 592f5166f8a..68a308d5266 100644
--- a/sys/dev/pci/drm/i915_drv.h
+++ b/sys/dev/pci/drm/i915_drv.h
@@ -852,7 +852,6 @@ struct drm_i915_gem_object {
 	TAILQ_ENTRY(drm_i915_gem_object) list;
 	TAILQ_ENTRY(drm_i915_gem_object) write_list;
 	struct i915_gem_list *current_list;
-	struct list_head ring_list;
 	/* GTT binding. */
 	bus_dmamap_t dmamap;
 	bus_dma_segment_t *dma_segs;
@@ -995,7 +994,7 @@ int i915_gem_init_object(struct drm_obj *);
 void i915_gem_free_object(struct drm_obj *);
 int i915_gem_object_pin(struct drm_i915_gem_object *, uint32_t, int);
 void i915_gem_object_unpin(struct drm_i915_gem_object *);
-void i915_gem_retire_request(struct intel_ring_buffer *,
+void i915_gem_retire_request(struct inteldrm_softc *,
     struct drm_i915_gem_request *);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *);
 int i915_gem_check_wedge(struct inteldrm_softc *,
diff --git a/sys/dev/pci/drm/i915_gem.c b/sys/dev/pci/drm/i915_gem.c
index 771d165c51d..08291bdf281 100644
--- a/sys/dev/pci/drm/i915_gem.c
+++ b/sys/dev/pci/drm/i915_gem.c
@@ -788,7 +788,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	/* Move from whatever list we were on to the tail of execution.
 	 */
 	i915_move_to_tail(obj, &dev_priv->mm.active_list);
-	list_move_tail(&obj->ring_list, &ring->active_list);
 	obj->last_rendering_seqno = seqno;
 }
 
@@ -823,7 +822,6 @@ i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *obj)
 	else
 		i915_move_to_tail(obj, &dev_priv->mm.inactive_list);
-	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
 	i915_gem_object_move_off_active(obj);
 
@@ -999,7 +997,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 		if (i915_seqno_passed(seqno, request->seqno) ||
 		    dev_priv->mm.wedged) {
 			list_del(&request->list);
-			i915_gem_retire_request(ring, request);
+			i915_gem_retire_request(dev_priv, request);
 			mtx_leave(&dev_priv->request_lock);
 
 			drm_free(request);
@@ -1895,7 +1893,6 @@ out:
 void
 i915_gem_object_init(struct drm_i915_gem_object *obj)
 {
-	INIT_LIST_HEAD(&obj->ring_list);
 }
 
 struct drm_i915_gem_object *
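For context on what the revert changes at the data-structure level: the code being removed tracked active objects on a per-ring active_list through a Linux-style list_head embedded in each drm_i915_gem_object, while the restored code walks the single driver-wide dev_priv->mm.active_list TAILQ oldest-first and stops at the first object whose last_rendering_seqno no longer matches the request being retired. The following is a minimal standalone sketch of that restored TAILQ walk; the fake_obj, fake_list, and retire_up_to names are invented for illustration, and only the loop shape mirrors i915_gem_retire_request() in the diff above.

/*
 * Sketch of the "retire everything finished by this seqno" walk.
 * Stand-in types only -- not the real drm_i915_gem_object layout.
 */
#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>

struct fake_obj {
	uint32_t		last_rendering_seqno;
	TAILQ_ENTRY(fake_obj)	entry;
};

TAILQ_HEAD(fake_list, fake_obj);

/*
 * The active list is kept in submission order, so the walk can stop
 * at the first object stamped with a different (newer) seqno.
 */
static void
retire_up_to(struct fake_list *active, uint32_t seqno)
{
	struct fake_obj *obj;

	while ((obj = TAILQ_FIRST(active)) != NULL) {
		if (obj->last_rendering_seqno != seqno)
			break;
		TAILQ_REMOVE(active, obj, entry);
		printf("retired object completed by seqno %u\n", seqno);
	}
}

int
main(void)
{
	struct fake_list active = TAILQ_HEAD_INITIALIZER(active);
	struct fake_obj a = { .last_rendering_seqno = 1 };
	struct fake_obj b = { .last_rendering_seqno = 2 };

	TAILQ_INSERT_TAIL(&active, &a, entry);
	TAILQ_INSERT_TAIL(&active, &b, entry);

	retire_up_to(&active, 1);	/* retires a, leaves b active */
	return 0;
}

The early break is only valid because objects are appended in submission order (i915_move_to_tail in the diff above), so everything after the first non-matching entry is necessarily newer than the request being retired.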