author    | Jonathan Gray <jsg@jsg.id.au> | 2013-02-22 16:52:16 +1100
committer | Jonathan Gray <jsg@jsg.id.au> | 2013-02-22 16:52:16 +1100
commit    | 59b9d29256cb3784ac50f5437b2d1fb101fb48d1 (patch)
tree      | 5b05b23b8640d464249a7702ebe90b2801429546
parent    | 675a797fcd799014a0001216bd7b6dedcd73c46e (diff)
remove the list lock that Linux removed back in 2010:
commit de227ef0907258359d53e3e1530c1f3678eb2bb9
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat Jul 3 07:58:38 2010 +0100
drm/i915: Kill the active list spinlock
This spinlock only served debugging purposes in a time when we could not
be sure of the mutex ever being released upon a GPU hang. As we now
should be able to rely on hangcheck to do the job for us (and that error
reporting should not itself require the struct mutex) we can kill the
incomplete attempt at protection.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
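The caller-side pattern this removes is easiest to see in miniature. Below is a userland sketch, not the driver code: POSIX mutexes and <sys/queue.h> stand in for the kernel's mtx(9) and list machinery, and request_lock, list_lock, struct obj and retire_one() are illustrative names only. It shows the loop shape before the change (drop the inner list lock around any call that may sleep or recurse, then re-enter it to continue) and after (the outer lock alone serializes the lists):

/*
 * Illustrative userland sketch, not driver code: the two-lock loop the
 * driver had before this commit vs. the single-lock loop after it.
 */
#include <sys/queue.h>
#include <pthread.h>

struct obj {
    TAILQ_ENTRY(obj) entry;
};
TAILQ_HEAD(objlist, obj);

static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* redundant */
static struct objlist active = TAILQ_HEAD_INITIALIZER(active);

/* stand-in for work that may sleep or recurse into lock-taking code */
static void
retire_one(struct obj *o)
{
    (void)o;    /* unbind, unref, etc. */
}

/* before: every loop must drop and re-enter the inner list_lock
 * around any call that might sleep or recurse */
static void
retire_before(void)
{
    struct obj *o;

    pthread_mutex_lock(&request_lock);
    pthread_mutex_lock(&list_lock);
    while ((o = TAILQ_FIRST(&active)) != NULL) {
        TAILQ_REMOVE(&active, o, entry);
        pthread_mutex_unlock(&list_lock);  /* drop for the call */
        retire_one(o);
        pthread_mutex_lock(&list_lock);    /* re-enter to continue */
    }
    pthread_mutex_unlock(&list_lock);
    pthread_mutex_unlock(&request_lock);
}

/* after: request_lock already serializes every user of the list,
 * so the loop body needs no lock juggling at all */
static void
retire_after(void)
{
    struct obj *o;

    pthread_mutex_lock(&request_lock);
    while ((o = TAILQ_FIRST(&active)) != NULL) {
        TAILQ_REMOVE(&active, o, entry);
        retire_one(o);
    }
    pthread_mutex_unlock(&request_lock);
}

The trade-off is that the single-lock version holds request_lock across a potentially slow retire_one(); the quoted Linux commit's argument is that a hung GPU is hangcheck's problem, not something a debugging spinlock should guard against.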
-rw-r--r-- | sys/dev/pci/drm/i915_drv.c            | 11
-rw-r--r-- | sys/dev/pci/drm/i915_drv.h            |  3
-rw-r--r-- | sys/dev/pci/drm/i915_gem.c            | 16
-rw-r--r-- | sys/dev/pci/drm/i915_gem_evict.c      |  6
-rw-r--r-- | sys/dev/pci/drm/i915_gem_execbuffer.c |  2
5 files changed, 1 insertion(+), 37 deletions(-)
diff --git a/sys/dev/pci/drm/i915_drv.c b/sys/dev/pci/drm/i915_drv.c
index fb521bb1fc3..297d28987a7 100644
--- a/sys/dev/pci/drm/i915_drv.c
+++ b/sys/dev/pci/drm/i915_drv.c
@@ -803,7 +803,6 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
     printf(": %s\n", pci_intr_string(pa->pa_pc, dev_priv->ih));
 
     mtx_init(&dev_priv->irq_lock, IPL_TTY);
-    mtx_init(&dev_priv->list_lock, IPL_NONE);
     mtx_init(&dev_priv->request_lock, IPL_NONE);
     mtx_init(&dev_priv->rps.lock, IPL_NONE);
     mtx_init(&dev_priv->dpio_lock, IPL_NONE);
@@ -1253,7 +1252,6 @@ i915_gem_process_flushing(struct intel_ring_buffer *ring,
     struct drm_i915_gem_object *obj_priv, *next;
 
     MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
-    mtx_enter(&dev_priv->list_lock);
 
     list_for_each_entry_safe(obj_priv, next,
         &ring->gpu_write_list,
@@ -1279,7 +1277,6 @@ i915_gem_process_flushing(struct intel_ring_buffer *ring,
         }
     }
 
-    mtx_leave(&dev_priv->list_lock);
 }
 
 #if 0
@@ -1296,7 +1293,6 @@ i915_gem_retire_request(struct inteldrm_softc *dev_priv,
     struct drm_i915_gem_object *obj_priv;
 
     MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
-    mtx_enter(&dev_priv->list_lock);
     /* Move any buffers on the active list that are no longer referenced
      * by the ringbuffer to the flushing/inactive lists as appropriate. */
     while ((obj_priv = TAILQ_FIRST(&dev_priv->mm.active_list)) != NULL) {
@@ -1324,10 +1320,8 @@ i915_gem_retire_request(struct inteldrm_softc *dev_priv,
         } else {
             /* unlocks object for us and drops ref */
             i915_gem_object_move_to_inactive_locked(obj_priv);
-            mtx_enter(&dev_priv->list_lock);
         }
     }
-    mtx_leave(&dev_priv->list_lock);
 }
 
 #endif
@@ -1393,7 +1387,6 @@ i915_gem_find_inactive_object(struct inteldrm_softc *dev_priv,
      * We don't need references to the object as long as we hold the list
      * lock, they won't disappear until we release the lock.
      */
-    mtx_enter(&dev_priv->list_lock);
     list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
         obj = &obj_priv->base;
         if (obj->size >= min_size) {
@@ -1421,7 +1414,6 @@ i915_gem_find_inactive_object(struct inteldrm_softc *dev_priv,
             best = NULL;
         }
     }
-    mtx_leave(&dev_priv->list_lock);
 
     return (best);
 }
@@ -1900,7 +1892,6 @@ inteldrm_hung(void *arg, void *reset_type)
      * flushed or completed otherwise. nuke the domains since
      * they're now irrelavent.
      */
-    mtx_enter(&dev_priv->list_lock);
     while (!list_empty(&dev_priv->mm.flushing_list)) {
         obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
             struct drm_i915_gem_object,
@@ -1915,9 +1906,7 @@ inteldrm_hung(void *arg, void *reset_type)
         }
         /* unlocks object and list */
         i915_gem_object_move_to_inactive_locked(obj_priv);
-        mtx_enter(&dev_priv->list_lock);
     }
-    mtx_leave(&dev_priv->list_lock);
 
     /* unbind everything */
     (void)i915_gem_evict_inactive(dev_priv);
diff --git a/sys/dev/pci/drm/i915_drv.h b/sys/dev/pci/drm/i915_drv.h
index 811713691c9..f444e010608 100644
--- a/sys/dev/pci/drm/i915_drv.h
+++ b/sys/dev/pci/drm/i915_drv.h
@@ -613,9 +613,6 @@ struct inteldrm_softc {
     /* number of ioctls + faults in flight */
     int entries;
 
-    /* protects inactive, flushing, active and exec locks */
-    struct mutex list_lock;
-
     /* protects access to request_list */
     struct mutex request_lock;
 
diff --git a/sys/dev/pci/drm/i915_gem.c b/sys/dev/pci/drm/i915_gem.c
index 7554277e74b..23f9bc483e6 100644
--- a/sys/dev/pci/drm/i915_gem.c
+++ b/sys/dev/pci/drm/i915_gem.c
@@ -807,7 +807,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
     seqno = i915_gem_next_request_seqno(ring);
 
     MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
-    MUTEX_ASSERT_LOCKED(&dev_priv->list_lock);
 
     obj->ring = ring;
     /* Add a reference if we're newly entering the active list. */
@@ -831,10 +830,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 void
 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
 {
-    struct drm_device *dev = obj->base.dev;
-    struct inteldrm_softc *dev_priv = dev->dev_private;
-
-    MUTEX_ASSERT_LOCKED(&dev_priv->list_lock);
     DRM_OBJ_ASSERT_LOCKED(&obj->base);
 
     obj->last_read_seqno = 0;
@@ -862,7 +857,6 @@ i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *obj)
     struct drm_device *dev = obj->base.dev;
     struct inteldrm_softc *dev_priv = dev->dev_private;
 
-    MUTEX_ASSERT_LOCKED(&dev_priv->list_lock);
     DRM_OBJ_ASSERT_LOCKED(&obj->base);
 
     inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
@@ -880,7 +874,6 @@ i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *obj)
     KASSERT((obj->base.do_flags & I915_GPU_WRITE) == 0);
 
     /* unlock because this unref could recurse */
-    mtx_leave(&dev_priv->list_lock);
     if (obj->active) {
         obj->active = 0;
         drm_unref_locked(&obj->base.uobj);
@@ -896,12 +889,8 @@ i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
-    struct drm_device *dev = obj->base.dev;
-    struct inteldrm_softc *dev_priv = dev->dev_private;
-
-    mtx_enter(&dev_priv->list_lock);
     drm_lock_obj(&obj->base);
-    /* unlocks list lock and object lock */
+    /* unlocks object lock */
     i915_gem_object_move_to_inactive_locked(obj);
 }
 
@@ -1102,7 +1091,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
     /* Move any buffers on the active list that are no longer referenced
      * by the ringbuffer to the flushing/inactive lists as appropriate.
      */
-    mtx_enter(&dev_priv->list_lock);
     while (!list_empty(&ring->active_list)) {
         struct drm_i915_gem_object *obj;
 
@@ -1124,10 +1112,8 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
         } else {
             /* unlocks object for us and drops ref */
             i915_gem_object_move_to_inactive_locked(obj);
-            mtx_enter(&dev_priv->list_lock);
         }
     }
-    mtx_leave(&dev_priv->list_lock);
 
     mtx_leave(&dev_priv->request_lock);
 }
diff --git a/sys/dev/pci/drm/i915_gem_evict.c b/sys/dev/pci/drm/i915_gem_evict.c
index e79d06245bc..70a5019ebe4 100644
--- a/sys/dev/pci/drm/i915_gem_evict.c
+++ b/sys/dev/pci/drm/i915_gem_evict.c
@@ -119,7 +119,6 @@ i915_gem_evict_something(struct inteldrm_softc *dev_priv, size_t min_size)
      * When we wait on it, those buffers waiting for that flush
      * will get moved to inactive.
      */
-    mtx_enter(&dev_priv->list_lock);
     list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
         mm_list) {
         obj = &obj_priv->base;
@@ -129,7 +128,6 @@ i915_gem_evict_something(struct inteldrm_softc *dev_priv, size_t min_size)
         }
         obj = NULL;
     }
-    mtx_leave(&dev_priv->list_lock);
 
     if (write_domain) {
         if (i915_gem_flush(obj_priv->ring, write_domain,
@@ -192,7 +190,6 @@ i915_gem_evict_inactive(struct inteldrm_softc *dev_priv)
     struct drm_i915_gem_object *obj_priv, *next;
     int ret = 0;
 
-    mtx_enter(&dev_priv->list_lock);
     list_for_each_entry_safe(obj_priv, next,
         &dev_priv->mm.inactive_list, mm_list) {
         if (obj_priv->pin_count != 0) {
@@ -202,17 +199,14 @@ i915_gem_evict_inactive(struct inteldrm_softc *dev_priv)
         }
         /* reference it so that we can frob it outside the lock */
         drm_ref(&obj_priv->base.uobj);
-        mtx_leave(&dev_priv->list_lock);
 
         drm_hold_object(&obj_priv->base);
         ret = i915_gem_object_unbind(obj_priv);
         drm_unhold_and_unref(&obj_priv->base);
-        mtx_enter(&dev_priv->list_lock);
 
         if (ret)
             break;
     }
-    mtx_leave(&dev_priv->list_lock);
 
     return (ret);
 }
diff --git a/sys/dev/pci/drm/i915_gem_execbuffer.c b/sys/dev/pci/drm/i915_gem_execbuffer.c
index 754b15fe3f0..6dde3523d5e 100644
--- a/sys/dev/pci/drm/i915_gem_execbuffer.c
+++ b/sys/dev/pci/drm/i915_gem_execbuffer.c
@@ -484,7 +484,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
      * then we could fail in much worse ways.
      */
     mtx_enter(&dev_priv->request_lock); /* to prevent races on next_seqno */
-    mtx_enter(&dev_priv->list_lock);
     for (i = 0; i < args->buffer_count; i++) {
         obj = object_list[i];
         obj_priv = to_intel_bo(obj);
@@ -521,7 +520,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
         i915_gem_object_move_to_active(to_intel_bo(object_list[i]),
             ring);
         drm_unlock_obj(obj);
     }
-    mtx_leave(&dev_priv->list_lock);
     inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
     mtx_leave(&dev_priv->request_lock);