author    Jonathan Gray <jsg@cvs.openbsd.org>  2015-02-10 10:50:50 +0000
committer Jonathan Gray <jsg@cvs.openbsd.org>  2015-02-10 10:50:50 +0000
commit    b693ade7dde6b5b1fe94e2ff51b798784fb07494 (patch)
tree      307aac508e68308b57c35c3ebb5a8e0bfb2528b1 /sys/dev
parent    6ebba4debac7e9c3db2627ec7bc17e8ab289f04c (diff)
switch most mtx_* calls back to linux spinlocks
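This converts call sites in the shared DRM code from OpenBSD's native
mtx_enter()/mtx_leave() back to the spinlock names used by upstream Linux
DRM, which keeps the local diff against Linux smaller when merging newer
driver code. On OpenBSD the Linux API can then be satisfied by a thin
compat shim over kernel mutexes. A minimal sketch of such a shim, assuming
the tree's Linux-compat header maps spinlocks onto struct mutex (the real
definitions in the tree are authoritative and may differ in detail):

	/*
	 * Hypothetical compat shim: maps the Linux spinlock API onto
	 * OpenBSD kernel mutexes.  Illustrative only.
	 */
	#include <sys/mutex.h>

	#define spin_lock(lock)		mtx_enter(lock)
	#define spin_unlock(lock)	mtx_leave(lock)

	/*
	 * mtx_enter() already raises the IPL chosen at mtx_init() time,
	 * so there is no separate interrupt state to save; the flags
	 * argument is only referenced to avoid unused-variable warnings.
	 */
	#define spin_lock_irqsave(lock, flags) \
		do { (void)(flags); mtx_enter(lock); } while (0)
	#define spin_unlock_irqrestore(lock, flags) \
		do { (void)(flags); mtx_leave(lock); } while (0)

	#define spin_lock_irq(lock)	mtx_enter(lock)
	#define spin_unlock_irq(lock)	mtx_leave(lock)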
Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/pci/drm/drm_crtc.c              |  17
-rw-r--r--  sys/dev/pci/drm/drm_fb_helper.c         |   6
-rw-r--r--  sys/dev/pci/drm/drm_irq.c               |  60
-rw-r--r--  sys/dev/pci/drm/drm_mm.c                |  24
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.c         |  17
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem.c         |  18
-rw-r--r--  sys/dev/pci/drm/i915/i915_irq.c         |  99
-rw-r--r--  sys/dev/pci/drm/i915/intel_display.c    |  55
-rw-r--r--  sys/dev/pci/drm/i915/intel_pm.c         |  70
-rw-r--r--  sys/dev/pci/drm/i915/intel_ringbuffer.c |  42
-rw-r--r--  sys/dev/pci/drm/i915/intel_tv.c         |  11
-rw-r--r--  sys/dev/pci/drm/radeon/r100.c           |  13
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_device.c  |   6
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_display.c |  28
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_irq_kms.c |  58
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_kms.c     |  12
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_object.c  |   6
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_sa.c      |  20
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_ttm.c     |   6
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo.c            | 112
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_manager.c    |  22
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_util.c       |  12
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_vm.c         |   8
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_execbuf_util.c  |  30
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_lock.c          |  56
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_memory.c        |  30
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_memory.h        |  12
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_object.c        |  18
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_page_alloc.c    |  30
29 files changed, 490 insertions, 408 deletions
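Nearly every hunk below applies the same mechanical pattern: declare an
unsigned long flags variable in the enclosing function, then swap the
mtx_enter()/mtx_leave() pair for whichever Linux primitive upstream uses
at that site. A condensed before/after sketch of the pattern on a made-up
function (hypothetical, for illustration):

	/* Before: OpenBSD-native call site. */
	void
	example_disable_vblank(struct drm_device *dev, int crtc)
	{
		mtx_enter(&dev->vbl_lock);
		dev->vblank_enabled[crtc] = 0;
		mtx_leave(&dev->vbl_lock);
	}

	/* After: the Linux spinlock API, matching upstream DRM. */
	void
	example_disable_vblank(struct drm_device *dev, int crtc)
	{
		unsigned long irqflags;	/* saved interrupt state on Linux */

		spin_lock_irqsave(&dev->vbl_lock, irqflags);
		dev->vblank_enabled[crtc] = 0;
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
	}

The variant chosen tracks upstream: spin_lock_irqsave()/
spin_unlock_irqrestore() where the lock may be taken from interrupt
context, plain spin_lock() or spin_lock_irq() where the interrupt state is
irrelevant or already known (e.g. file_priv->mm.lock, mchdev_lock), and
mutex_lock()/mutex_unlock() for sleepable locks such as module_mutex in
drm_fb_helper.c.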
diff --git a/sys/dev/pci/drm/drm_crtc.c b/sys/dev/pci/drm/drm_crtc.c
index c64f9db79ad..f694823e7d0 100644
--- a/sys/dev/pci/drm/drm_crtc.c
+++ b/sys/dev/pci/drm/drm_crtc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_crtc.c,v 1.9 2015/02/10 03:39:41 jsg Exp $ */
+/* $OpenBSD: drm_crtc.c,v 1.10 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
@@ -3619,6 +3619,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
int hdisplay, vdisplay;
int ret = -EINVAL;
@@ -3675,19 +3676,19 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
ret = -ENOMEM;
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
file_priv->event_space -= sizeof e->event;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
e = kzalloc(sizeof *e, GFP_KERNEL);
if (e == NULL) {
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
@@ -3703,9 +3704,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
ret = crtc->funcs->page_flip(crtc, fb, e);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
}
diff --git a/sys/dev/pci/drm/drm_fb_helper.c b/sys/dev/pci/drm/drm_fb_helper.c
index 07071d40a57..c0707306864 100644
--- a/sys/dev/pci/drm/drm_fb_helper.c
+++ b/sys/dev/pci/drm/drm_fb_helper.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_fb_helper.c,v 1.7 2015/02/10 03:39:41 jsg Exp $ */
+/* $OpenBSD: drm_fb_helper.c,v 1.8 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright (c) 2006-2009 Red Hat Inc.
* Copyright (c) 2006-2008 Intel Corporation
@@ -1433,9 +1433,9 @@ static int __init drm_fb_helper_modinit(void)
const char *name = "fbcon";
struct module *fbcon;
- mtx_enter(&module_mutex);
+ mutex_lock(&module_mutex);
fbcon = find_module(name);
- mtx_leave(&module_mutex);
+ mutex_unlock(&module_mutex);
if (!fbcon)
request_module_nowait(name);
diff --git a/sys/dev/pci/drm/drm_irq.c b/sys/dev/pci/drm/drm_irq.c
index 0a0e1f63d67..636f1f3e35d 100644
--- a/sys/dev/pci/drm/drm_irq.c
+++ b/sys/dev/pci/drm/drm_irq.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_irq.c,v 1.56 2015/02/10 01:39:32 jsg Exp $ */
+/* $OpenBSD: drm_irq.c,v 1.57 2015/02/10 10:50:49 jsg Exp $ */
/**
* \file drm_irq.c
* IRQ support
@@ -157,6 +157,7 @@ abs64(int64_t x)
*/
static void vblank_disable_and_save(struct drm_device *dev, int crtc)
{
+ unsigned long irqflags;
u32 vblcount;
s64 diff_ns;
int vblrc;
@@ -167,7 +168,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* so no updates of timestamps or count can happen after we've
* disabled. Needed to prevent races in case of delayed irq's.
*/
- mtx_enter(&dev->vblank_time_lock);
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
dev->driver->disable_vblank(dev, crtc);
dev->vblank_enabled[crtc] = 0;
@@ -219,25 +220,26 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
/* Invalidate all timestamps while vblank irq's are off. */
clear_vblank_timestamps(dev, crtc);
- mtx_leave(&dev->vblank_time_lock);
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
static void vblank_disable_fn(void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
+ unsigned long irqflags;
int i;
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
- mtx_enter(&dev->vbl_lock);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
DPRINTF("disabling vblank on crtc %d\n", i);
vblank_disable_and_save(dev, i);
}
- mtx_leave(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
@@ -414,6 +416,7 @@ EXPORT_SYMBOL(drm_irq_install);
*/
int drm_irq_uninstall(struct drm_device *dev)
{
+ unsigned long irqflags;
int i;
mutex_lock(&dev->struct_mutex);
@@ -431,14 +434,14 @@ int drm_irq_uninstall(struct drm_device *dev)
* so that we can continue refcounting correctly.
*/
if (dev->num_crtcs) {
- mtx_enter(&dev->vbl_lock);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
wakeup(&dev->vbl_queue[i]);
dev->vblank_enabled[i] = 0;
dev->last_vblank[i] =
dev->driver->get_vblank_counter(dev, i);
}
- mtx_leave(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
DRM_DEBUG("irq=%d\n", dev->irq);
@@ -963,12 +966,13 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
+ unsigned long irqflags, irqflags2;
int ret = 0;
- mtx_enter(&dev->vbl_lock);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
- mtx_enter(&dev->vblank_time_lock);
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
if (!dev->vblank_enabled[crtc]) {
/* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
@@ -986,14 +990,14 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
drm_update_vblank_count(dev, crtc);
}
}
- mtx_leave(&dev->vblank_time_lock);
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
} else {
if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
ret = -EINVAL;
}
}
- mtx_leave(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return ret;
}
@@ -1030,9 +1034,10 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
struct drm_pending_event *ev, *tmp;
struct drm_pending_vblank_event *vev;
struct timeval now;
+ unsigned long irqflags;
unsigned int seq;
- mtx_enter(&dev->vbl_lock);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
wakeup(&dev->vbl_queue[crtc]);
@@ -1040,7 +1045,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
- mtx_enter(&dev->event_lock);
+ spin_lock(&dev->event_lock);
for (ev = TAILQ_FIRST(list); ev != NULL; ev = tmp) {
tmp = TAILQ_NEXT(ev, link);
@@ -1055,9 +1060,9 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
drm_vblank_put(dev, vev->pipe);
send_vblank_event(dev, vev, seq, &now);
}
- mtx_leave(&dev->event_lock);
+ spin_unlock(&dev->event_lock);
- mtx_leave(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
/**
@@ -1090,14 +1095,16 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset);
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
+ unsigned long irqflags;
+
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
if (dev->vblank_inmodeset[crtc]) {
- mtx_enter(&dev->vbl_lock);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
- mtx_leave(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (dev->vblank_inmodeset[crtc] & 0x2)
drm_vblank_put(dev, crtc);
@@ -1156,6 +1163,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
{
struct drm_pending_vblank_event *e;
struct timeval now;
+ unsigned long flags;
unsigned int seq;
int ret;
@@ -1174,7 +1182,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->base.file_priv = file_priv;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
ret = -EBUSY;
@@ -1209,12 +1217,12 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
vblwait->reply.sequence = vblwait->request.sequence;
}
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
err_unlock:
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
err_put:
drm_vblank_put(dev, pipe);
@@ -1329,12 +1337,13 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
struct drm_pending_event *ev, *tmp;
struct drm_pending_vblank_event *vev;
struct timeval now;
+ unsigned long flags;
unsigned int seq;
list = &dev->vbl_events;
seq = drm_vblank_count_and_time(dev, crtc, &now);
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
for (ev = TAILQ_FIRST(list); ev != NULL; ev = tmp) {
tmp = TAILQ_NEXT(ev, link);
@@ -1354,7 +1363,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
send_vblank_event(dev, vev, seq, &now);
}
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
// trace_drm_vblank_event(crtc, seq);
}
@@ -1372,6 +1381,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
u32 vblcount;
s64 diff_ns;
struct timeval tvblank;
+ unsigned long irqflags;
if (!dev->num_crtcs)
return false;
@@ -1380,11 +1390,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* vblank enable/disable, as this would cause inconsistent
* or corrupted timestamps and vblank counts.
*/
- mtx_enter(&dev->vblank_time_lock);
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/* Vblank irq handling disabled. Nothing to do. */
if (!dev->vblank_enabled[crtc]) {
- mtx_leave(&dev->vblank_time_lock);
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
return false;
}
@@ -1427,7 +1437,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
wakeup(&dev->vbl_queue[crtc]);
drm_handle_vblank_events(dev, crtc);
- mtx_leave(&dev->vblank_time_lock);
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/sys/dev/pci/drm/drm_mm.c b/sys/dev/pci/drm/drm_mm.c
index 190ce15b206..d473fa40d6a 100644
--- a/sys/dev/pci/drm/drm_mm.c
+++ b/sys/dev/pci/drm/drm_mm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_mm.c,v 1.4 2014/03/09 11:07:18 jsg Exp $ */
+/* $OpenBSD: drm_mm.c,v 1.5 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
@@ -57,7 +57,7 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
child = kzalloc(sizeof(*child), GFP_KERNEL);
if (unlikely(child == NULL)) {
- mtx_enter(&mm->unused_lock);
+ spin_lock(&mm->unused_lock);
if (list_empty(&mm->unused_nodes))
child = NULL;
else {
@@ -67,7 +67,7 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
list_del(&child->node_list);
--mm->num_unused;
}
- mtx_leave(&mm->unused_lock);
+ spin_unlock(&mm->unused_lock);
}
return child;
}
@@ -81,21 +81,21 @@ int drm_mm_pre_get(struct drm_mm *mm)
{
struct drm_mm_node *node;
- mtx_enter(&mm->unused_lock);
+ spin_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
- mtx_leave(&mm->unused_lock);
+ spin_unlock(&mm->unused_lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
- mtx_enter(&mm->unused_lock);
+ spin_lock(&mm->unused_lock);
if (unlikely(node == NULL)) {
int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
- mtx_leave(&mm->unused_lock);
+ spin_unlock(&mm->unused_lock);
return ret;
}
++mm->num_unused;
list_add_tail(&node->node_list, &mm->unused_nodes);
}
- mtx_leave(&mm->unused_lock);
+ spin_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
@@ -355,13 +355,13 @@ void drm_mm_put_block(struct drm_mm_node *node)
drm_mm_remove_node(node);
- mtx_enter(&mm->unused_lock);
+ spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&node->node_list, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(node);
- mtx_leave(&mm->unused_lock);
+ spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
@@ -672,13 +672,13 @@ void drm_mm_takedown(struct drm_mm * mm)
return;
}
- mtx_enter(&mm->unused_lock);
+ spin_lock(&mm->unused_lock);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
- mtx_leave(&mm->unused_lock);
+ spin_unlock(&mm->unused_lock);
BUG_ON(mm->num_unused != 0);
}
diff --git a/sys/dev/pci/drm/i915/i915_drv.c b/sys/dev/pci/drm/i915/i915_drv.c
index ddeba9450d0..61eceeee34a 100644
--- a/sys/dev/pci/drm/i915/i915_drv.c
+++ b/sys/dev/pci/drm/i915/i915_drv.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_drv.c,v 1.72 2015/02/10 01:39:32 jsg Exp $ */
+/* $OpenBSD: i915_drv.c,v 1.73 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -1485,12 +1485,13 @@ static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
+ unsigned long irqflags;
int retries;
/* Hold gt_lock across reset to prevent any register access
* with forcewake not set correctly
*/
- mtx_enter(&dev_priv->gt_lock);
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
/* Reset the chip */
@@ -1520,7 +1521,7 @@ static int gen6_do_reset(struct drm_device *dev)
/* Restore fifo count */
dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- mtx_leave(&dev_priv->gt_lock);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
return ret;
}
@@ -2180,8 +2181,9 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; \
+ unsigned long irqflags; \
u##x val = 0; \
- mtx_enter(&dev_priv->gt_lock); \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (IS_GEN5(dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev), (reg))) { \
@@ -2195,7 +2197,7 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
} else { \
val = read##x(dev_priv, reg); \
} \
- mtx_leave(&dev_priv->gt_lock); \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
@@ -2208,10 +2210,11 @@ __i915_read(64, q)
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ unsigned long irqflags; \
u32 __fifo_ret = 0; \
struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
- mtx_enter(&dev_priv->gt_lock); \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -2233,7 +2236,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
DRM_ERROR("Unclaimed write to %x\n", reg); \
write32(dev_priv, GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
- mtx_leave(&dev_priv->gt_lock); \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
}
__i915_write(8, b)
__i915_write(16, w)
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index 5b7971b6545..7281b0684ac 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem.c,v 1.81 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: i915_gem.c,v 1.82 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -2276,11 +2276,11 @@ i915_add_request(struct intel_ring_buffer *ring,
if (file) {
struct drm_i915_file_private *file_priv = file->driver_priv;
- mtx_enter(&file_priv->mm.lock);
+ spin_lock(&file_priv->mm.lock);
request->file_priv = file_priv;
list_add_tail(&request->client_list,
&file_priv->mm.request_list);
- mtx_leave(&file_priv->mm.lock);
+ spin_unlock(&file_priv->mm.lock);
}
trace_i915_gem_request_add(ring, request->seqno);
@@ -2310,12 +2310,12 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
if (!file_priv)
return;
- mtx_enter(&file_priv->mm.lock);
+ spin_lock(&file_priv->mm.lock);
if (request->file_priv) {
list_del(&request->client_list);
request->file_priv = NULL;
}
- mtx_leave(&file_priv->mm.lock);
+ spin_unlock(&file_priv->mm.lock);
}
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
@@ -3644,7 +3644,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
- mtx_enter(&file_priv->mm.lock);
+ spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_ticks, recent_enough))
break;
@@ -3652,7 +3652,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
ring = request->ring;
seqno = request->seqno;
}
- mtx_leave(&file_priv->mm.lock);
+ spin_unlock(&file_priv->mm.lock);
if (seqno == 0)
return 0;
@@ -4577,7 +4577,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
- mtx_enter(&file_priv->mm.lock);
+ spin_lock(&file_priv->mm.lock);
while (!list_empty(&file_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
@@ -4587,7 +4587,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
list_del(&request->client_list);
request->file_priv = NULL;
}
- mtx_leave(&file_priv->mm.lock);
+ spin_unlock(&file_priv->mm.lock);
}
#ifdef notyet
diff --git a/sys/dev/pci/drm/i915/i915_irq.c b/sys/dev/pci/drm/i915/i915_irq.c
index d84f5a1344e..14c812d08f8 100644
--- a/sys/dev/pci/drm/i915/i915_irq.c
+++ b/sys/dev/pci/drm/i915/i915_irq.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_irq.c,v 1.17 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: i915_irq.c,v 1.18 2015/02/10 10:50:49 jsg Exp $ */
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
*/
/*
@@ -89,12 +89,13 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
void intel_enable_asle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
/* FIXME: opregion/asle for VLV */
if (IS_VALLEYVIEW(dev))
return;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -106,7 +107,7 @@ void intel_enable_asle(struct drm_device *dev)
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
@@ -306,8 +307,9 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
+ unsigned long flags;
- mtx_enter(&mchdev_lock);
+ spin_lock_irqsave(&mchdev_lock, flags);
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -335,7 +337,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
if (ironlake_set_drps(dev, new_delay))
dev_priv->ips.cur_delay = new_delay;
- mtx_leave(&mchdev_lock);
+ spin_unlock_irqrestore(&mchdev_lock, flags);
return;
}
@@ -365,12 +367,12 @@ static void gen6_pm_rps_work(void *arg1)
u32 pm_iir, pm_imr;
u8 new_delay;
- mtx_enter(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->rps.lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
- mtx_leave(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
@@ -409,6 +411,7 @@ static void ivybridge_parity_work(void *arg1)
u32 error_status, row, bank, subbank;
// char *parity_event[5];
uint32_t misccpctl;
+ unsigned long flags;
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
@@ -431,10 +434,10 @@ static void ivybridge_parity_work(void *arg1)
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev->struct_mutex);
@@ -464,14 +467,15 @@ static void ivybridge_parity_work(void *arg1)
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long flags;
if (!HAS_L3_GPU_CACHE(dev))
return;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
task_add(systq, &dev_priv->l3_parity.error_task);
}
@@ -503,6 +507,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
u32 pm_iir)
{
+ unsigned long flags;
+
/*
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
@@ -513,11 +519,11 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* The mask bit in IMR is cleared by dev_priv->rps.work.
*/
- mtx_enter(&dev_priv->rps.lock);
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
dev_priv->rps.pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
- mtx_leave(&dev_priv->rps.lock);
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
task_add(systq, &dev_priv->rps.task);
}
@@ -528,6 +534,7 @@ static int valleyview_intr(void *arg)
struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
u32 iir, gt_iir, pm_iir;
int ret = IRQ_NONE;
+ unsigned long irqflags;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
bool blc_event;
@@ -546,7 +553,7 @@ static int valleyview_intr(void *arg)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
@@ -561,7 +568,7 @@ static int valleyview_intr(void *arg)
I915_WRITE(reg, pipe_stats[pipe]);
}
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
for_each_pipe(pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
@@ -1479,20 +1486,21 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
+ unsigned long flags;
bool stall_detected;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL ||
atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
!work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -1509,7 +1517,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
crtc->x * crtc->fb->bits_per_pixel/8);
}
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
if (stall_detected) {
DRM_DEBUG_DRIVER("Pageflip stall detected\n");
@@ -1523,11 +1531,12 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
@@ -1538,7 +1547,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
/* maintain vblank delivery even in deep C-states */
if (dev_priv->info->gen == 3)
I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@@ -1546,14 +1555,15 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@@ -1561,14 +1571,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv,
DE_PIPEA_VBLANK_IVB << (5 * pipe));
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@@ -1576,12 +1587,13 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
u32 imr;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
imr = I915_READ(VLV_IMR);
if (pipe == 0)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
@@ -1590,7 +1602,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
I915_WRITE(VLV_IMR, imr);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@@ -1601,43 +1613,47 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->info->gen == 3)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv,
DE_PIPEA_VBLANK_IVB << (pipe * 5));
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
u32 imr;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
@@ -1646,7 +1662,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
I915_WRITE(VLV_IMR, imr);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
@@ -2145,6 +2161,7 @@ static int i8xx_intr(void *arg)
struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
u16 iir, new_iir;
u32 pipe_stats[2];
+ unsigned long irqflags;
int irq_received;
int pipe;
u16 flip_mask =
@@ -2163,7 +2180,7 @@ static int i8xx_intr(void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
@@ -2182,7 +2199,7 @@ static int i8xx_intr(void *arg)
irq_received = 1;
}
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE16(IIR, iir & ~flip_mask);
new_iir = I915_READ16(IIR); /* Flush posted writes */
@@ -2321,6 +2338,7 @@ static int i915_intr(void *arg)
drm_i915_private_t *dev_priv = arg;
struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
@@ -2342,7 +2360,7 @@ static int i915_intr(void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
@@ -2359,7 +2377,7 @@ static int i915_intr(void *arg)
irq_received = true;
}
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (!irq_received)
break;
@@ -2558,6 +2576,7 @@ static int i965_intr(void *arg)
struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE, pipe;
@@ -2575,7 +2594,7 @@ static int i965_intr(void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
@@ -2594,7 +2613,7 @@ static int i965_intr(void *arg)
irq_received = 1;
}
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (!irq_received)
break;
diff --git a/sys/dev/pci/drm/i915/intel_display.c b/sys/dev/pci/drm/i915/intel_display.c
index a630d8e5424..1f89958e089 100644
--- a/sys/dev/pci/drm/i915/intel_display.c
+++ b/sys/dev/pci/drm/i915/intel_display.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_display.c,v 1.39 2015/02/10 01:39:32 jsg Exp $ */
+/* $OpenBSD: intel_display.c,v 1.40 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright © 2006-2007 Intel Corporation
*
@@ -408,10 +408,11 @@ static const intel_limit_t intel_limits_vlv_dp = {
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
+ unsigned long flags;
u32 val = 0;
int retries;
- mtx_enter(&dev_priv->dpio_lock);
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
for (retries = 50; retries > 0; retries--) {
if ((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0)
break;
@@ -437,16 +438,17 @@ u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
val = I915_READ(DPIO_DATA);
out_unlock:
- mtx_leave(&dev_priv->dpio_lock);
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
return val;
}
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
+ unsigned long flags;
int retries;
- mtx_enter(&dev_priv->dpio_lock);
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
for (retries = 50; retries > 0; retries--) {
if ((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0)
break;
@@ -470,7 +472,7 @@ static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
DRM_ERROR("DPIO write wait timed out\n");
out_unlock:
- mtx_leave(&dev_priv->dpio_lock);
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static void vlv_init_dpio(struct drm_device *dev)
@@ -1553,10 +1555,11 @@ static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
+ unsigned long flags;
u32 tmp;
int retries;
- mtx_enter(&dev_priv->dpio_lock);
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
for (retries = 100; retries > 0; retries--) {
if ((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0)
break;
@@ -1587,17 +1590,18 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
}
out_unlock:
- mtx_leave(&dev_priv->dpio_lock);
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
+ unsigned long flags;
u32 value = 0;
int retries;
- mtx_enter(&dev_priv->dpio_lock);
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
for (retries = 100; retries > 0; retries--) {
if ((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0)
break;
@@ -1629,7 +1633,7 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
value = I915_READ(SBI_DATA);
out_unlock:
- mtx_leave(&dev_priv->dpio_lock);
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
return value;
}
@@ -3038,14 +3042,15 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
bool pending;
if (atomic_read(&dev_priv->mm.wedged))
return false;
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
return pending;
}
@@ -7168,11 +7173,12 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct intel_unpin_work *work;
+ unsigned long flags;
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
intel_crtc->unpin_work = NULL;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
if (work) {
task_del(systq, &work->task);
@@ -7210,19 +7216,20 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
+ unsigned long flags;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
/* Ensure we don't miss a work->pending update ... */
DRM_READMEMORYBARRIER();
if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -7236,7 +7243,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
drm_vblank_put(dev, intel_crtc->pipe);
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
obj = work->old_fb_obj;
@@ -7269,15 +7276,16 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ unsigned long flags;
/* NB: An MMIO update of the plane base pointer will also
* generate a page-flip completion irq, i.e. every modeset
* is also accompanied by a spurious intel_prepare_page_flip().
*/
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work)
atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
@@ -7554,6 +7562,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
+ unsigned long flags;
int ret;
/* Can't change pixel format via MI display flips. */
@@ -7583,9 +7592,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto free_work;
/* We borrow the event spin lock for protecting unpin_work */
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
drm_vblank_put(dev, intel_crtc->pipe);
@@ -7593,7 +7602,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -EBUSY;
}
intel_crtc->unpin_work = work;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
#ifdef notyet
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -7641,9 +7650,9 @@ cleanup_pending:
mutex_unlock(&dev->struct_mutex);
cleanup:
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
intel_crtc->unpin_work = NULL;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
drm_vblank_put(dev, intel_crtc->pipe);
free_work:
diff --git a/sys/dev/pci/drm/i915/intel_pm.c b/sys/dev/pci/drm/i915/intel_pm.c
index 233a24eca47..b6930fec132 100644
--- a/sys/dev/pci/drm/i915/intel_pm.c
+++ b/sys/dev/pci/drm/i915/intel_pm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_pm.c,v 1.24 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: intel_pm.c,v 1.25 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright © 2012 Intel Corporation
*
@@ -2354,7 +2354,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
u8 fmax, fmin, fstart, vstart;
int retries;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
@@ -2418,7 +2418,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
dev_priv->ips.last_count2 = I915_READ(0x112f4);
nanouptime(&dev_priv->ips.last_time2);
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
@@ -2426,7 +2426,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
rgvswctl = I915_READ16(MEMSWCTL);
@@ -2444,7 +2444,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(MEMSWCTL, rgvswctl);
DELAY(1000);
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
@@ -2518,9 +2518,9 @@ static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- mtx_enter(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->rps.lock);
dev_priv->rps.pm_iir = 0;
- mtx_leave(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
@@ -2671,10 +2671,10 @@ static void gen6_enable_rps(struct drm_device *dev)
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
- mtx_enter(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->rps.lock);
WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
- mtx_leave(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
@@ -2959,11 +2959,11 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
if (dev_priv->info->gen != 5)
return 0;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
val = __i915_chipset_val(dev_priv);
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return val;
}
@@ -3164,11 +3164,11 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
if (dev_priv->info->gen != 5)
return;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
__i915_update_gfx_val(dev_priv);
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
@@ -3215,11 +3215,11 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
if (dev_priv->info->gen != 5)
return 0;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
val = __i915_gfx_val(dev_priv);
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return val;
}
@@ -3235,7 +3235,7 @@ unsigned long i915_read_mch_val(void)
struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
@@ -3246,7 +3246,7 @@ unsigned long i915_read_mch_val(void)
ret = chipset_val + graphics_val;
out_unlock:
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3261,7 +3261,7 @@ bool i915_gpu_raise(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -3272,7 +3272,7 @@ bool i915_gpu_raise(void)
dev_priv->ips.max_delay--;
out_unlock:
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3288,7 +3288,7 @@ bool i915_gpu_lower(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -3299,7 +3299,7 @@ bool i915_gpu_lower(void)
dev_priv->ips.max_delay++;
out_unlock:
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3316,7 +3316,7 @@ bool i915_gpu_busy(void)
bool ret = false;
int i;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
@@ -3325,7 +3325,7 @@ bool i915_gpu_busy(void)
ret |= !list_empty(&ring->request_list);
out_unlock:
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3342,7 +3342,7 @@ bool i915_gpu_turbo_disable(void)
struct drm_device *dev;
bool ret = true;
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -3355,7 +3355,7 @@ bool i915_gpu_turbo_disable(void)
ret = false;
out_unlock:
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3386,18 +3386,18 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
/* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values. */
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
i915_mch_dev = dev_priv;
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
// ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
- mtx_enter(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
i915_mch_dev = NULL;
- mtx_leave(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_device *dev)
{
@@ -4358,10 +4358,12 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
- mtx_enter(&dev_priv->gt_lock);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (dev_priv->forcewake_count++ == 0)
dev_priv->gt.force_wake_get(dev_priv);
- mtx_leave(&dev_priv->gt_lock);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -4394,10 +4396,12 @@ static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
- mtx_enter(&dev_priv->gt_lock);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (--dev_priv->forcewake_count == 0)
dev_priv->gt.force_wake_put(dev_priv);
- mtx_leave(&dev_priv->gt_lock);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.c b/sys/dev/pci/drm/i915/intel_ringbuffer.c
index 471286f20ed..b249a299998 100644
--- a/sys/dev/pci/drm/i915/intel_ringbuffer.c
+++ b/sys/dev/pci/drm/i915/intel_ringbuffer.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_ringbuffer.c,v 1.23 2014/12/17 06:58:10 guenther Exp $ */
+/* $OpenBSD: intel_ringbuffer.c,v 1.24 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright © 2008-2010 Intel Corporation
*
@@ -756,17 +756,18 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
@@ -776,14 +777,15 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
@@ -791,17 +793,18 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
@@ -811,14 +814,15 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
@@ -826,17 +830,18 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
@@ -846,14 +851,15 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -944,6 +950,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
@@ -953,7 +960,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
* blt/bsd rings on ivb. */
gen6_gt_force_wake_get(dev_priv);
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
@@ -964,7 +971,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
@@ -974,8 +981,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
@@ -985,7 +993,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
gen6_gt_force_wake_put(dev_priv);
}
diff --git a/sys/dev/pci/drm/i915/intel_tv.c b/sys/dev/pci/drm/i915/intel_tv.c
index 8106aacac78..b271dc0cfc8 100644
--- a/sys/dev/pci/drm/i915/intel_tv.c
+++ b/sys/dev/pci/drm/i915/intel_tv.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_tv.c,v 1.7 2014/05/03 05:19:37 jsg Exp $ */
+/* $OpenBSD: intel_tv.c,v 1.8 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright © 2006-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
@@ -1172,17 +1172,18 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1255,11 +1256,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- mtx_enter(&dev_priv->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- mtx_leave(&dev_priv->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
return type;
diff --git a/sys/dev/pci/drm/radeon/r100.c b/sys/dev/pci/drm/radeon/r100.c
index f26029c7ed1..38d9dc8e035 100644
--- a/sys/dev/pci/drm/radeon/r100.c
+++ b/sys/dev/pci/drm/radeon/r100.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: r100.c,v 1.8 2014/07/12 18:48:52 tedu Exp $ */
+/* $OpenBSD: r100.c,v 1.9 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -4196,14 +4196,15 @@ uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
return bus_space_read_4(rdev->memt, rdev->rmmio, reg);
else {
+ unsigned long flags;
uint32_t ret;
- mtx_enter(&rdev->mmio_idx_lock);
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
bus_space_write_4(rdev->memt, rdev->rmmio,
RADEON_MM_INDEX, reg);
ret = bus_space_read_4(rdev->memt, rdev->rmmio,
RADEON_MM_DATA);
- mtx_leave(&rdev->mmio_idx_lock);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
return ret;
}
@@ -4215,12 +4216,14 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
if (reg < rdev->rmmio_size && !always_indirect)
bus_space_write_4(rdev->memt, rdev->rmmio, reg, v);
else {
- mtx_enter(&rdev->mmio_idx_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
bus_space_write_4(rdev->memt, rdev->rmmio,
RADEON_MM_INDEX, reg);
bus_space_write_4(rdev->memt, rdev->rmmio,
RADEON_MM_DATA, v);
- mtx_leave(&rdev->mmio_idx_lock);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
}
diff --git a/sys/dev/pci/drm/radeon/radeon_device.c b/sys/dev/pci/drm/radeon/radeon_device.c
index 5e095dbbeb2..b476e5e4db7 100644
--- a/sys/dev/pci/drm/radeon/radeon_device.c
+++ b/sys/dev/pci/drm/radeon/radeon_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_device.c,v 1.7 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: radeon_device.c,v 1.8 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -986,9 +986,9 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
bool can_switch;
- mtx_enter(&dev->count_lock);
+ spin_lock(&dev->count_lock);
can_switch = (dev->open_count == 0);
- mtx_leave(&dev->count_lock);
+ spin_unlock(&dev->count_lock);
return can_switch;
}
diff --git a/sys/dev/pci/drm/radeon/radeon_display.c b/sys/dev/pci/drm/radeon/radeon_display.c
index 17b2f9bb367..b634e82630b 100644
--- a/sys/dev/pci/drm/radeon/radeon_display.c
+++ b/sys/dev/pci/drm/radeon/radeon_display.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_display.c,v 1.8 2015/01/27 03:17:36 dlg Exp $ */
+/* $OpenBSD: radeon_display.c,v 1.9 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -275,15 +275,16 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
struct radeon_unpin_work *work;
struct drm_pending_vblank_event *e;
struct timeval now;
+ unsigned long flags;
struct drm_file *file_priv;
u32 update_pending;
int vpos, hpos;
- mtx_enter(&rdev->ddev->event_lock);
+ spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->unpin_work;
if (work == NULL ||
(work->fence && !radeon_fence_signaled(work->fence))) {
- mtx_leave(&rdev->ddev->event_lock);
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
/* New pageflip, or just completion of a previous one? */
@@ -322,7 +323,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
* next vblank irq.
*/
radeon_crtc->deferred_flip_completion = 1;
- mtx_leave(&rdev->ddev->event_lock);
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
@@ -340,7 +341,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
wakeup(&file_priv->evlist);
selwakeup(&file_priv->rsel);
}
- mtx_leave(&rdev->ddev->event_lock);
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
radeon_fence_unref(&work->fence);
@@ -360,6 +361,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct drm_gem_object *obj;
struct radeon_bo *rbo;
struct radeon_unpin_work *work;
+ unsigned long flags;
u32 tiling_flags, pitch_pixels;
u64 base;
int r;
@@ -382,15 +384,15 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
obj = new_radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
- mtx_enter(&rbo->tbo.bdev->fence_lock);
+ spin_lock(&rbo->tbo.bdev->fence_lock);
if (rbo->tbo.sync_obj)
work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
- mtx_leave(&rbo->tbo.bdev->fence_lock);
+ spin_unlock(&rbo->tbo.bdev->fence_lock);
task_set(&work->task, radeon_unpin_work_func, work);
/* We borrow the event spin lock for protecting unpin_work */
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
if (radeon_crtc->unpin_work) {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
r = -EBUSY;
@@ -398,7 +400,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
}
radeon_crtc->unpin_work = work;
radeon_crtc->deferred_flip_completion = 0;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
@@ -457,9 +459,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
base &= ~7;
}
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
work->new_crtc_base = base;
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* update crtc fb */
crtc->fb = fb;
@@ -486,10 +488,10 @@ pflip_cleanup1:
radeon_bo_unreserve(rbo);
pflip_cleanup:
- mtx_enter(&dev->event_lock);
+ spin_lock_irqsave(&dev->event_lock, flags);
radeon_crtc->unpin_work = NULL;
unlock_free:
- mtx_leave(&dev->event_lock);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
radeon_fence_unref(&work->fence);
kfree(work);
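
One pattern in radeon_display.c is worth spelling out: dev->event_lock is
borrowed to protect unpin_work, and it has to be the _irqsave form because
radeon_crtc_handle_flip() runs from the vblank interrupt while the page-flip
path runs in process context.  A condensed sketch of the process-context half
(hypothetical helper, not the full driver logic):

	static int
	radeon_example_queue_flip(struct drm_device *dev,
	    struct radeon_crtc *radeon_crtc, struct radeon_unpin_work *work)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		if (radeon_crtc->unpin_work != NULL) {
			/* A previous flip is still in flight. */
			spin_unlock_irqrestore(&dev->event_lock, flags);
			return -EBUSY;
		}
		/* The interrupt side will pick this up and complete it. */
		radeon_crtc->unpin_work = work;
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return 0;
	}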
diff --git a/sys/dev/pci/drm/radeon/radeon_irq_kms.c b/sys/dev/pci/drm/radeon/radeon_irq_kms.c
index affdcfd95de..31754dd2114 100644
--- a/sys/dev/pci/drm/radeon/radeon_irq_kms.c
+++ b/sys/dev/pci/drm/radeon/radeon_irq_kms.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_irq_kms.c,v 1.5 2015/01/27 03:17:36 dlg Exp $ */
+/* $OpenBSD: radeon_irq_kms.c,v 1.6 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -102,9 +102,10 @@ radeon_hotplug_work_func(void *arg1)
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
unsigned i;
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
@@ -116,7 +117,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear bits */
radeon_irq_process(rdev);
}
@@ -145,12 +146,13 @@ int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
unsigned i;
if (rdev == NULL) {
return;
}
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
@@ -162,7 +164,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
@@ -315,13 +317,15 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
*/
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
+ unsigned long irqflags;
+
if (!rdev->ddev->irq_enabled)
return;
if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
@@ -337,13 +341,15 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
*/
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
+ unsigned long irqflags;
+
if (!rdev->ddev->irq_enabled)
return;
if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
@@ -358,6 +364,8 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
*/
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
+ unsigned long irqflags;
+
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
@@ -365,9 +373,9 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
return;
if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
@@ -382,6 +390,8 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
*/
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
+ unsigned long irqflags;
+
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
@@ -389,9 +399,9 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
return;
if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
@@ -405,13 +415,15 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
*/
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
+ unsigned long irqflags;
+
if (!rdev->ddev->irq_enabled)
return;
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = true;
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
@@ -425,13 +437,15 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
*/
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
+ unsigned long irqflags;
+
if (!rdev->ddev->irq_enabled)
return;
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = false;
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
@@ -444,16 +458,17 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
*/
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
+ unsigned long irqflags;
int i;
if (!rdev->ddev->irq_enabled)
return;
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
@@ -466,15 +481,16 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
*/
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
+ unsigned long irqflags;
int i;
if (!rdev->ddev->irq_enabled)
return;
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
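
The irq_kms helpers above all follow one idiom: the per-source reference
counts are plain atomics, and only the 0 -> 1 and 1 -> 0 transitions take
rdev->irq.lock, because radeon_irq_set() rewrites a shared interrupt mask
that must be updated atomically with respect to every other source.  Reduced
to its skeleton:

	unsigned long irqflags;

	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
		/* First user of this source: reprogram the IRQ mask. */
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}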
diff --git a/sys/dev/pci/drm/radeon/radeon_kms.c b/sys/dev/pci/drm/radeon/radeon_kms.c
index 700c510e486..05a6a359db8 100644
--- a/sys/dev/pci/drm/radeon/radeon_kms.c
+++ b/sys/dev/pci/drm/radeon/radeon_kms.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_kms.c,v 1.33 2015/02/10 01:39:32 jsg Exp $ */
+/* $OpenBSD: radeon_kms.c,v 1.34 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -1235,6 +1235,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
int r;
if (crtc < 0 || crtc >= rdev->num_crtc) {
@@ -1242,10 +1243,10 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
return -EINVAL;
}
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.crtc_vblank_int[crtc] = true;
r = radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
return r;
}
@@ -1260,16 +1261,17 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return;
}
- mtx_enter(&rdev->irq.lock);
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.crtc_vblank_int[crtc] = false;
radeon_irq_set(rdev);
- mtx_leave(&rdev->irq.lock);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
diff --git a/sys/dev/pci/drm/radeon/radeon_object.c b/sys/dev/pci/drm/radeon/radeon_object.c
index deb435fd547..e34be49f99f 100644
--- a/sys/dev/pci/drm/radeon/radeon_object.c
+++ b/sys/dev/pci/drm/radeon/radeon_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_object.c,v 1.6 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: radeon_object.c,v 1.7 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
@@ -632,12 +632,12 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0))
return r;
- mtx_enter(&bo->tbo.bdev->fence_lock);
+ spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
- mtx_leave(&bo->tbo.bdev->fence_lock);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
diff --git a/sys/dev/pci/drm/radeon/radeon_sa.c b/sys/dev/pci/drm/radeon/radeon_sa.c
index 55682dfd9ce..e43b2608ddb 100644
--- a/sys/dev/pci/drm/radeon/radeon_sa.c
+++ b/sys/dev/pci/drm/radeon/radeon_sa.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_sa.c,v 1.4 2014/02/09 23:57:04 jsg Exp $ */
+/* $OpenBSD: radeon_sa.c,v 1.5 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright 2011 Red Hat Inc.
* All Rights Reserved.
@@ -342,7 +342,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
INIT_LIST_HEAD(&(*sa_bo)->olist);
INIT_LIST_HEAD(&(*sa_bo)->flist);
- mtx_enter(&sa_manager->wq_lock);
+ spin_lock(&sa_manager->wq_lock);
do {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
fences[i] = NULL;
@@ -354,16 +354,16 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
size, align)) {
- mtx_leave(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq_lock);
return 0;
}
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
- mtx_leave(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq_lock);
r = radeon_fence_wait_any(rdev, fences, false);
- mtx_enter(&sa_manager->wq_lock);
+ spin_lock(&sa_manager->wq_lock);
/* if we have nothing to wait for block */
if (r == -ENOENT && block) {
r = 0;
@@ -383,7 +383,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
} while (!r);
- mtx_leave(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq_lock);
kfree(*sa_bo);
*sa_bo = NULL;
return r;
@@ -399,7 +399,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
}
sa_manager = (*sa_bo)->manager;
- mtx_enter(&sa_manager->wq_lock);
+ spin_lock(&sa_manager->wq_lock);
if (fence && !radeon_fence_signaled(fence)) {
(*sa_bo)->fence = radeon_fence_ref(fence);
list_add_tail(&(*sa_bo)->flist,
@@ -408,7 +408,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
radeon_sa_bo_remove_locked(*sa_bo);
}
wakeup(&sa_manager->wq);
- mtx_leave(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq_lock);
*sa_bo = NULL;
}
@@ -418,7 +418,7 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
{
struct radeon_sa_bo *i;
- mtx_enter(&sa_manager->wq.lock);
+ spin_lock(&sa_manager->wq_lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
@@ -433,6 +433,6 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
}
seq_printf(m, "\n");
}
- mtx_leave(&sa_manager->wq.lock);
+ spin_unlock(&sa_manager->wq_lock);
}
#endif
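
radeon_sa.c shows the standard drop/wait/retake shape: sleeping with a
spinlock held is forbidden, so radeon_sa_bo_new() releases wq_lock across the
fence wait and re-validates its state afterwards.  Schematically, with
try_alloc_locked() standing in for the real allocation attempt (hypothetical
name):

	spin_lock(&sa_manager->wq_lock);
	for (;;) {
		if (try_alloc_locked(sa_manager))
			break;				/* got a block */
		spin_unlock(&sa_manager->wq_lock);
		/* May sleep; the pool can change while we are unlocked. */
		r = radeon_fence_wait_any(rdev, fences, false);
		spin_lock(&sa_manager->wq_lock);
		if (r)
			break;
	}
	spin_unlock(&sa_manager->wq_lock);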
diff --git a/sys/dev/pci/drm/radeon/radeon_ttm.c b/sys/dev/pci/drm/radeon/radeon_ttm.c
index ba223e7db5d..925bf63f376 100644
--- a/sys/dev/pci/drm/radeon/radeon_ttm.c
+++ b/sys/dev/pci/drm/radeon/radeon_ttm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_ttm.c,v 1.8 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: radeon_ttm.c,v 1.9 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
@@ -944,9 +944,9 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
int ret;
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
ret = drm_mm_dump_table(m, mm);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
return ret;
}
#endif
diff --git a/sys/dev/pci/drm/ttm/ttm_bo.c b/sys/dev/pci/drm/ttm/ttm_bo.c
index 7524245995b..e124efb40ad 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo.c,v 1.11 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: ttm_bo.c,v 1.12 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -246,9 +246,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
if (no_wait)
return -EBUSY;
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ret = ttm_bo_wait_unreserved(bo, interruptible);
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
if (unlikely(ret))
return ret;
@@ -295,12 +295,12 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
int put_count = 0;
int ret;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
sequence);
if (likely(ret == 0))
put_count = ttm_bo_del_from_lru(bo);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_bo_list_ref_sub(bo, put_count, true);
@@ -318,9 +318,9 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
struct ttm_bo_global *glob = bo->glob;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
ttm_bo_unreserve_locked(bo);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -520,16 +520,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int put_count;
int ret;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
if (!ret && !bo->sync_obj) {
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
ttm_bo_list_ref_sub(bo, put_count, true);
@@ -538,7 +538,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
}
if (bo->sync_obj)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (!ret) {
atomic_set(&bo->reserved, 0);
@@ -547,7 +547,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
refcount_acquire(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
if (sync_obj) {
driver->sync_obj_flush(sync_obj);
@@ -579,7 +579,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
int put_count;
int ret;
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, true);
if (ret && !no_wait_gpu) {
@@ -591,11 +591,11 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
* no new sync objects can be attached.
*/
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
atomic_set(&bo->reserved, 0);
wakeup(&bo->event_queue);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ret = driver->sync_obj_wait(sync_obj, false, interruptible);
driver->sync_obj_unref(&sync_obj);
@@ -606,14 +606,14 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
* remove sync_obj with ttm_bo_wait, the wait should be
* finished, and no new wait object should have been added.
*/
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, true);
WARN_ON(ret);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
/*
@@ -625,16 +625,16 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
* here.
*/
if (ret) {
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
return 0;
}
} else
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (ret || unlikely(list_empty(&bo->ddestroy))) {
atomic_set(&bo->reserved, 0);
wakeup(&bo->event_queue);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
return ret;
}
@@ -642,7 +642,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
list_del_init(&bo->ddestroy);
++put_count;
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
ttm_bo_list_ref_sub(bo, put_count, true);
@@ -661,7 +661,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
struct ttm_buffer_object *entry = NULL;
int ret = 0;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
if (list_empty(&bdev->ddestroy))
goto out_unlock;
@@ -683,7 +683,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
!remove_all);
else
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
if (refcount_release(&entry->list_kref))
ttm_bo_release_list(entry);
@@ -692,13 +692,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
if (ret || !entry)
goto out;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
if (list_empty(&entry->ddestroy))
break;
}
out_unlock:
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
out:
if (entry && refcount_release(&entry->list_kref))
ttm_bo_release_list(entry);
@@ -778,9 +778,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
if (ret != -ERESTART) {
@@ -835,7 +835,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo;
int ret = -EBUSY, put_count;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
if (!ret)
@@ -843,7 +843,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (ret) {
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
return ret;
}
@@ -858,7 +858,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
put_count = ttm_bo_del_from_lru(bo);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
BUG_ON(ret != 0);
@@ -1088,9 +1088,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
@@ -1359,9 +1359,9 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
* Can't use standard list traversal since we're unlocking.
*/
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, false, false);
if (ret) {
if (allow_errors) {
@@ -1370,9 +1370,9 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
printf("Cleanup eviction failed\n");
}
}
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
}
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
return 0;
}
@@ -1549,13 +1549,13 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
while (ttm_bo_delayed_delete(bdev, true))
;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
if (list_empty(&bdev->ddestroy))
TTM_DEBUG("Delayed destroy list was clean\n");
if (list_empty(&bdev->man[0].lru))
TTM_DEBUG("Swap list was clean\n");
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
write_lock(&bdev->vm_lock);
@@ -1748,9 +1748,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&tmp_obj);
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
continue;
}
@@ -1758,28 +1758,28 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
return -EBUSY;
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
ret = driver->sync_obj_wait(sync_obj,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
return ret;
}
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
if (likely(bo->sync_obj == sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
&bo->priv_flags);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
driver->sync_obj_unref(&tmp_obj);
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
} else {
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
}
}
return 0;
@@ -1798,9 +1798,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
if (unlikely(ret != 0))
return ret;
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, true, no_wait);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
@@ -1828,7 +1828,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
int put_count;
uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
if (!ret)
@@ -1836,7 +1836,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
}
if (ret) {
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
return ret;
}
@@ -1850,7 +1850,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
}
put_count = ttm_bo_del_from_lru(bo);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_bo_list_ref_sub(bo, put_count, true);
@@ -1858,9 +1858,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
* Wait for GPU, then move to system cached.
*/
- mtx_enter(&bo->bdev->fence_lock);
+ spin_lock(&bo->bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, false);
- mtx_leave(&bo->bdev->fence_lock);
+ spin_unlock(&bo->bdev->fence_lock);
if (unlikely(ret != 0))
goto out;
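
ttm_bo.c nests two of these locks, and the order is fixed across the hunks
above: glob->lru_lock (LRU lists) is taken before bdev->fence_lock (sync_obj
state), and they are released innermost-first unless a path can finish early
with only one still held.  A single consistent order is what keeps these
plain, non-irqsave spinlocks deadlock-free.  The skeleton:

	spin_lock(&glob->lru_lock);	/* outer: protects the LRU lists */
	spin_lock(&bdev->fence_lock);	/* inner: protects bo->sync_obj  */
	/* ... examine or steal bo->sync_obj, delist the bo ... */
	spin_unlock(&bdev->fence_lock);	/* inner first */
	spin_unlock(&glob->lru_lock);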
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_manager.c b/sys/dev/pci/drm/ttm/ttm_bo_manager.c
index 441feb32ec0..2a8111844ca 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_manager.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_manager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_manager.c,v 1.2 2014/02/09 10:57:26 jsg Exp $ */
+/* $OpenBSD: ttm_bo_manager.c,v 1.3 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
@@ -64,19 +64,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (unlikely(ret))
return ret;
- mtx_enter(&rman->lock);
+ spin_lock(&rman->lock);
node = drm_mm_search_free_in_range(mm,
mem->num_pages, mem->page_alignment,
placement->fpfn, lpfn, 1);
if (unlikely(node == NULL)) {
- mtx_leave(&rman->lock);
+ spin_unlock(&rman->lock);
return 0;
}
node = drm_mm_get_block_atomic_range(node, mem->num_pages,
mem->page_alignment,
placement->fpfn,
lpfn);
- mtx_leave(&rman->lock);
+ spin_unlock(&rman->lock);
} while (node == NULL);
mem->mm_node = node;
@@ -90,9 +90,9 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
if (mem->mm_node) {
- mtx_enter(&rman->lock);
+ spin_lock(&rman->lock);
drm_mm_put_block(mem->mm_node);
- mtx_leave(&rman->lock);
+ spin_unlock(&rman->lock);
mem->mm_node = NULL;
}
}
@@ -123,15 +123,15 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
- mtx_enter(&rman->lock);
+ spin_lock(&rman->lock);
if (drm_mm_clean(mm)) {
drm_mm_takedown(mm);
- mtx_leave(&rman->lock);
+ spin_unlock(&rman->lock);
kfree(rman);
man->priv = NULL;
return 0;
}
- mtx_leave(&rman->lock);
+ spin_unlock(&rman->lock);
return -EBUSY;
}
@@ -142,9 +142,9 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
#ifdef notyet
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
- mtx_enter(&rman->lock);
+ spin_lock(&rman->lock);
drm_mm_debug_table(&rman->mm, prefix);
- mtx_leave(&rman->lock);
+ spin_unlock(&rman->lock);
#endif
}
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_util.c b/sys/dev/pci/drm/ttm/ttm_bo_util.c
index 5c9e9d38ea9..7a4e6268a90 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_util.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_util.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_util.c,v 1.7 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: ttm_bo_util.c,v 1.8 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -463,12 +463,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
if (bo->sync_obj)
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
else
fbo->sync_obj = NULL;
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
refcount_init(&fbo->list_kref, 1);
refcount_init(&fbo->kref, 1);
fbo->destroy = &ttm_transfered_destroy;
@@ -705,7 +705,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
if (bo->sync_obj) {
tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
@@ -713,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->sync_obj = driver->sync_obj_ref(sync_obj);
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
@@ -736,7 +736,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_vm.c b/sys/dev/pci/drm/ttm/ttm_bo_vm.c
index 9e8aa1316c0..f01f3554957 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_vm.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_vm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_vm.c,v 1.3 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: ttm_bo_vm.c,v 1.4 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -160,17 +160,17 @@ ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
* move.
*/
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&bdev->fence_lock);
if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
ret = ttm_bo_wait(bo, false, true, false);
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
retval = (ret != -ERESTARTSYS) ?
VM_PAGER_ERROR : VM_PAGER_REFAULT;
goto out_unlock;
}
} else
- mtx_leave(&bdev->fence_lock);
+ spin_unlock(&bdev->fence_lock);
ret = ttm_mem_io_lock(man, true);
if (unlikely(ret != 0)) {
diff --git a/sys/dev/pci/drm/ttm/ttm_execbuf_util.c b/sys/dev/pci/drm/ttm/ttm_execbuf_util.c
index 274c472ba52..51c11c0ba8c 100644
--- a/sys/dev/pci/drm/ttm/ttm_execbuf_util.c
+++ b/sys/dev/pci/drm/ttm/ttm_execbuf_util.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_execbuf_util.c,v 1.1 2013/08/12 04:11:53 jsg Exp $ */
+/* $OpenBSD: ttm_execbuf_util.c,v 1.2 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -87,9 +87,9 @@ static int ttm_eu_wait_unreserved_locked(struct list_head *list,
int ret;
ttm_eu_del_from_lru_locked(list);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ret = ttm_bo_wait_unreserved(bo, true);
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
if (unlikely(ret != 0))
ttm_eu_backoff_reservation_locked(list);
return ret;
@@ -106,9 +106,9 @@ void ttm_eu_backoff_reservation(struct list_head *list)
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -144,7 +144,7 @@ int ttm_eu_reserve_buffers(struct list_head *list)
glob = entry->bo->glob;
retry:
- mtx_enter(&glob->lru_lock);
+ spin_lock(&glob->lru_lock);
val_seq = entry->bo->bdev->val_seq++;
list_for_each_entry(entry, list, head) {
@@ -158,14 +158,14 @@ retry_this_bo:
case -EBUSY:
ret = ttm_eu_wait_unreserved_locked(list, bo);
if (unlikely(ret != 0)) {
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return ret;
}
goto retry_this_bo;
case -EAGAIN:
ttm_eu_backoff_reservation_locked(list);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
ret = ttm_bo_wait_unreserved(bo, true);
if (unlikely(ret != 0))
@@ -173,7 +173,7 @@ retry_this_bo:
goto retry;
default:
ttm_eu_backoff_reservation_locked(list);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return ret;
}
@@ -181,14 +181,14 @@ retry_this_bo:
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
ttm_eu_backoff_reservation_locked(list);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return -EBUSY;
}
}
ttm_eu_del_from_lru_locked(list);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return 0;
@@ -211,8 +211,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
driver = bdev->driver;
glob = bo->glob;
- mtx_enter(&glob->lru_lock);
- mtx_enter(&bdev->fence_lock);
+ spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
@@ -221,8 +221,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
ttm_bo_unreserve_locked(bo);
entry->reserved = false;
}
- mtx_leave(&bdev->fence_lock);
- mtx_leave(&glob->lru_lock);
+ spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
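
ttm_eu_reserve_buffers() above is the most intricate user: it reserves every
buffer on the list under lru_lock and, on contention, backs off all of its
reservations, drops the lock, sleeps, and restarts, with val_seq tickets
deciding who yields so that two tasks reserving overlapping lists cannot
deadlock.  Skeleton (the *_locked helpers are hypothetical names):

	retry:
		spin_lock(&glob->lru_lock);
		val_seq = bdev->val_seq++;	/* ticket for this attempt */
		list_for_each_entry(entry, list, head) {
			ret = reserve_one_locked(entry->bo, val_seq);
			if (ret == -EAGAIN) {
				/* An older ticket wins: give everything back,
				 * sleep without the lock, then start over. */
				backoff_all_locked(list);
				spin_unlock(&glob->lru_lock);
				ttm_bo_wait_unreserved(entry->bo, true);
				goto retry;
			}
		}
		spin_unlock(&glob->lru_lock);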
diff --git a/sys/dev/pci/drm/ttm/ttm_lock.c b/sys/dev/pci/drm/ttm/ttm_lock.c
index c2f78a8780b..418396b4fe9 100644
--- a/sys/dev/pci/drm/ttm/ttm_lock.c
+++ b/sys/dev/pci/drm/ttm/ttm_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_lock.c,v 1.2 2013/12/08 07:54:06 jsg Exp $ */
+/* $OpenBSD: ttm_lock.c,v 1.3 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -56,10 +56,10 @@ EXPORT_SYMBOL(ttm_lock_init);
void ttm_read_unlock(struct ttm_lock *lock)
{
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
if (--lock->rw == 0)
wakeup(&lock->queue);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_read_unlock);
@@ -68,17 +68,17 @@ static bool __ttm_read_lock(struct ttm_lock *lock)
{
bool locked = false;
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
if (unlikely(lock->kill_takers)) {
send_sig(lock->signal, current, 0);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return false;
}
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
locked = true;
}
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return locked;
}
#endif
@@ -107,10 +107,10 @@ static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
*locked = false;
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
if (unlikely(lock->kill_takers)) {
send_sig(lock->signal, current, 0);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return false;
}
if (lock->rw >= 0 && lock->flags == 0) {
@@ -120,7 +120,7 @@ static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
} else if (lock->flags == 0) {
block = false;
}
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return !block;
}
@@ -151,10 +151,10 @@ int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
void ttm_write_unlock(struct ttm_lock *lock)
{
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
lock->rw = 0;
wakeup(&lock->queue);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);
@@ -163,10 +163,10 @@ static bool __ttm_write_lock(struct ttm_lock *lock)
{
bool locked = false;
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
if (unlikely(lock->kill_takers)) {
send_sig(lock->signal, current, 0);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return false;
}
if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
@@ -176,7 +176,7 @@ static bool __ttm_write_lock(struct ttm_lock *lock)
} else {
lock->flags |= TTM_WRITE_LOCK_PENDING;
}
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return locked;
}
#endif
@@ -192,10 +192,10 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
ret = wait_event_interruptible(lock->queue,
__ttm_write_lock(lock));
if (unlikely(ret != 0)) {
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
wake_up_all(&lock->queue);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
}
} else
wait_event(lock->queue, __ttm_read_lock(lock));
@@ -207,10 +207,10 @@ EXPORT_SYMBOL(ttm_write_lock);
void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
lock->rw = 1;
wakeup(&lock->queue);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
}
#ifdef notyet
@@ -218,12 +218,12 @@ static int __ttm_vt_unlock(struct ttm_lock *lock)
{
int ret = 0;
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
if (unlikely(!(lock->flags & TTM_VT_LOCK)))
ret = -EINVAL;
lock->flags &= ~TTM_VT_LOCK;
wakeup(&lock->queue);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return ret;
}
@@ -247,7 +247,7 @@ static bool __ttm_vt_lock(struct ttm_lock *lock)
{
bool locked = false;
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
if (lock->rw == 0) {
lock->flags &= ~TTM_VT_LOCK_PENDING;
lock->flags |= TTM_VT_LOCK;
@@ -255,7 +255,7 @@ static bool __ttm_vt_lock(struct ttm_lock *lock)
} else {
lock->flags |= TTM_VT_LOCK_PENDING;
}
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return locked;
}
#endif
@@ -273,10 +273,10 @@ int ttm_vt_lock(struct ttm_lock *lock,
ret = wait_event_interruptible(lock->queue,
__ttm_vt_lock(lock));
if (unlikely(ret != 0)) {
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
lock->flags &= ~TTM_VT_LOCK_PENDING;
wake_up_all(&lock->queue);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return ret;
}
} else
@@ -309,10 +309,10 @@ EXPORT_SYMBOL(ttm_vt_unlock);
void ttm_suspend_unlock(struct ttm_lock *lock)
{
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
lock->flags &= ~TTM_SUSPEND_LOCK;
wakeup(&lock->queue);
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_suspend_unlock);
@@ -321,7 +321,7 @@ static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
bool locked = false;
- mtx_enter(&lock->lock);
+ spin_lock(&lock->lock);
if (lock->rw == 0) {
lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
lock->flags |= TTM_SUSPEND_LOCK;
@@ -329,7 +329,7 @@ static bool __ttm_suspend_lock(struct ttm_lock *lock)
} else {
lock->flags |= TTM_SUSPEND_LOCK_PENDING;
}
- mtx_leave(&lock->lock);
+ spin_unlock(&lock->lock);
return locked;
}
#endif
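
ttm_lock.c builds its read/write/suspend lock from three pieces: a spinlock
guarding the bookkeeping (the rw counter and the flags bits) and a wait
channel.  Each __ttm_*_lock() helper only tests and updates that state under
the spinlock and reports success; the sleeping happens outside it, in
wait_event(), with wakeup() on the queue after every state change.  Reduced
to a sketch:

	static bool
	ttm_example_try_read(struct ttm_lock *lock)
	{
		bool locked = false;

		spin_lock(&lock->lock);
		if (lock->rw >= 0 && lock->flags == 0) {
			++lock->rw;	/* one more reader */
			locked = true;
		}
		spin_unlock(&lock->lock);
		/* Caller loops: wait_event(lock->queue, ttm_example_try_read(lock)); */
		return locked;
	}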
diff --git a/sys/dev/pci/drm/ttm/ttm_memory.c b/sys/dev/pci/drm/ttm/ttm_memory.c
index 8f2c38ca8c9..9c0c56a8f3b 100644
--- a/sys/dev/pci/drm/ttm/ttm_memory.c
+++ b/sys/dev/pci/drm/ttm/ttm_memory.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_memory.c,v 1.6 2015/02/09 03:15:41 dlg Exp $ */
+/* $OpenBSD: ttm_memory.c,v 1.7 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -87,7 +87,7 @@ static ssize_t ttm_mem_zone_show(struct kobject *kobj,
container_of(kobj, struct ttm_mem_zone, kobj);
uint64_t val = 0;
- mtx_enter(&zone->glob->lock);
+ spin_lock(&zone->glob->lock);
if (attr == &ttm_mem_sys)
val = zone->zone_mem;
else if (attr == &ttm_mem_emer)
@@ -98,7 +98,7 @@ static ssize_t ttm_mem_zone_show(struct kobject *kobj,
val = zone->swap_limit;
else if (attr == &ttm_mem_used)
val = zone->used_mem;
- mtx_leave(&zone->glob->lock);
+ spin_unlock(&zone->glob->lock);
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(unsigned long long) val >> 10);
@@ -124,7 +124,7 @@ static ssize_t ttm_mem_zone_store(struct kobject *kobj,
val64 = val;
val64 <<= 10;
- mtx_enter(&zone->glob->lock);
+ spin_lock(&zone->glob->lock);
if (val64 > zone->zone_mem)
val64 = zone->zone_mem;
if (attr == &ttm_mem_emer) {
@@ -137,7 +137,7 @@ static ssize_t ttm_mem_zone_store(struct kobject *kobj,
zone->emer_mem = val64;
} else if (attr == &ttm_mem_swap)
zone->swap_limit = val64;
- mtx_leave(&zone->glob->lock);
+ spin_unlock(&zone->glob->lock);
ttm_check_swapping(zone->glob);
@@ -217,21 +217,21 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
int ret;
struct ttm_mem_shrink *shrink;
- mtx_enter(&glob->lock);
+ spin_lock(&glob->lock);
if (glob->shrink == NULL)
goto out;
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
shrink = glob->shrink;
- mtx_leave(&glob->lock);
+ spin_unlock(&glob->lock);
ret = shrink->do_shrink(shrink);
- mtx_enter(&glob->lock);
+ spin_lock(&glob->lock);
if (unlikely(ret != 0))
goto out;
}
out:
glob->task_queued = false;
- mtx_leave(&glob->lock);
+ spin_unlock(&glob->lock);
}
static void ttm_shrink_work(void *arg1)
@@ -399,7 +399,7 @@ static void ttm_check_swapping(struct ttm_mem_global *glob)
unsigned int i;
struct ttm_mem_zone *zone;
- mtx_enter(&glob->lock);
+ spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (zone->used_mem > zone->swap_limit) {
@@ -412,7 +412,7 @@ static void ttm_check_swapping(struct ttm_mem_global *glob)
needs_swapping = false;
else
glob->task_queued = true;
- mtx_leave(&glob->lock);
+ spin_unlock(&glob->lock);
if (unlikely(needs_swapping))
task_add(glob->swap_queue, &glob->task);
@@ -425,14 +425,14 @@ static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
unsigned int i;
struct ttm_mem_zone *zone;
- mtx_enter(&glob->lock);
+ spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem -= amount;
}
- mtx_leave(&glob->lock);
+ spin_unlock(&glob->lock);
}
void ttm_mem_global_free(struct ttm_mem_global *glob,
@@ -451,7 +451,7 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
unsigned int i;
struct ttm_mem_zone *zone;
- mtx_enter(&glob->lock);
+ spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
@@ -475,7 +475,7 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
ret = 0;
out_unlock:
- mtx_leave(&glob->lock);
+ spin_unlock(&glob->lock);
ttm_check_swapping(glob);
return ret;
diff --git a/sys/dev/pci/drm/ttm/ttm_memory.h b/sys/dev/pci/drm/ttm/ttm_memory.h
index 6550112b1a1..50dd163ea83 100644
--- a/sys/dev/pci/drm/ttm/ttm_memory.h
+++ b/sys/dev/pci/drm/ttm/ttm_memory.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_memory.h,v 1.2 2013/10/29 06:30:57 jsg Exp $ */
+/* $OpenBSD: ttm_memory.h,v 1.3 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -112,13 +112,13 @@ static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
struct ttm_mem_shrink *shrink)
{
- mtx_enter(&glob->lock);
+ spin_lock(&glob->lock);
if (glob->shrink != NULL) {
- mtx_leave(&glob->lock);
+ spin_unlock(&glob->lock);
return -EBUSY;
}
glob->shrink = shrink;
- mtx_leave(&glob->lock);
+ spin_unlock(&glob->lock);
return 0;
}
@@ -133,10 +133,10 @@ static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
struct ttm_mem_shrink *shrink)
{
- mtx_enter(&glob->lock);
+ spin_lock(&glob->lock);
BUG_ON(glob->shrink != shrink);
glob->shrink = NULL;
- mtx_leave(&glob->lock);
+ spin_unlock(&glob->lock);
}
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
diff --git a/sys/dev/pci/drm/ttm/ttm_object.c b/sys/dev/pci/drm/ttm/ttm_object.c
index a8decab0fd6..9456e7b95d8 100644
--- a/sys/dev/pci/drm/ttm/ttm_object.c
+++ b/sys/dev/pci/drm/ttm/ttm_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_object.c,v 1.3 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: ttm_object.c,v 1.4 2015/02/10 10:50:49 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
@@ -154,11 +154,11 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
refcount_init(&base->refcount, 1);
- mtx_enter(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
&base->hash,
(unsigned long)base, 31, 0, 0);
- mtx_leave(&tdev->object_lock);
+ spin_unlock(&tdev->object_lock);
if (unlikely(ret != 0))
goto out_err0;
@@ -170,9 +170,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0;
out_err1:
- mtx_enter(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
- mtx_leave(&tdev->object_lock);
+ spin_unlock(&tdev->object_lock);
out_err0:
return ret;
}
@@ -182,9 +182,9 @@ static void ttm_release_base(struct ttm_base_object *base)
{
struct ttm_object_device *tdev = base->tfile->tdev;
- mtx_enter(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
- mtx_leave(&tdev->object_lock);
+ spin_unlock(&tdev->object_lock);
/*
* Note: We don't use synchronize_rcu() here because it's far
@@ -439,9 +439,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL;
- mtx_enter(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
- mtx_leave(&tdev->object_lock);
+ spin_unlock(&tdev->object_lock);
kfree(tdev);
}
diff --git a/sys/dev/pci/drm/ttm/ttm_page_alloc.c b/sys/dev/pci/drm/ttm/ttm_page_alloc.c
index 8345038b6e7..e5a8264237c 100644
--- a/sys/dev/pci/drm/ttm/ttm_page_alloc.c
+++ b/sys/dev/pci/drm/ttm/ttm_page_alloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_page_alloc.c,v 1.6 2014/03/28 17:57:11 mpi Exp $ */
+/* $OpenBSD: ttm_page_alloc.c,v 1.7 2015/02/10 10:50:49 jsg Exp $ */
/*
* Copyright (c) Red Hat Inc.
@@ -339,6 +339,7 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
**/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
+ unsigned long irq_flags;
struct vm_page *p, *p1;
struct vm_page **pages_to_free;
unsigned freed_pages = 0,
@@ -356,7 +357,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
}
restart:
- mtx_enter(&pool->lock);
+ spin_lock_irqsave(&pool->lock, irq_flags);
TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
if (freed_pages >= npages_to_free)
@@ -374,7 +375,7 @@ restart:
* Because changing page caching is costly
* we unlock the pool to prevent stalling.
*/
- mtx_leave(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
ttm_pages_put(pages_to_free, freed_pages);
if (likely(nr_free != FREE_ALL_PAGES))
@@ -392,7 +393,7 @@ restart:
goto restart;
/* Not allowed to fall through or break because
- * following context is inside mutex while we are
+ * following context is inside spinlock while we are
* outside here.
*/
goto out;
@@ -409,7 +410,7 @@ restart:
nr_free -= freed_pages;
}
- mtx_leave(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
if (freed_pages)
ttm_pages_put(pages_to_free, freed_pages);
@@ -601,7 +602,8 @@ out:
* pages is small.
*/
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+ unsigned long *irq_flags)
{
struct vm_page *p;
int r;
@@ -627,12 +629,12 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
* Can't change page caching if in irqsave context. We have to
* drop the pool->lock.
*/
- mtx_leave(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
TAILQ_INIT(&new_pages);
r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
ttm_flags, cstate, alloc_size);
- mtx_enter(&pool->lock);
+ spin_lock_irqsave(&pool->lock, *irq_flags);
if (!r) {
TAILQ_CONCAT(&pool->list, &new_pages, pageq);
@@ -663,11 +665,12 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
enum ttm_caching_state cstate,
unsigned count)
{
+ unsigned long irq_flags;
vm_page_t p;
unsigned i;
- mtx_enter(&pool->lock);
- ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
if (count >= pool->npages) {
/* take all pages from the pool */
@@ -684,7 +687,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
pool->npages -= count;
count = 0;
out:
- mtx_leave(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
return count;
}
@@ -692,6 +695,7 @@ out:
static void ttm_put_pages(struct vm_page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
+ unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
unsigned i;
@@ -706,7 +710,7 @@ static void ttm_put_pages(struct vm_page **pages, unsigned npages, int flags,
return;
}
- mtx_enter(&pool->lock);
+ spin_lock_irqsave(&pool->lock, irq_flags);
for (i = 0; i < npages; i++) {
if (pages[i]) {
TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
@@ -723,7 +727,7 @@ static void ttm_put_pages(struct vm_page **pages, unsigned npages, int flags,
if (npages < NUM_PAGES_TO_ALLOC)
npages = NUM_PAGES_TO_ALLOC;
}
- mtx_leave(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
ttm_page_pool_free(pool, npages);
}
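
The one signature change in this commit is also the most instructive:
ttm_page_pool_fill_locked() now takes unsigned long *irq_flags.  A helper
entered with an irqsave lock held, which must drop that lock to do work that
can sleep or change page caching, has to release and retake it with the
caller's saved flags, so the flags travel by pointer.  The pattern:

	static void
	ttm_example_fill_locked(struct ttm_page_pool *pool, unsigned long *irq_flags)
	{
		/* Entered with pool->lock held via spin_lock_irqsave(). */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* ... allocate fresh pages; may sleep, lock dropped ... */

		spin_lock_irqsave(&pool->lock, *irq_flags);
		/* Lock held again on return, as the caller expects. */
	}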