author     Jonathan Gray <jsg@cvs.openbsd.org>  2015-02-10 06:19:37 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>  2015-02-10 06:19:37 +0000
commit     00efb97d6be352e5d712e51df8fbb8f2d7bfe7f6 (patch)
tree       778d737fe2eb6f8a3010348ef65de3538466f62a
parent     1862e4a781f54cbb4e9ad8c1ae3542f4dee0ccd5 (diff)
switch most rwlock calls back to their linux equivalents
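
The Linux-style calls restored here are thin wrappers over the native
rwlock(9) primitives, so the change is mechanical: each rw_enter/rw_exit
pair becomes the corresponding Linux mutex, rw_semaphore or rwlock call.
A minimal sketch of the compat shims this mapping relies on, reconstructed
from the substitutions in the diff below (the macro names follow Linux;
their exact definition site in a drm compat header is an assumption, not
something this commit shows):

	/*
	 * Hypothetical compat shims: OpenBSD's struct rwlock stands in
	 * for Linux's struct mutex, struct rw_semaphore and rwlock_t.
	 */
	#include <sys/rwlock.h>

	#define mutex_lock(rwl)		rw_enter_write(rwl)
	#define mutex_unlock(rwl)	rw_exit_write(rwl)
	/*
	 * rw_enter() returns a positive errno; Linux callers expect
	 * -errno, hence the negation (compare the removed line in the
	 * i915_mutex_lock_interruptible hunk below).
	 */
	#define mutex_lock_interruptible(rwl) \
		(-rw_enter((rwl), RW_WRITE | RW_INTR))

	#define down_read(rwl)		rw_enter_read(rwl)
	#define up_read(rwl)		rw_exit_read(rwl)
	#define down_write(rwl)		rw_enter_write(rwl)
	#define up_write(rwl)		rw_exit_write(rwl)

	#define read_lock(rwl)		rw_enter_read(rwl)
	#define read_unlock(rwl)	rw_exit_read(rwl)
	#define write_lock(rwl)		rw_enter_write(rwl)
	#define write_unlock(rwl)	rw_exit_write(rwl)

Keeping the upstream spellings in the driver sources shrinks the local
diff against Linux, which is presumably why the calls are switched back
rather than left in rwlock form.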
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem.c         4
-rw-r--r--  sys/dev/pci/drm/i915/i915_irq.c         6
-rw-r--r--  sys/dev/pci/drm/i915/intel_pm.c        10
-rw-r--r--  sys/dev/pci/drm/radeon/atom.c           6
-rw-r--r--  sys/dev/pci/drm/radeon/atom.h           4
-rw-r--r--  sys/dev/pci/drm/radeon/r600.c           6
-rw-r--r--  sys/dev/pci/drm/radeon/radeon.h        12
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_cs.c     20
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_device.c 22
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_fence.c  28
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_gart.c   60
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_gem.c    20
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_i2c.c    22
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_object.c 23
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_pm.c     57
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_prime.c  20
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_ring.c   28
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_ttm.c     6
-rw-r--r--  sys/dev/pci/drm/radeon/si.c             6
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo.c           34
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_api.h        4
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_driver.h    18
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_util.c       8
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_vm.c        10
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_object.c       28
25 files changed, 230 insertions, 232 deletions
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index 73f0a124abf..5b7971b6545 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem.c,v 1.80 2015/02/10 01:39:32 jsg Exp $ */
+/* $OpenBSD: i915_gem.c,v 1.81 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -159,7 +159,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
if (ret)
return ret;
- ret = -rw_enter(&dev->struct_mutex, RW_WRITE | RW_INTR);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
diff --git a/sys/dev/pci/drm/i915/i915_irq.c b/sys/dev/pci/drm/i915/i915_irq.c
index 9ae017fe4cc..d84f5a1344e 100644
--- a/sys/dev/pci/drm/i915/i915_irq.c
+++ b/sys/dev/pci/drm/i915/i915_irq.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_irq.c,v 1.16 2015/02/10 03:39:41 jsg Exp $ */
+/* $OpenBSD: i915_irq.c,v 1.17 2015/02/10 06:19:36 jsg Exp $ */
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
*/
/*
@@ -375,7 +375,7 @@ static void gen6_pm_rps_work(void *arg1)
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
- rw_enter_write(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->rps.hw_lock);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1;
@@ -390,7 +390,7 @@ static void gen6_pm_rps_work(void *arg1)
gen6_set_rps(dev, new_delay);
}
- rw_exit_write(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
diff --git a/sys/dev/pci/drm/i915/intel_pm.c b/sys/dev/pci/drm/i915/intel_pm.c
index c138295f3d9..233a24eca47 100644
--- a/sys/dev/pci/drm/i915/intel_pm.c
+++ b/sys/dev/pci/drm/i915/intel_pm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_pm.c,v 1.23 2015/02/10 01:39:32 jsg Exp $ */
+/* $OpenBSD: intel_pm.c,v 1.24 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright © 2012 Intel Corporation
*
@@ -3480,9 +3480,9 @@ void intel_disable_gt_powersave(struct drm_device *dev)
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
timeout_del(&dev_priv->rps.delayed_resume_to);
task_del(systq, &dev_priv->rps.delayed_resume_task);
- rw_enter_write(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
- rw_exit_write(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
}
@@ -3491,10 +3491,10 @@ static void intel_gen6_powersave_work(void *arg1)
drm_i915_private_t *dev_priv = arg1;
struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- rw_enter_write(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->rps.hw_lock);
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
- rw_exit_write(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
static void
diff --git a/sys/dev/pci/drm/radeon/atom.c b/sys/dev/pci/drm/radeon/atom.c
index 1129ef2a785..958b8c3147b 100644
--- a/sys/dev/pci/drm/radeon/atom.c
+++ b/sys/dev/pci/drm/radeon/atom.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: atom.c,v 1.4 2014/06/20 06:50:04 jsg Exp $ */
+/* $OpenBSD: atom.c,v 1.5 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
@@ -1227,7 +1227,7 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
- rw_enter_write(&ctx->rwlock);
+ mutex_lock(&ctx->mutex);
/* reset data block */
ctx->data_block = 0;
/* reset reg block */
@@ -1240,7 +1240,7 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
- rw_exit_write(&ctx->rwlock);
+ mutex_unlock(&ctx->mutex);
return r;
}
diff --git a/sys/dev/pci/drm/radeon/atom.h b/sys/dev/pci/drm/radeon/atom.h
index 8abc4d2ecba..96c80b5db78 100644
--- a/sys/dev/pci/drm/radeon/atom.h
+++ b/sys/dev/pci/drm/radeon/atom.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: atom.h,v 1.1 2013/08/12 04:11:53 jsg Exp $ */
+/* $OpenBSD: atom.h,v 1.2 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
@@ -125,7 +125,7 @@ struct card_info {
struct atom_context {
struct card_info *card;
- struct rwlock rwlock;
+ struct rwlock mutex;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;
diff --git a/sys/dev/pci/drm/radeon/r600.c b/sys/dev/pci/drm/radeon/r600.c
index 9dbdaa52e0b..ea33c16c490 100644
--- a/sys/dev/pci/drm/radeon/r600.c
+++ b/sys/dev/pci/drm/radeon/r600.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: r600.c,v 1.10 2014/07/12 18:48:52 tedu Exp $ */
+/* $OpenBSD: r600.c,v 1.11 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -4364,10 +4364,10 @@ uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
- rw_enter_write(&rdev->gpu_clock_rwlock);
+ mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
- rw_exit_write(&rdev->gpu_clock_rwlock);
+ mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
diff --git a/sys/dev/pci/drm/radeon/radeon.h b/sys/dev/pci/drm/radeon/radeon.h
index 65778594414..d9a8a780f51 100644
--- a/sys/dev/pci/drm/radeon/radeon.h
+++ b/sys/dev/pci/drm/radeon/radeon.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon.h,v 1.10 2015/01/27 03:17:36 dlg Exp $ */
+/* $OpenBSD: radeon.h,v 1.11 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -411,7 +411,7 @@ struct radeon_sa_bo {
* GEM objects.
*/
struct radeon_gem {
- struct rwlock rwlock;
+ struct rwlock mutex;
struct list_head objects;
};
@@ -692,7 +692,7 @@ struct radeon_vm {
/* array of page tables, one for each page directory entry */
struct radeon_sa_bo **page_tables;
- struct rwlock rwlock;
+ struct rwlock mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
/* last flush or NULL if we still need to flush */
@@ -1060,7 +1060,7 @@ struct radeon_power_state {
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
struct radeon_pm {
- struct rwlock rwlock;
+ struct rwlock mutex;
/* write locked while reprogramming mclk */
struct rwlock mclk_lock;
u32 active_crtcs;
@@ -1658,7 +1658,7 @@ struct radeon_device {
struct task hotplug_task;
struct task audio_task;
int num_crtc; /* number of crtcs */
- struct rwlock dc_hw_i2c_rwlock; /* display controller hw i2c rwlock */
+ struct rwlock dc_hw_i2c_mutex; /* display controller hw i2c mutex */
bool audio_enabled;
struct r600_audio audio_status; /* audio stuff */
#ifdef notyet
@@ -1674,7 +1674,7 @@ struct radeon_device {
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
- struct rwlock gpu_clock_rwlock;
+ struct rwlock gpu_clock_mutex;
/* ACPI interface */
struct radeon_atif atif;
struct radeon_atcs atcs;
diff --git a/sys/dev/pci/drm/radeon/radeon_cs.c b/sys/dev/pci/drm/radeon/radeon_cs.c
index c5e92f5eadf..ca02ed5f7a8 100644
--- a/sys/dev/pci/drm/radeon/radeon_cs.c
+++ b/sys/dev/pci/drm/radeon/radeon_cs.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_cs.c,v 1.2 2014/02/09 11:03:31 jsg Exp $ */
+/* $OpenBSD: radeon_cs.c,v 1.3 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Jerome Glisse.
* All Rights Reserved.
@@ -468,8 +468,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return r;
}
- rw_enter_write(&rdev->vm_manager.lock);
- rw_enter_write(&vm->rwlock);
+ mutex_lock(&rdev->vm_manager.lock);
+ mutex_lock(&vm->mutex);
r = radeon_vm_alloc_pt(rdev, vm);
if (r) {
goto out;
@@ -495,8 +495,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
out:
radeon_vm_add_to_lru(rdev, vm);
- rw_exit_write(&vm->rwlock);
- rw_exit_write(&rdev->vm_manager.lock);
+ mutex_unlock(&vm->mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
return r;
}
@@ -516,9 +516,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_cs_parser parser;
int r;
- rw_enter_read(&rdev->exclusive_lock);
+ down_read(&rdev->exclusive_lock);
if (!rdev->accel_working) {
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
return -EBUSY;
}
/* initialize parser */
@@ -533,7 +533,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
@@ -542,7 +542,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
@@ -556,7 +556,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
out:
radeon_cs_parser_fini(&parser, r);
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
diff --git a/sys/dev/pci/drm/radeon/radeon_device.c b/sys/dev/pci/drm/radeon/radeon_device.c
index 31bdc158821..5e095dbbeb2 100644
--- a/sys/dev/pci/drm/radeon/radeon_device.c
+++ b/sys/dev/pci/drm/radeon/radeon_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_device.c,v 1.6 2014/04/07 06:43:11 jsg Exp $ */
+/* $OpenBSD: radeon_device.c,v 1.7 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -771,7 +771,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
- rw_init(&rdev->mode_info.atom_context->rwlock, "atomcon");
+ rw_init(&rdev->mode_info.atom_context->mutex, "atomcon");
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
@@ -1033,14 +1033,14 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
- /* rwlock initialization are all done here so we
+ /* mutex initialization are all done here so we
* can recall function without having locking issues */
rw_init(&rdev->ring_lock, "ring");
- rw_init(&rdev->dc_hw_i2c_rwlock, "dciic");
+ rw_init(&rdev->dc_hw_i2c_mutex, "dciic");
atomic_set(&rdev->ih.lock, 0);
- rw_init(&rdev->gem.rwlock, "gem");
- rw_init(&rdev->pm.rwlock, "pm");
- rw_init(&rdev->gpu_clock_rwlock, "gpuclk");
+ rw_init(&rdev->gem.mutex, "gem");
+ rw_init(&rdev->pm.mutex, "pm");
+ rw_init(&rdev->gpu_clock_mutex, "gpuclk");
rw_init(&rdev->pm.mclk_lock, "mclk");
rw_init(&rdev->exclusive_lock, "rdnexc");
#ifdef notyet
@@ -1275,7 +1275,7 @@ int radeon_suspend_kms(struct drm_device *dev)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
- rw_enter_write(&rdev->ring_lock);
+ mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
r = radeon_fence_wait_empty_locked(rdev, i);
@@ -1287,7 +1287,7 @@ int radeon_suspend_kms(struct drm_device *dev)
if (force_completion) {
radeon_fence_driver_force_completion(rdev);
}
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -1400,7 +1400,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int i, r;
int resched;
- rw_enter_write(&rdev->exclusive_lock);
+ down_write(&rdev->exclusive_lock);
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1457,7 +1457,7 @@ retry:
dev_info(rdev->dev, "GPU reset failed\n");
}
- rw_exit_write(&rdev->exclusive_lock);
+ up_write(&rdev->exclusive_lock);
return r;
}
diff --git a/sys/dev/pci/drm/radeon/radeon_fence.c b/sys/dev/pci/drm/radeon/radeon_fence.c
index 0e6714d6e25..c2bf182f532 100644
--- a/sys/dev/pci/drm/radeon/radeon_fence.c
+++ b/sys/dev/pci/drm/radeon/radeon_fence.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_fence.c,v 1.2 2014/02/09 11:03:31 jsg Exp $ */
+/* $OpenBSD: radeon_fence.c,v 1.3 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
@@ -108,7 +108,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
- /* we are protected by the ring emission rwlock */
+ /* we are protected by the ring emission mutex */
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
@@ -343,13 +343,13 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
}
if (lock_ring) {
- rw_enter_write(&rdev->ring_lock);
+ mutex_lock(&rdev->ring_lock);
}
/* test if somebody else has already decided that this is a lockup */
if (last_activity != rdev->fence_drv[ring].last_activity) {
if (lock_ring) {
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
}
continue;
}
@@ -367,13 +367,13 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
/* mark the ring as not ready any more */
rdev->ring[ring].ready = false;
if (lock_ring) {
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
}
return -EDEADLK;
}
if (lock_ring) {
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
}
}
}
@@ -519,7 +519,7 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
continue;
}
- rw_enter_write(&rdev->ring_lock);
+ mutex_lock(&rdev->ring_lock);
for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
tmp = rdev->fence_drv[i].last_activity;
@@ -528,7 +528,7 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
/* test if somebody else has already decided that this is a lockup */
if (last_activity != tmp) {
last_activity = tmp;
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
continue;
}
@@ -544,10 +544,10 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
/* mark the ring as not ready any more */
rdev->ring[ring].ready = false;
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
return -EDEADLK;
}
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
}
}
return 0;
@@ -728,7 +728,7 @@ bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
return false;
}
- /* we are protected by the ring rwlock */
+ /* we are protected by the ring mutex */
fdrv = &fence->rdev->fence_drv[dst_ring];
if (fence->seq <= fdrv->sync_seq[fence->ring]) {
return false;
@@ -759,7 +759,7 @@ void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
return;
}
- /* we are protected by the ring rwlock */
+ /* we are protected by the ring mutex */
src = &fence->rdev->fence_drv[fence->ring];
dst = &fence->rdev->fence_drv[dst_ring];
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -876,7 +876,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
{
int ring, r;
- rw_enter_write(&rdev->ring_lock);
+ mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
@@ -889,7 +889,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
}
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
}
/**
diff --git a/sys/dev/pci/drm/radeon/radeon_gart.c b/sys/dev/pci/drm/radeon/radeon_gart.c
index d1f65aab3c6..aeee163f059 100644
--- a/sys/dev/pci/drm/radeon/radeon_gart.c
+++ b/sys/dev/pci/drm/radeon/radeon_gart.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_gart.c,v 1.4 2014/02/09 23:57:04 jsg Exp $ */
+/* $OpenBSD: radeon_gart.c,v 1.5 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -503,7 +503,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
*
* Free the page table of a specific vm (cayman+).
*
- * Global and local rwlock must be lock!
+ * Global and local mutex must be lock!
*/
static void radeon_vm_free_pt(struct radeon_device *rdev,
struct radeon_vm *vm)
@@ -545,18 +545,18 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
if (!rdev->vm_manager.enabled)
return;
- rw_enter_write(&rdev->vm_manager.lock);
+ mutex_lock(&rdev->vm_manager.lock);
/* free all allocated page tables */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
- rw_enter_write(&vm->rwlock);
+ mutex_lock(&vm->mutex);
radeon_vm_free_pt(rdev, vm);
- rw_exit_write(&vm->rwlock);
+ mutex_unlock(&vm->mutex);
}
for (i = 0; i < RADEON_NUM_VM; ++i) {
radeon_fence_unref(&rdev->vm_manager.active[i]);
}
radeon_asic_vm_fini(rdev);
- rw_exit_write(&rdev->vm_manager.lock);
+ mutex_unlock(&rdev->vm_manager.lock);
radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
@@ -572,7 +572,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
* Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
* Returns 0 for success, -ENOMEM for failure.
*
- * Global and local rwlock must be locked!
+ * Global and local mutex must be locked!
*/
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
@@ -586,9 +586,9 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
if (vm_evict == vm)
return -ENOMEM;
- rw_enter_write(&vm_evict->rwlock);
+ mutex_lock(&vm_evict->mutex);
radeon_vm_free_pt(rdev, vm_evict);
- rw_exit_write(&vm_evict->rwlock);
+ mutex_unlock(&vm_evict->mutex);
return 0;
}
@@ -601,7 +601,7 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
* Allocate a page table for the requested vm (cayman+).
* Returns 0 for success, error for failure.
*
- * Global and local rwlock must be locked!
+ * Global and local mutex must be locked!
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
@@ -658,7 +658,7 @@ retry:
*
* Add the allocated page table to the LRU list (cayman+).
*
- * Global rwlock must be locked!
+ * Global mutex must be locked!
*/
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
@@ -676,7 +676,7 @@ void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
* Allocate an id for the vm (cayman+).
* Returns the fence we need to sync to (if any).
*
- * Global and local rwlock must be locked!
+ * Global and local mutex must be locked!
*/
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring)
@@ -730,7 +730,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
* Fence the vm (cayman+).
* Set the fence used to protect page table and id.
*
- * Global and local rwlock must be locked!
+ * Global and local mutex must be locked!
*/
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
@@ -801,10 +801,10 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
- rw_enter_write(&vm->rwlock);
+ mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
- rw_exit_write(&vm->rwlock);
+ mutex_unlock(&vm->mutex);
return bo_va;
}
@@ -853,7 +853,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
eoffset = last_pfn = 0;
}
- rw_enter_write(&vm->rwlock);
+ mutex_lock(&vm->mutex);
head = &vm->va;
last_offset = 0;
list_for_each_entry(tmp, &vm->va, vm_list) {
@@ -871,7 +871,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
- rw_exit_write(&vm->rwlock);
+ mutex_unlock(&vm->mutex);
return -EINVAL;
}
last_offset = tmp->eoffset;
@@ -884,7 +884,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
bo_va->valid = false;
list_move(&bo_va->vm_list, head);
- rw_exit_write(&vm->rwlock);
+ mutex_unlock(&vm->mutex);
return 0;
}
@@ -923,7 +923,7 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
* and updates the page directory (cayman+).
* Returns 0 for success, error for failure.
*
- * Global and local rwlock must be locked!
+ * Global and local mutex must be locked!
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
@@ -1003,7 +1003,7 @@ retry:
*
* Update the page tables in the range @start - @end (cayman+).
*
- * Global and local rwlock must be locked!
+ * Global and local mutex must be locked!
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
@@ -1070,7 +1070,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* Fill in the page table entries for @bo (cayman+).
* Returns 0 for success, -EINVAL for failure.
*
- * Object have to be reserved & global and local rwlock must be locked!
+ * Object have to be reserved & global and local mutex must be locked!
*/
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
@@ -1209,14 +1209,14 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
{
int r = 0;
- rw_enter_write(&rdev->vm_manager.lock);
- rw_enter_write(&bo_va->vm->rwlock);
+ mutex_lock(&rdev->vm_manager.lock);
+ mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset) {
r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
}
- rw_exit_write(&rdev->vm_manager.lock);
+ mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
- rw_exit_write(&bo_va->vm->rwlock);
+ mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
kfree(bo_va);
@@ -1254,7 +1254,7 @@ void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
vm->id = 0;
vm->fence = NULL;
- rw_init(&vm->rwlock, "vmlk");
+ rw_init(&vm->mutex, "vmlk");
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
}
@@ -1273,10 +1273,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int r;
- rw_enter_write(&rdev->vm_manager.lock);
- rw_enter_write(&vm->rwlock);
+ mutex_lock(&rdev->vm_manager.lock);
+ mutex_lock(&vm->mutex);
radeon_vm_free_pt(rdev, vm);
- rw_exit_write(&rdev->vm_manager.lock);
+ mutex_unlock(&rdev->vm_manager.lock);
if (!list_empty(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
@@ -1292,5 +1292,5 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
}
radeon_fence_unref(&vm->fence);
radeon_fence_unref(&vm->last_flush);
- rw_exit_write(&vm->rwlock);
+ mutex_unlock(&vm->mutex);
}
diff --git a/sys/dev/pci/drm/radeon/radeon_gem.c b/sys/dev/pci/drm/radeon/radeon_gem.c
index a99dde76a75..840d8c8bd3d 100644
--- a/sys/dev/pci/drm/radeon/radeon_gem.c
+++ b/sys/dev/pci/drm/radeon/radeon_gem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_gem.c,v 1.4 2013/12/05 13:29:56 kettenis Exp $ */
+/* $OpenBSD: radeon_gem.c,v 1.5 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -94,9 +94,9 @@ retry:
}
*obj = &robj->gem_base;
- rw_enter_write(&rdev->gem.rwlock);
+ mutex_lock(&rdev->gem.mutex);
list_add_tail(&robj->list, &rdev->gem.objects);
- rw_exit_write(&rdev->gem.rwlock);
+ mutex_unlock(&rdev->gem.mutex);
return 0;
}
@@ -263,14 +263,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
- rw_enter_read(&rdev->exclusive_lock);
+ down_read(&rdev->exclusive_lock);
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
args->initial_domain, false,
false, &gobj);
if (r) {
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -278,12 +278,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
return 0;
}
@@ -300,12 +300,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
/* for now if someone requests domain CPU -
* just make sure the buffer is finished with */
- rw_enter_read(&rdev->exclusive_lock);
+ down_read(&rdev->exclusive_lock);
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
@@ -313,7 +313,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_unreference_unlocked(gobj);
- rw_exit_read(&rdev->exclusive_lock);
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
diff --git a/sys/dev/pci/drm/radeon/radeon_i2c.c b/sys/dev/pci/drm/radeon/radeon_i2c.c
index fad85fb4781..e291b7d9b99 100644
--- a/sys/dev/pci/drm/radeon/radeon_i2c.c
+++ b/sys/dev/pci/drm/radeon/radeon_i2c.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_i2c.c,v 1.3 2014/02/15 12:40:08 jsg Exp $ */
+/* $OpenBSD: radeon_i2c.c,v 1.4 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -115,7 +115,7 @@ pre_xfer(void *cookie)
else
reg = RADEON_GPIO_CRT2_DDC;
- rw_enter_write(&rdev->dc_hw_i2c_rwlock);
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
if (rec->a_clk_reg == reg) {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
@@ -123,7 +123,7 @@ pre_xfer(void *cookie)
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
}
- rw_exit_write(&rdev->dc_hw_i2c_rwlock);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
}
}
@@ -433,9 +433,9 @@ static int r100_hw_i2c_xfer(struct i2c_controller *i2c_adap,
u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
u32 tmp, reg;
- rw_enter_write(&rdev->dc_hw_i2c_rwlock);
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
/* take the pm lock since we need a constant sclk */
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
prescale = radeon_get_i2c_prescale(rdev);
@@ -665,8 +665,8 @@ done:
WREG32(RADEON_BIOS_6_SCRATCH, tmp);
}
- rw_exit_write(&rdev->pm.rwlock);
- rw_exit_write(&rdev->dc_hw_i2c_rwlock);
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
return ret;
}
@@ -686,9 +686,9 @@ static int r500_hw_i2c_xfer(struct i2c_controller *i2c_adap,
u32 tmp, reg;
u32 saved1, saved2;
- rw_enter_write(&rdev->dc_hw_i2c_rwlock);
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
/* take the pm lock since we need a constant sclk */
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
prescale = radeon_get_i2c_prescale(rdev);
@@ -901,8 +901,8 @@ done:
tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
WREG32(RADEON_BIOS_6_SCRATCH, tmp);
- rw_exit_write(&rdev->pm.rwlock);
- rw_exit_write(&rdev->dc_hw_i2c_rwlock);
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
return ret;
}
diff --git a/sys/dev/pci/drm/radeon/radeon_object.c b/sys/dev/pci/drm/radeon/radeon_object.c
index 6a10d22732b..deb435fd547 100644
--- a/sys/dev/pci/drm/radeon/radeon_object.c
+++ b/sys/dev/pci/drm/radeon/radeon_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_object.c,v 1.5 2015/02/10 01:39:32 jsg Exp $ */
+/* $OpenBSD: radeon_object.c,v 1.6 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
@@ -63,9 +63,9 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
struct radeon_bo *bo;
bo = container_of(tbo, struct radeon_bo, tbo);
- rw_enter_write(&bo->rdev->gem.rwlock);
+ mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
- rw_exit_write(&bo->rdev->gem.rwlock);
+ mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
radeon_bo_clear_va(bo);
drm_gem_object_release(&bo->gem_base);
@@ -151,11 +151,11 @@ int radeon_bo_create(struct radeon_device *rdev,
INIT_LIST_HEAD(&bo->va);
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
- rw_enter_read(&rdev->pm.mclk_lock);
+ down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, &radeon_ttm_bo_destroy);
- rw_exit_read(&rdev->pm.mclk_lock);
+ up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
}
@@ -209,9 +209,9 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- rw_enter_read(&rdev->pm.mclk_lock);
+ down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
- rw_exit_read(&rdev->pm.mclk_lock);
+ up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
@@ -303,7 +303,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
void radeon_bo_force_delete(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
struct radeon_bo *bo, *n;
if (list_empty(&rdev->gem.objects)) {
@@ -311,18 +310,18 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
}
DRM_ERROR("Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&rdev->ddev->struct_mutex);
#ifdef notyet
DRM_ERROR("%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
#endif
- rw_enter_write(&bo->rdev->gem.rwlock);
+ mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
- rw_exit_write(&bo->rdev->gem.rwlock);
+ mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
drm_gem_object_unreference(&bo->gem_base);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&rdev->ddev->struct_mutex);
}
}
diff --git a/sys/dev/pci/drm/radeon/radeon_pm.c b/sys/dev/pci/drm/radeon/radeon_pm.c
index d411ce29fea..b8fe36cf30b 100644
--- a/sys/dev/pci/drm/radeon/radeon_pm.c
+++ b/sys/dev/pci/drm/radeon/radeon_pm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_pm.c,v 1.9 2015/02/10 01:39:32 jsg Exp $ */
+/* $OpenBSD: radeon_pm.c,v 1.10 2015/02/10 06:19:36 jsg Exp $ */
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -83,10 +83,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
}
}
}
@@ -256,7 +256,6 @@ static void radeon_set_power_state(struct radeon_device *rdev)
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
int i, r;
/* no need to take locks, etc. if nothing's going to change */
@@ -264,9 +263,9 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
- mutex_lock(&dev->struct_mutex);
- rw_enter_write(&rdev->pm.mclk_lock);
- rw_enter_write(&rdev->ring_lock);
+ mutex_lock(&rdev->ddev->struct_mutex);
+ down_write(&rdev->pm.mclk_lock);
+ mutex_lock(&rdev->ring_lock);
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -277,9 +276,9 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
r = radeon_fence_wait_empty_locked(rdev, i);
if (r) {
/* needs a GPU reset dont reset here */
- rw_exit_write(&rdev->ring_lock);
- rw_exit_write(&rdev->pm.mclk_lock);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&rdev->ring_lock);
+ up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
return;
}
}
@@ -313,9 +312,9 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- rw_exit_write(&rdev->ring_lock);
- rw_exit_write(&rdev->pm.mclk_lock);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&rdev->ring_lock);
+ up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
}
static void radeon_pm_print_states(struct radeon_device *rdev)
@@ -378,7 +377,7 @@ radeon_set_pm_profile(struct device *dev,
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
rdev->pm.profile = PM_PROFILE_DEFAULT;
@@ -400,7 +399,7 @@ radeon_set_pm_profile(struct device *dev,
count = -EINVAL;
fail:
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
return count;
}
@@ -429,18 +428,18 @@ radeon_set_pm_method(struct device *dev,
if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
rdev->pm.pm_method = PM_METHOD_DYNPM;
rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
} else if (strncmp("profile", buf, strlen("profile")) == 0) {
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
/* disable dynpm */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.pm_method = PM_METHOD_PROFILE;
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
timeout_del(&rdev->pm.dynpm_idle_to);
task_del(systq, &rdev->pm.dynpm_idle_task);
} else {
@@ -565,12 +564,12 @@ static void radeon_hwmon_fini(struct radeon_device *rdev)
void radeon_pm_suspend(struct radeon_device *rdev)
{
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
}
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
timeout_del(&rdev->pm.dynpm_idle_to);
task_del(systq, &rdev->pm.dynpm_idle_task);
@@ -594,7 +593,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
/* asic init will reset the default power state */
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
@@ -608,7 +607,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
timeout_add_msec(&rdev->pm.dynpm_idle_to, RADEON_IDLE_LOOP_MS);
}
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
}
@@ -688,7 +687,7 @@ int radeon_pm_init(struct radeon_device *rdev)
void radeon_pm_fini(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
rdev->pm.profile = PM_PROFILE_DEFAULT;
radeon_pm_update_profile(rdev);
@@ -699,7 +698,7 @@ void radeon_pm_fini(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
radeon_pm_set_clocks(rdev);
}
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
timeout_del(&rdev->pm.dynpm_idle_to);
task_del(systq, &rdev->pm.dynpm_idle_task);
@@ -725,7 +724,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
if (rdev->pm.num_power_states < 2)
return;
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
rdev->pm.active_crtcs = 0;
rdev->pm.active_crtc_count = 0;
@@ -784,7 +783,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
}
}
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
}
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
@@ -835,7 +834,7 @@ radeon_dynpm_idle_work_handler(void *arg1)
int resched;
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
- rw_enter_write(&rdev->pm.rwlock);
+ mutex_lock(&rdev->pm.mutex);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
int not_processed = 0;
int i;
@@ -883,7 +882,7 @@ radeon_dynpm_idle_work_handler(void *arg1)
timeout_add_msec(&rdev->pm.dynpm_idle_to, RADEON_IDLE_LOOP_MS);
}
- rw_exit_write(&rdev->pm.rwlock);
+ mutex_unlock(&rdev->pm.mutex);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
diff --git a/sys/dev/pci/drm/radeon/radeon_prime.c b/sys/dev/pci/drm/radeon/radeon_prime.c
index 3f740afb831..3d3ec7cffa5 100644
--- a/sys/dev/pci/drm/radeon/radeon_prime.c
+++ b/sys/dev/pci/drm/radeon/radeon_prime.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_prime.c,v 1.3 2014/02/09 11:03:31 jsg Exp $ */
+/* $OpenBSD: radeon_prime.c,v 1.4 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
@@ -38,10 +38,10 @@ static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attach
struct sg_table *sg;
int nents;
- rw_enter_write(&dev->struct_rwlock);
+ mutex_lock(&dev->struct_mutex);
sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
- rw_exit_write(&dev->struct_rwlock);
+ mutex_unlock(&dev->struct_mutex);
return sg;
}
@@ -94,7 +94,7 @@ static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
struct drm_device *dev = bo->rdev->ddev;
int ret;
- rw_enter_write(&dev->struct_rwlock);
+ mutex_lock(&dev->struct_mutex);
if (bo->vmapping_count) {
bo->vmapping_count++;
goto out_unlock;
@@ -103,12 +103,12 @@ static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
&bo->dma_buf_vmap);
if (ret) {
- rw_exit_write(&dev->struct_rwlock);
+ mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
}
bo->vmapping_count = 1;
out_unlock:
- rw_exit_write(&dev->struct_rwlock);
+ mutex_unlock(&dev->struct_mutex);
return bo->dma_buf_vmap.virtual;
}
@@ -117,12 +117,12 @@ static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
struct radeon_bo *bo = dma_buf->priv;
struct drm_device *dev = bo->rdev->ddev;
- rw_enter_write(&dev->struct_rwlock);
+ mutex_lock(&dev->struct_mutex);
bo->vmapping_count--;
if (bo->vmapping_count == 0) {
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
- rw_exit_write(&dev->struct_rwlock);
+ mutex_unlock(&dev->struct_mutex);
}
const static struct dma_buf_ops radeon_dmabuf_ops = {
.map_dma_buf = radeon_gem_map_dma_buf,
@@ -153,9 +153,9 @@ static int radeon_prime_create(struct drm_device *dev,
bo = *pbo;
bo->gem_base.driver_private = bo;
- rw_enter_write(&rdev->gem.rwlock);
+ mutex_lock(&rdev->gem.mutex);
list_add_tail(&bo->list, &rdev->gem.objects);
- rw_exit_write(&rdev->gem.rwlock);
+ mutex_unlock(&rdev->gem.mutex);
return 0;
}
diff --git a/sys/dev/pci/drm/radeon/radeon_ring.c b/sys/dev/pci/drm/radeon/radeon_ring.c
index 978effc3688..bfc7a9a704e 100644
--- a/sys/dev/pci/drm/radeon/radeon_ring.c
+++ b/sys/dev/pci/drm/radeon/radeon_ring.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_ring.c,v 1.5 2014/02/10 01:01:23 jsg Exp $ */
+/* $OpenBSD: radeon_ring.c,v 1.6 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -424,10 +424,10 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
{
int r;
- rw_enter_write(&rdev->ring_lock);
+ mutex_lock(&rdev->ring_lock);
r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
return r;
}
return 0;
@@ -466,7 +466,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, ring);
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
}
/**
@@ -491,7 +491,7 @@ void radeon_ring_undo(struct radeon_ring *ring)
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_undo(ring);
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
}
/**
@@ -591,17 +591,17 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
unsigned size, ptr, i;
/* just in case lock the ring */
- rw_enter_write(&rdev->ring_lock);
+ mutex_lock(&rdev->ring_lock);
*data = NULL;
if (ring->ring_obj == NULL) {
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
return 0;
}
/* it doesn't make sense to save anything if all fences are signaled */
if (!radeon_fence_count_emitted(rdev, ring->idx)) {
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
return 0;
}
@@ -612,7 +612,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
else {
/* no way to read back the next rptr */
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
return 0;
}
@@ -620,14 +620,14 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
size -= ptr;
size &= ring->ptr_mask;
if (size == 0) {
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
return 0;
}
/* and then save the content of the ring */
*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (!*data) {
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
return 0;
}
for (i = 0; i < size; ++i) {
@@ -635,7 +635,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
ptr &= ring->ptr_mask;
}
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
return size;
}
@@ -754,12 +754,12 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
int r;
struct radeon_bo *ring_obj;
- rw_enter_write(&rdev->ring_lock);
+ mutex_lock(&rdev->ring_lock);
ring_obj = ring->ring_obj;
ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
- rw_exit_write(&rdev->ring_lock);
+ mutex_unlock(&rdev->ring_lock);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
diff --git a/sys/dev/pci/drm/radeon/radeon_ttm.c b/sys/dev/pci/drm/radeon/radeon_ttm.c
index 3fd4f3e8fa3..ba223e7db5d 100644
--- a/sys/dev/pci/drm/radeon/radeon_ttm.c
+++ b/sys/dev/pci/drm/radeon/radeon_ttm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_ttm.c,v 1.7 2014/12/09 07:05:06 doug Exp $ */
+/* $OpenBSD: radeon_ttm.c,v 1.8 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
@@ -895,10 +895,10 @@ radeon_ttm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
bo = (struct ttm_buffer_object *)ufi->entry->object.uvm_obj;
rdev = radeon_get_rdev(bo->bdev);
- rw_enter_read(&rdev->pm.mclk_lock);
+ down_read(&rdev->pm.mclk_lock);
r = ttm_vm_ops->pgo_fault(ufi, vaddr, pps, npages, centeridx,
fault_type, access_type, flags);
- rw_exit_read(&rdev->pm.mclk_lock);
+ up_read(&rdev->pm.mclk_lock);
return r;
}
diff --git a/sys/dev/pci/drm/radeon/si.c b/sys/dev/pci/drm/radeon/si.c
index 16edebcbd82..6d5e27b00d5 100644
--- a/sys/dev/pci/drm/radeon/si.c
+++ b/sys/dev/pci/drm/radeon/si.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: si.c,v 1.15 2014/07/12 18:48:52 tedu Exp $ */
+/* $OpenBSD: si.c,v 1.16 2015/02/10 06:19:36 jsg Exp $ */
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
@@ -4446,10 +4446,10 @@ uint64_t si_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
- rw_enter_write(&rdev->gpu_clock_rwlock);
+ mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
- rw_exit_write(&rdev->gpu_clock_rwlock);
+ mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
diff --git a/sys/dev/pci/drm/ttm/ttm_bo.c b/sys/dev/pci/drm/ttm/ttm_bo.c
index a431d952429..7524245995b 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo.c,v 1.10 2015/01/27 03:17:36 dlg Exp $ */
+/* $OpenBSD: ttm_bo.c,v 1.11 2015/02/10 06:19:36 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -325,7 +325,7 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unreserve);
/*
- * Call bo->rwlock locked.
+ * Call bo->mutex locked.
*/
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
@@ -335,7 +335,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
uint32_t page_flags = 0;
#ifdef notyet
- rw_assert_wrlock(&bo->rwlock);
+ rw_assert_wrlock(&bo->mutex);
#endif
bo->ttm = NULL;
@@ -728,14 +728,14 @@ static void ttm_bo_release(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- rw_enter_write(&bdev->vm_lock);
+ write_lock(&bdev->vm_lock);
if (likely(bo->vm_node != NULL)) {
RB_REMOVE(ttm_bo_device_buffer_objects,
&bdev->addr_space_rb, bo);
drm_mm_put_block(bo->vm_node);
bo->vm_node = NULL;
}
- rw_exit_write(&bdev->vm_lock);
+ write_unlock(&bdev->vm_lock);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -1436,7 +1436,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
BUG_ON(man->has_type);
man->io_reserve_fastpath = true;
man->use_io_reserve_lru = false;
- rw_init(&man->io_reserve_rwlock, "ttm_iores");
+ rw_init(&man->io_reserve_mutex, "ttm_iores");
INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1484,7 +1484,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
struct ttm_bo_global *glob = ref->object;
int ret;
- rw_init(&glob->device_list_rwlock, "ttm_devlist");
+ rw_init(&glob->device_list_mutex, "ttm_devlist");
mtx_init(&glob->lru_lock, IPL_NONE);
glob->mem_glob = bo_ref->mem_glob;
glob->dummy_read_page = km_alloc(PAGE_SIZE, &kv_any, &kp_dma_zero,
@@ -1539,9 +1539,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
}
}
- rw_enter_write(&glob->device_list_rwlock);
+ mutex_lock(&glob->device_list_mutex);
list_del(&bdev->device_list);
- rw_exit_write(&glob->device_list_rwlock);
+ mutex_unlock(&glob->device_list_mutex);
timeout_del(&bdev->to);
task_del(systq, &bdev->task);
@@ -1558,9 +1558,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
mtx_leave(&glob->lru_lock);
BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
- rw_enter_write(&bdev->vm_lock);
+ write_lock(&bdev->vm_lock);
drm_mm_takedown(&bdev->addr_space_mm);
- rw_exit_write(&bdev->vm_lock);
+ write_unlock(&bdev->vm_lock);
return ret;
}
@@ -1600,9 +1600,9 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
mtx_init(&bdev->fence_lock, IPL_NONE);
- rw_enter_write(&glob->device_list_rwlock);
+ mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
- rw_exit_write(&glob->device_list_rwlock);
+ mutex_unlock(&glob->device_list_mutex);
return 0;
out_no_addr_mm:
@@ -1704,7 +1704,7 @@ retry_pre_get:
if (unlikely(ret != 0))
return ret;
- rw_enter_write(&bdev->vm_lock);
+ write_lock(&bdev->vm_lock);
bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
bo->mem.num_pages, 0, 0);
@@ -1717,17 +1717,17 @@ retry_pre_get:
bo->mem.num_pages, 0);
if (unlikely(bo->vm_node == NULL)) {
- rw_exit_write(&bdev->vm_lock);
+ write_unlock(&bdev->vm_lock);
goto retry_pre_get;
}
ttm_bo_vm_insert_rb(bo);
- rw_exit_write(&bdev->vm_lock);
+ write_unlock(&bdev->vm_lock);
bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
return 0;
out_unlock:
- rw_exit_write(&bdev->vm_lock);
+ write_unlock(&bdev->vm_lock);
return ret;
}
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_api.h b/sys/dev/pci/drm/ttm/ttm_bo_api.h
index ad149d9cce2..f0a09c7deba 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_api.h
+++ b/sys/dev/pci/drm/ttm/ttm_bo_api.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_api.h,v 1.1 2013/08/12 04:11:53 jsg Exp $ */
+/* $OpenBSD: ttm_bo_api.h,v 1.2 2015/02/10 06:19:36 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -320,7 +320,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
* @interruptible: Use interruptible wait.
* @no_wait: Return immediately if buffer is busy.
*
- * This function must be called with the bo::rwlock held, and makes
+ * This function must be called with the bo::mutex held, and makes
* sure any previous rendering to the buffer is completed.
* Note: It might be necessary to block validations before the
* wait by reserving the buffer.
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_driver.h b/sys/dev/pci/drm/ttm/ttm_bo_driver.h
index a5b79d4b091..e418d49a4f4 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_driver.h
+++ b/sys/dev/pci/drm/ttm/ttm_bo_driver.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_driver.h,v 1.2 2013/10/29 06:30:57 jsg Exp $ */
+/* $OpenBSD: ttm_bo_driver.h,v 1.3 2015/02/10 06:19:36 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
@@ -200,7 +200,7 @@ struct ttm_mem_type_manager_func {
* which has knowledge of the underlying type.
*
* This function may not be called from within atomic context, so
- * an implementation can and must use either a rwlock or a mutex to
+ * an implementation can and must use either a mutex or a spinlock to
* protect any data structures managing the space.
*/
int (*get_node)(struct ttm_mem_type_manager *man,
@@ -250,7 +250,7 @@ struct ttm_mem_type_manager_func {
* placed in this memory type if the user doesn't provide one.
* @func: structure pointer implementing the range manager. See above
* @priv: Driver private closure for @func.
- * @io_reserve_rwlock: Mutex optionally protecting shared io_reserve structures
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
* @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
* reserved by the TTM vm system.
* @io_reserve_lru: Optional lru list for unreserving io mem regions.
@@ -280,12 +280,12 @@ struct ttm_mem_type_manager {
uint32_t default_caching;
const struct ttm_mem_type_manager_func *func;
void *priv;
- struct rwlock io_reserve_rwlock;
+ struct rwlock io_reserve_mutex;
bool use_io_reserve_lru;
bool io_reserve_fastpath;
/*
- * Protected by @io_reserve_rwlock:
+ * Protected by @io_reserve_mutex:
*/
struct list_head io_reserve_lru;
@@ -471,8 +471,8 @@ struct ttm_bo_global_ref {
* @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages.
* @shrink: A shrink callback object used for buffer object swap.
- * @device_list_rwlock: Mutex protecting the device list.
- * This rwlock is held while traversing the device list for pm options.
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
* @lru_lock: Spinlock protecting the bo subsystem lru lists.
* @device_list: List of buffer object devices.
* @swap_lru: Lru list of buffer objects used for swapping.
@@ -488,11 +488,11 @@ struct ttm_bo_global {
struct ttm_mem_global *mem_glob;
struct vm_page *dummy_read_page;
struct ttm_mem_shrink shrink;
- struct rwlock device_list_rwlock;
+ struct rwlock device_list_mutex;
struct mutex lru_lock;
/**
- * Protected by device_list_rwlock.
+ * Protected by device_list_mutex.
*/
struct list_head device_list;
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_util.c b/sys/dev/pci/drm/ttm/ttm_bo_util.c
index 9862576ef13..5c9e9d38ea9 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_util.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_util.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_util.c,v 1.6 2014/11/16 12:31:00 deraadt Exp $ */
+/* $OpenBSD: ttm_bo_util.c,v 1.7 2015/02/10 06:19:36 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -87,9 +87,9 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
return 0;
if (interruptible)
- return rw_enter(&man->io_reserve_rwlock, RW_WRITE | RW_INTR);
+ return mutex_lock_interruptible(&man->io_reserve_mutex);
- rw_enter_write(&man->io_reserve_rwlock);
+ mutex_lock(&man->io_reserve_mutex);
return 0;
}
@@ -98,7 +98,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
if (likely(man->io_reserve_fastpath))
return;
- rw_exit_write(&man->io_reserve_rwlock);
+ mutex_unlock(&man->io_reserve_mutex);
}
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_vm.c b/sys/dev/pci/drm/ttm/ttm_bo_vm.c
index c1fb614475b..9e8aa1316c0 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_vm.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_vm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_vm.c,v 1.2 2014/09/23 05:57:14 jsg Exp $ */
+/* $OpenBSD: ttm_bo_vm.c,v 1.3 2015/02/10 06:19:36 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -305,11 +305,11 @@ ttm_bo_mmap(voff_t off, vsize_t size, struct ttm_bo_device *bdev)
struct ttm_buffer_object *bo;
int ret;
- rw_enter_read(&bdev->vm_lock);
+ read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, off >> PAGE_SHIFT, size >> PAGE_SHIFT);
if (likely(bo != NULL))
refcount_acquire(&bo->kref);
- rw_exit_read(&bdev->vm_lock);
+ read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
// pr_err("Could not find buffer object to map\n");
@@ -378,11 +378,11 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
bool no_wait = false;
bool dummy;
- rw_enter_read(&bdev->vm_lock);
+ read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
if (likely(bo != NULL))
ttm_bo_reference(bo);
- rw_exit_read(&bdev->vm_lock);
+ read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL))
return -EFAULT;
diff --git a/sys/dev/pci/drm/ttm/ttm_object.c b/sys/dev/pci/drm/ttm/ttm_object.c
index f3490cf7a74..a8decab0fd6 100644
--- a/sys/dev/pci/drm/ttm/ttm_object.c
+++ b/sys/dev/pci/drm/ttm/ttm_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_object.c,v 1.2 2014/02/09 10:57:26 jsg Exp $ */
+/* $OpenBSD: ttm_object.c,v 1.3 2015/02/10 06:19:36 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
@@ -253,17 +253,17 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
*existed = true;
while (ret == -EINVAL) {
- rw_enter_read(&tfile->lock);
+ read_lock(&tfile->lock);
ret = drm_ht_find_item(ht, base->hash.key, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
refcount_acquire(&ref->kref);
- rw_exit_read(&tfile->lock);
+ read_unlock(&tfile->lock);
break;
}
- rw_exit_read(&tfile->lock);
+ read_unlock(&tfile->lock);
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -280,19 +280,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
ref->ref_type = ref_type;
refcount_init(&ref->kref, 1);
- rw_enter_write(&tfile->lock);
+ write_lock(&tfile->lock);
ret = drm_ht_insert_item(ht, &ref->hash);
if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list);
refcount_acquire(&base->refcount);
- rw_exit_write(&tfile->lock);
+ write_unlock(&tfile->lock);
if (existed != NULL)
*existed = false;
break;
}
- rw_exit_write(&tfile->lock);
+ write_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL);
ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -313,7 +313,7 @@ static void ttm_ref_object_release(struct ttm_ref_object *ref)
ht = &tfile->ref_hash[ref->ref_type];
(void)drm_ht_remove_item(ht, &ref->hash);
list_del(&ref->head);
- rw_exit_write(&tfile->lock);
+ write_unlock(&tfile->lock);
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
base->ref_obj_release(base, ref->ref_type);
@@ -321,7 +321,7 @@ static void ttm_ref_object_release(struct ttm_ref_object *ref)
ttm_base_object_unref(&ref->obj);
ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref);
- rw_enter_write(&tfile->lock);
+ write_lock(&tfile->lock);
}
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -332,16 +332,16 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
struct drm_hash_item *hash;
int ret;
- rw_enter_write(&tfile->lock);
+ write_lock(&tfile->lock);
ret = drm_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
- rw_exit_write(&tfile->lock);
+ write_unlock(&tfile->lock);
return -EINVAL;
}
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
if (refcount_release(&ref->kref))
ttm_ref_object_release(ref);
- rw_exit_write(&tfile->lock);
+ write_unlock(&tfile->lock);
return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -354,7 +354,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
- rw_enter_write(&tfile->lock);
+ write_lock(&tfile->lock);
/*
* Since we release the lock within the loop, we have to
@@ -370,7 +370,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
- rw_exit_write(&tfile->lock);
+ write_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);