author     Mark Kettenis <kettenis@cvs.openbsd.org>    2015-06-24 08:32:40 +0000
committer  Mark Kettenis <kettenis@cvs.openbsd.org>    2015-06-24 08:32:40 +0000
commit     6f311cd2637c672a39862ca5d613b5a66dc6060b
tree       3e9cb0f7c2e95dbcd365d5132890055c0e18bf5e
parent     2da05ab93c474b44c9b433485b061d57858a240b
Introduce Linux work queue APIs and use them. As a side-effect, this will
move some of the work from the system task queue to the driver-specific task queue. ok jsg@
-rw-r--r--  sys/dev/pci/drm/drm_linux.h           | 97
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.c       | 19
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.h       | 18
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem.c       | 30
-rw-r--r--  sys/dev/pci/drm/i915/i915_irq.c       | 50
-rw-r--r--  sys/dev/pci/drm/i915/intel_display.c  | 17
-rw-r--r--  sys/dev/pci/drm/i915/intel_dp.c       | 25
-rw-r--r--  sys/dev/pci/drm/i915/intel_drv.h      | 10
-rw-r--r--  sys/dev/pci/drm/i915/intel_pm.c       | 50
9 files changed, 193 insertions(+), 123 deletions(-)
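
The shim added to drm_linux.h (first hunk below) maps the Linux work_struct/delayed_work primitives onto OpenBSD's taskq(9) and timeout(9), so the i915 code can keep the upstream calling convention. The following is a minimal sketch of how a driver uses that API for a hypothetical "mydrv" driver; only the workqueue/taskq calls themselves are taken from this commit, everything else is illustrative.

/*
 * Hedged sketch for a hypothetical "mydrv" driver; only the
 * workqueue/taskq calls come from the drm_linux.h hunk below.
 */
#include <sys/task.h>
#include "drm_linux.h"		/* pulled in via drmP.h in the real drivers */

struct mydrv_softc {
	struct workqueue_struct	*wq;		/* really a cast struct taskq */
	struct work_struct	 hotplug_work;	/* run as soon as possible */
	struct delayed_work	 retire_work;	/* run after a timeout */
};

static void
mydrv_hotplug_work(struct work_struct *work)
{
	struct mydrv_softc *sc =
	    container_of(work, struct mydrv_softc, hotplug_work);

	/* runs from the driver taskq, outside interrupt context */
	(void)sc;
}

static void
mydrv_retire_work(struct work_struct *work)
{
	struct mydrv_softc *sc = container_of(to_delayed_work(work),
	    struct mydrv_softc, retire_work);

	/* ... retire requests, then re-arm roughly once a second ... */
	queue_delayed_work(sc->wq, &sc->retire_work,
	    round_jiffies_up_relative(HZ));
}

static void
mydrv_attach(struct mydrv_softc *sc)
{
	/* a workqueue_struct is just a cast taskq, as in inteldrm_attach() */
	sc->wq = (struct workqueue_struct *)
	    taskq_create("mydrvwq", 1, IPL_TTY, 0);

	INIT_WORK(&sc->hotplug_work, mydrv_hotplug_work);
	INIT_DELAYED_WORK(&sc->retire_work, mydrv_retire_work);
}

static int
mydrv_intr(void *arg)
{
	struct mydrv_softc *sc = arg;

	/* defer the heavy lifting out of the interrupt handler */
	queue_work(sc->wq, &sc->hotplug_work);
	return 1;
}

static void
mydrv_detach(struct mydrv_softc *sc)
{
	cancel_work_sync(&sc->hotplug_work);
	cancel_delayed_work_sync(&sc->retire_work);
}

Note that schedule_delayed_work() still dispatches to the system taskq (systq), while queue_work() and queue_delayed_work() dispatch to the driver's own taskq; that difference is the side effect the commit message refers to.
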
diff --git a/sys/dev/pci/drm/drm_linux.h b/sys/dev/pci/drm/drm_linux.h
index f7d11876a3e..ec44a479bff 100644
--- a/sys/dev/pci/drm/drm_linux.h
+++ b/sys/dev/pci/drm/drm_linux.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.h,v 1.27 2015/04/18 14:47:34 jsg Exp $ */
+/* $OpenBSD: drm_linux.h,v 1.28 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright (c) 2013, 2014 Mark Kettenis
*
@@ -15,6 +15,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <sys/task.h>
+
typedef int irqreturn_t;
#define IRQ_NONE 0
#define IRQ_HANDLED 1
@@ -279,6 +281,91 @@ init_waitqueue_head(wait_queue_head_t *wq)
#define wake_up_all(x) wakeup(x)
#define wake_up_all_locked(x) wakeup(x)
+struct workqueue_struct;
+
+struct work_struct {
+ struct task task;
+ struct taskq *tq;
+};
+
+typedef void (*work_func_t)(struct work_struct *);
+
+static inline void
+INIT_WORK(struct work_struct *work, work_func_t func)
+{
+ task_set(&work->task, (void (*)(void *))func, work);
+}
+
+static inline bool
+queue_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+ work->tq = (struct taskq *)wq;
+ return task_add(work->tq, &work->task);
+}
+
+static inline void
+cancel_work_sync(struct work_struct *work)
+{
+ task_del(work->tq, &work->task);
+}
+
+struct delayed_work {
+ struct work_struct work;
+ struct timeout to;
+ struct taskq *tq;
+};
+
+static inline struct delayed_work *
+to_delayed_work(struct work_struct *work)
+{
+ return container_of(work, struct delayed_work, work);
+}
+
+static void
+__delayed_work_tick(void *arg)
+{
+ struct delayed_work *dwork = arg;
+
+ task_add(dwork->tq, &dwork->work.task);
+}
+
+static inline void
+INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
+{
+ INIT_WORK(&dwork->work, func);
+ timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
+}
+
+static inline bool
+schedule_delayed_work(struct delayed_work *dwork, int jiffies)
+{
+ dwork->tq = systq;
+ return timeout_add(&dwork->to, jiffies);
+}
+
+static inline bool
+queue_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *dwork, int jiffies)
+{
+ dwork->tq = (struct taskq *)wq;
+ return timeout_add(&dwork->to, jiffies);
+}
+
+static inline bool
+cancel_delayed_work(struct delayed_work *dwork)
+{
+ if (timeout_del(&dwork->to))
+ return true;
+ return task_del(dwork->tq, &dwork->work.task);
+}
+
+static inline void
+cancel_delayed_work_sync(struct delayed_work *dwork)
+{
+ timeout_del(&dwork->to);
+ task_del(dwork->tq, &dwork->work.task);
+}
+
#define NSEC_PER_USEC 1000L
#define NSEC_PER_SEC 1000000000L
#define KHZ2PICOS(a) (1000000000UL/(a))
@@ -287,6 +374,14 @@ extern struct timespec ns_to_timespec(const int64_t);
extern int64_t timeval_to_ns(const struct timeval *);
extern struct timeval ns_to_timeval(const int64_t);
+#define HZ hz
+
+static inline unsigned long
+round_jiffies_up_relative(unsigned long j)
+{
+ return roundup(j, hz);
+}
+
#define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz)
#define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000)
#define time_after(a,b) ((long)(b) - (long)(a) < 0)
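
The jiffies helpers added above convert between milliseconds and hz ticks. A small worked example of the arithmetic, assuming the usual OpenBSD default of hz == 100 (not something this commit sets):

static inline void
mydrv_jiffies_example(void)
{
	/* assuming hz == 100 */
	int five = msecs_to_jiffies(50);		/* 50 * hz / 1000 == 5 ticks (50 ms) */
	int up   = round_jiffies_up_relative(5);	/* roundup(5, hz) == 100 ticks (~1 s) */
	int sec  = round_jiffies_up_relative(HZ);	/* roundup(hz, hz) == 100 ticks (1 s) */

	(void)five; (void)up; (void)sec;
}
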
diff --git a/sys/dev/pci/drm/i915/i915_drv.c b/sys/dev/pci/drm/i915/i915_drv.c
index c8b341f739e..d897b6d7046 100644
--- a/sys/dev/pci/drm/i915/i915_drv.c
+++ b/sys/dev/pci/drm/i915/i915_drv.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_drv.c,v 1.83 2015/04/18 14:47:34 jsg Exp $ */
+/* $OpenBSD: i915_drv.c,v 1.84 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -173,7 +173,6 @@ int inteldrm_ioctl(struct drm_device *, u_long, caddr_t, struct drm_file *);
int inteldrm_doioctl(struct drm_device *, u_long, caddr_t, struct drm_file *);
int inteldrm_gmch_match(struct pci_attach_args *);
-void inteldrm_timeout(void *);
void i915_alloc_ifp(struct inteldrm_softc *, struct pci_attach_args *);
void i965_alloc_ifp(struct inteldrm_softc *, struct pci_attach_args *);
@@ -548,8 +547,7 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
- timeout_del(&dev_priv->rps.delayed_resume_to);
- task_del(systq, &dev_priv->rps.delayed_resume_task);
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
intel_modeset_disable(dev);
@@ -979,8 +977,9 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
return;
}
- dev_priv->mm.retire_taskq = taskq_create("intelrel", 1, IPL_TTY, 0);
- if (dev_priv->mm.retire_taskq == NULL) {
+ dev_priv->wq = (struct workqueue_struct *)
+ taskq_create("intelrel", 1, IPL_TTY, 0);
+ if (dev_priv->wq == NULL) {
printf("couldn't create taskq\n");
return;
}
@@ -1294,14 +1293,6 @@ intel_gtt_chipset_flush(struct drm_device *dev)
}
}
-void
-inteldrm_timeout(void *arg)
-{
- struct inteldrm_softc *dev_priv = arg;
-
- task_add(dev_priv->mm.retire_taskq, &dev_priv->mm.retire_task);
-}
-
static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h
index f436a20c52c..f47aa6b3938 100644
--- a/sys/dev/pci/drm/i915/i915_drv.h
+++ b/sys/dev/pci/drm/i915/i915_drv.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_drv.h,v 1.63 2015/06/04 06:11:21 jsg Exp $ */
+/* $OpenBSD: i915_drv.h,v 1.64 2015/06/24 08:32:39 kettenis Exp $ */
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
*/
/*
@@ -604,7 +604,7 @@ struct i915_suspend_saved_registers {
};
struct intel_gen6_power_mgmt {
- struct task task;
+ struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protectects the work_struct and
* pm_iir. */
@@ -616,8 +616,7 @@ struct intel_gen6_power_mgmt {
u8 min_delay;
u8 max_delay;
- struct task delayed_resume_task;
- struct timeout delayed_resume_to;
+ struct delayed_work delayed_resume_work;
/*
* Protects RPS/RC6 register access and PCU communication.
@@ -663,7 +662,7 @@ struct i915_dri1_state {
struct intel_l3_parity {
u32 *remap_info;
- struct task error_task;
+ struct work_struct error_work;
};
struct inteldrm_softc {
@@ -750,7 +749,7 @@ struct inteldrm_softc {
u32 pch_irq_mask;
u32 hotplug_supported_mask;
- struct task hotplug_task;
+ struct work_struct hotplug_work;
int num_pch_pll;
@@ -816,9 +815,10 @@ struct inteldrm_softc {
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
- struct task error_task;
+ struct work_struct error_work;
int error_completion;
struct mutex error_completion_lock;
+ struct workqueue_struct *wq;
/* number of ioctls + faults in flight */
int entries;
@@ -903,9 +903,7 @@ struct inteldrm_softc {
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
- struct timeout retire_timer;
- struct taskq *retire_taskq;
- struct task retire_task;
+ struct delayed_work retire_work;
/**
* Are we in a non-interruptible section of code like
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index 9e68b81fc33..3909317ff10 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem.c,v 1.95 2015/06/22 15:20:43 kettenis Exp $ */
+/* $OpenBSD: i915_gem.c,v 1.96 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -2209,7 +2209,9 @@ i915_add_request(struct intel_ring_buffer *ring,
DRM_I915_HANGCHECK_PERIOD);
}
if (was_empty) {
- timeout_add_sec(&dev_priv->mm.retire_timer, 1);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
@@ -2380,20 +2382,23 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_requests_ring(ring);
}
-void
-i915_gem_retire_work_handler(void *arg1)
+static void
+i915_gem_retire_work_handler(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ drm_i915_private_t *dev_priv;
struct drm_device *dev;
struct intel_ring_buffer *ring;
bool idle;
int i;
+ dev_priv = container_of(work, drm_i915_private_t,
+ mm.retire_work.work);
dev = dev_priv->dev;
/* Come back later if the device is busy... */
if (rw_enter(&dev->struct_mutex, RW_NOSLEEP | RW_WRITE)) {
- timeout_add_sec(&dev_priv->mm.retire_timer, 1);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
return;
}
@@ -2411,7 +2416,8 @@ i915_gem_retire_work_handler(void *arg1)
}
if (!dev_priv->mm.suspended && !idle)
- timeout_add_sec(&dev_priv->mm.retire_timer, 1);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);
@@ -3612,7 +3618,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
ret = __wait_seqno(ring, seqno, true, NULL);
if (ret == 0)
- timeout_add_sec(&dev_priv->mm.retire_timer, 0);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
return ret;
}
@@ -3995,8 +4001,7 @@ i915_gem_idle(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
- timeout_del(&dev_priv->mm.retire_timer);
- task_del(dev_priv->mm.retire_taskq, &dev_priv->mm.retire_task);
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
return 0;
}
@@ -4304,9 +4309,8 @@ i915_gem_load(struct drm_device *dev)
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
- task_set(&dev_priv->mm.retire_task, i915_gem_retire_work_handler,
- dev_priv);
- timeout_set(&dev_priv->mm.retire_timer, inteldrm_timeout, dev_priv);
+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ i915_gem_retire_work_handler);
#if 0
init_completion(&dev_priv->error_completion);
#else
diff --git a/sys/dev/pci/drm/i915/i915_irq.c b/sys/dev/pci/drm/i915/i915_irq.c
index 8beb31cf362..d985103e437 100644
--- a/sys/dev/pci/drm/i915/i915_irq.c
+++ b/sys/dev/pci/drm/i915/i915_irq.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_irq.c,v 1.25 2015/04/12 17:10:07 kettenis Exp $ */
+/* $OpenBSD: i915_irq.c,v 1.26 2015/06/24 08:32:39 kettenis Exp $ */
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
*/
/*
@@ -277,9 +277,10 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
/*
* Handle hotplug events outside the interrupt handler proper.
*/
-static void i915_hotplug_work_func(void *arg1)
+static void i915_hotplug_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *)arg1;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
@@ -358,9 +359,10 @@ static void notify_ring(struct drm_device *dev,
}
}
-static void gen6_pm_rps_work(void *arg1)
+static void gen6_pm_rps_work(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;
@@ -402,9 +404,10 @@ static void gen6_pm_rps_work(void *arg1)
* this event, userspace should try to remap the bad rows since statistically
* it is likely the same row is more likely to go bad again.
*/
-static void ivybridge_parity_work(void *arg1)
+static void ivybridge_parity_work(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ l3_parity.error_work);
u32 error_status, row, bank, subbank;
// char *parity_event[5];
uint32_t misccpctl;
@@ -472,7 +475,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- task_add(systq, &dev_priv->l3_parity.error_task);
+ queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +523,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- task_add(systq, &dev_priv->rps.task);
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t valleyview_irq_handler(void *arg)
@@ -582,7 +585,8 @@ static irqreturn_t valleyview_irq_handler(void *arg)
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
@@ -609,7 +613,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
int pipe;
if (pch_iir & SDE_HOTPLUG_MASK)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -652,7 +656,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
int pipe;
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -834,9 +838,10 @@ done:
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_error_work_func(void *arg1)
+static void i915_error_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ error_work);
struct drm_device *dev = dev_priv->dev;
#if 0
char *error_event[] = { "ERROR=1", NULL };
@@ -1471,7 +1476,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
wake_up_all(ring);
}
- task_add(systq, &dev_priv->error_task);
+ queue_work(dev_priv->wq, &dev_priv->error_work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2385,7 +2390,8 @@ static irqreturn_t i915_irq_handler(void *arg)
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
@@ -2622,7 +2628,8 @@ static irqreturn_t i965_irq_handler(void *arg)
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
@@ -2707,11 +2714,10 @@ void intel_irq_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- task_set(&dev_priv->hotplug_task, i915_hotplug_work_func, dev_priv);
- task_set(&dev_priv->error_task, i915_error_work_func, dev_priv);
- task_set(&dev_priv->rps.task, gen6_pm_rps_work, dev_priv);
- task_set(&dev_priv->l3_parity.error_task, ivybridge_parity_work,
- dev_priv);
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/sys/dev/pci/drm/i915/intel_display.c b/sys/dev/pci/drm/i915/intel_display.c
index 22100a6892b..5e459386a23 100644
--- a/sys/dev/pci/drm/i915/intel_display.c
+++ b/sys/dev/pci/drm/i915/intel_display.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_display.c,v 1.50 2015/04/18 14:47:34 jsg Exp $ */
+/* $OpenBSD: intel_display.c,v 1.51 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright © 2006-2007 Intel Corporation
*
@@ -7054,7 +7054,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
if (work) {
- task_del(systq, &work->task);
+ cancel_work_sync(&work->work);
kfree(work);
}
@@ -7063,9 +7063,10 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
-static void intel_unpin_work_fn(void *arg1)
+static void intel_unpin_work_fn(struct work_struct *__work)
{
- struct intel_unpin_work *work = arg1;
+ struct intel_unpin_work *work =
+ container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
mutex_lock(&dev->struct_mutex);
@@ -7124,7 +7125,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
&obj->pending_flip);
wake_up(&dev_priv->pending_flip_queue);
- task_add(systq, &work->task);
+ queue_work(dev_priv->wq, &work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
@@ -7459,7 +7460,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
- task_set(&work->task, intel_unpin_work_fn, work);
+ INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
@@ -9407,8 +9408,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* Disable the irq before mode object teardown, for the irq might
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
- task_del(systq, &dev_priv->hotplug_task);
- task_del(systq, &dev_priv->rps.task);
+ cancel_work_sync(&dev_priv->hotplug_work);
+ cancel_work_sync(&dev_priv->rps.work);
/* flush any delayed tasks or pending work */
#ifdef notyet
diff --git a/sys/dev/pci/drm/i915/intel_dp.c b/sys/dev/pci/drm/i915/intel_dp.c
index 755a12b6114..4aa8c279db4 100644
--- a/sys/dev/pci/drm/i915/intel_dp.c
+++ b/sys/dev/pci/drm/i915/intel_dp.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_dp.c,v 1.24 2015/04/12 11:26:54 jsg Exp $ */
+/* $OpenBSD: intel_dp.c,v 1.25 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright © 2008 Intel Corporation
*
@@ -1104,9 +1104,10 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
}
}
-static void ironlake_panel_vdd_work(void *arg1)
+static void ironlake_panel_vdd_work(struct work_struct *__work)
{
- struct intel_dp *intel_dp = arg1;
+ struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
+ struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
mutex_lock(&dev->mode_config.mutex);
@@ -1114,14 +1115,6 @@ static void ironlake_panel_vdd_work(void *arg1)
mutex_unlock(&dev->mode_config.mutex);
}
-static void
-ironlake_panel_vdd_tick(void *arg)
-{
- struct intel_dp *intel_dp = arg;
-
- task_add(systq, &intel_dp->panel_vdd_task);
-}
-
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
@@ -1140,7 +1133,8 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
* time from now (relative to the power down delay)
* to keep the panel power up across a sequence of operations
*/
- timeout_add_msec(&intel_dp->panel_vdd_to, intel_dp->panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->panel_vdd_work,
+ msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
}
}
@@ -2540,8 +2534,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
#endif
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
- timeout_del(&intel_dp->panel_vdd_to);
- task_del(systq, &intel_dp->panel_vdd_task);
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
kfree(intel_dig_port);
@@ -2808,8 +2801,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- task_set(&intel_dp->panel_vdd_task, ironlake_panel_vdd_work, intel_dp);
- timeout_set(&intel_dp->panel_vdd_to, ironlake_panel_vdd_tick, intel_dp);
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
diff --git a/sys/dev/pci/drm/i915/intel_drv.h b/sys/dev/pci/drm/i915/intel_drv.h
index 1aee3c70a88..b4760949c0d 100644
--- a/sys/dev/pci/drm/i915/intel_drv.h
+++ b/sys/dev/pci/drm/i915/intel_drv.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_drv.h,v 1.6 2015/04/12 11:26:54 jsg Exp $ */
+/* $OpenBSD: intel_drv.h,v 1.7 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007-2008 Intel Corporation
@@ -370,8 +370,7 @@ struct intel_dp {
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
- struct task panel_vdd_task;
- struct timeout panel_vdd_to;
+ struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct intel_connector *attached_connector;
};
@@ -399,7 +398,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
}
struct intel_unpin_work {
- struct task task;
+ struct work_struct work;
struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
@@ -412,8 +411,7 @@ struct intel_unpin_work {
};
struct intel_fbc_work {
- struct task task;
- struct timeout to;
+ struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int interval;
diff --git a/sys/dev/pci/drm/i915/intel_pm.c b/sys/dev/pci/drm/i915/intel_pm.c
index 6edc48d71fc..8d83756484e 100644
--- a/sys/dev/pci/drm/i915/intel_pm.c
+++ b/sys/dev/pci/drm/i915/intel_pm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_pm.c,v 1.34 2015/04/18 14:47:34 jsg Exp $ */
+/* $OpenBSD: intel_pm.c,v 1.35 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright © 2012 Intel Corporation
*
@@ -269,9 +269,11 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->display.fbc_enabled(dev);
}
-static void intel_fbc_work_fn(void *arg1)
+static void intel_fbc_work_fn(struct work_struct *__work)
{
- struct intel_fbc_work *work = arg1;
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
struct drm_device *dev = work->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -296,14 +298,6 @@ static void intel_fbc_work_fn(void *arg1)
kfree(work);
}
-static void
-intel_fbc_work_tick(void *arg)
-{
- struct intel_fbc_work *work = arg;
-
- task_add(systq, &work->task);
-}
-
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc_work == NULL)
@@ -315,8 +309,7 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
* dev_priv->fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
- timeout_del(&dev_priv->fbc_work->to);
- if (task_del(systq, &dev_priv->fbc_work->task))
+ if (cancel_delayed_work(&dev_priv->fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc_work);
@@ -348,8 +341,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work->crtc = crtc;
work->fb = crtc->fb;
work->interval = interval;
- task_set(&work->task, intel_fbc_work_fn, work);
- timeout_set(&work->to, intel_fbc_work_tick, work);
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc_work = work;
@@ -366,7 +358,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*/
- timeout_add_msec(&work->to, 50);
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
@@ -3457,17 +3449,18 @@ void intel_disable_gt_powersave(struct drm_device *dev)
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
- timeout_del(&dev_priv->rps.delayed_resume_to);
- task_del(systq, &dev_priv->rps.delayed_resume_task);
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
-static void intel_gen6_powersave_work(void *arg1)
+static void intel_gen6_powersave_work(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ rps.delayed_resume_work.work);
struct drm_device *dev = dev_priv->dev;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -3476,14 +3469,6 @@ static void intel_gen6_powersave_work(void *arg1)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void
-intel_gen6_powersave_tick(void *arg)
-{
- drm_i915_private_t *dev_priv = arg;
-
- task_add(systq, &dev_priv->rps.delayed_resume_task);
-}
-
void intel_enable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3498,7 +3483,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
* done at any specific time, so do this out of our fast path
* to make resume and init faster.
*/
- timeout_add_sec(&dev_priv->rps.delayed_resume_to, 1);
+ schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+ round_jiffies_up_relative(HZ));
}
}
@@ -4482,10 +4468,8 @@ void intel_pm_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- task_set(&dev_priv->rps.delayed_resume_task, intel_gen6_powersave_work,
- dev_priv);
- timeout_set(&dev_priv->rps.delayed_resume_to, intel_gen6_powersave_tick,
- dev_priv);
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
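
One behavioural detail worth noting from the intel_pm.c hunk above: intel_cancel_fbc_work() uses the return value of the new cancel_delayed_work() to decide whether the canceller may free the work item. A minimal sketch of that ownership pattern, with hypothetical "mydrv" names (struct mydrv_fbc_work is assumed to embed a struct delayed_work named work):

static void
mydrv_cancel_fbc_work(struct mydrv_softc *sc)
{
	struct mydrv_fbc_work *w = sc->fbc_work;

	if (w == NULL)
		return;

	/*
	 * true:  neither the timeout nor the task ran, so the handler
	 *        will never see w and the canceller must free it.
	 * false: the handler already ran (or is running) and frees w
	 *        itself, as intel_fbc_work_fn() does.
	 */
	if (cancel_delayed_work(&w->work))
		kfree(w);

	sc->fbc_work = NULL;
}
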