-/* $OpenBSD: drm_linux.h,v 1.27 2015/04/18 14:47:34 jsg Exp $ */
+/* $OpenBSD: drm_linux.h,v 1.28 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright (c) 2013, 2014 Mark Kettenis
*
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <sys/task.h>
+#include <sys/timeout.h>	/* struct timeout, embedded in delayed_work below */
+
typedef int irqreturn_t;
#define IRQ_NONE 0
#define IRQ_HANDLED 1
#define wake_up_all(x) wakeup(x)
#define wake_up_all_locked(x) wakeup(x)
+struct workqueue_struct;
+
+struct work_struct {
+ struct task task;
+ struct taskq *tq;
+};
+
+typedef void (*work_func_t)(struct work_struct *);
+
+static inline void
+INIT_WORK(struct work_struct *work, work_func_t func)
+{
+	work->tq = systq;	/* default, so cancel before queue is safe */
+	task_set(&work->task, (void (*)(void *))func, work);
+}
+
+static inline bool
+queue_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+ work->tq = (struct taskq *)wq;
+ return task_add(work->tq, &work->task);
+}
+
+static inline void
+cancel_work_sync(struct work_struct *work)
+{
+	/*
+	 * Removes a still-pending task only; unlike Linux, this does
+	 * not wait for an already-running handler to finish.
+	 */
+	task_del(work->tq, &work->task);
+}
+
+struct delayed_work {
+ struct work_struct work;
+ struct timeout to;
+ struct taskq *tq;
+};
+
+static inline struct delayed_work *
+to_delayed_work(struct work_struct *work)
+{
+ return container_of(work, struct delayed_work, work);
+}
+
+/* timeout(9) handler: defer the delayed work onto its taskq */
+static void
+__delayed_work_tick(void *arg)
+{
+ struct delayed_work *dwork = arg;
+
+ task_add(dwork->tq, &dwork->work.task);
+}
+
+static inline void
+INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
+{
+	dwork->tq = systq;	/* default, so cancel before schedule is safe */
+	INIT_WORK(&dwork->work, func);
+	timeout_set(&dwork->to, __delayed_work_tick, dwork);
+}
+
+static inline bool
+schedule_delayed_work(struct delayed_work *dwork, int jiffies)
+{
+ dwork->tq = systq;
+ return timeout_add(&dwork->to, jiffies);
+}
+
+static inline bool
+queue_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *dwork, int jiffies)
+{
+ dwork->tq = (struct taskq *)wq;
+ return timeout_add(&dwork->to, jiffies);
+}
+
+static inline bool
+cancel_delayed_work(struct delayed_work *dwork)
+{
+ if (timeout_del(&dwork->to))
+ return true;
+ return task_del(dwork->tq, &dwork->work.task);
+}
+
+static inline void
+cancel_delayed_work_sync(struct delayed_work *dwork)
+{
+	/* as with cancel_work_sync(), a running handler is not waited for */
+	timeout_del(&dwork->to);
+	task_del(dwork->tq, &dwork->work.task);
+}
+
#define NSEC_PER_USEC 1000L
#define NSEC_PER_SEC 1000000000L
#define KHZ2PICOS(a) (1000000000UL/(a))
extern int64_t timeval_to_ns(const struct timeval *);
extern struct timeval ns_to_timeval(const int64_t);
+#define HZ hz
+
+/*
+ * Round up to a whole number of seconds worth of ticks;
+ * round_jiffies_up_relative(HZ) == hz, i.e. roughly one second.
+ */
+static inline unsigned long
+round_jiffies_up_relative(unsigned long j)
+{
+	return roundup(j, hz);
+}
+
#define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz)
#define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000)
#define time_after(a,b) ((long)(b) - (long)(a) < 0)
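
For reference, a minimal usage sketch of the shims above, following the shape the i915 conversions below take. The foo_* names and the "foowq" taskq are invented for illustration, and drm_linux.h is assumed to be in scope; a driver that does not need its own queue can call schedule_delayed_work() instead, which runs on systq.

struct foo_softc {
	struct workqueue_struct	*sc_wq;
	struct work_struct	 sc_hotplug_work;
	struct delayed_work	 sc_retire_work;
};

static void
foo_hotplug_work(struct work_struct *work)
{
	struct foo_softc *sc = container_of(work, struct foo_softc,
	    sc_hotplug_work);

	/* runs in taskq (process) context, not at interrupt level */
	(void)sc;
}

static void
foo_retire_work(struct work_struct *work)
{
	struct foo_softc *sc = container_of(to_delayed_work(work),
	    struct foo_softc, sc_retire_work);

	(void)sc;
}

static void
foo_attach(struct foo_softc *sc)
{
	/* a "workqueue" is just a taskq underneath */
	sc->sc_wq = (struct workqueue_struct *)
	    taskq_create("foowq", 1, IPL_TTY, 0);
	INIT_WORK(&sc->sc_hotplug_work, foo_hotplug_work);
	INIT_DELAYED_WORK(&sc->sc_retire_work, foo_retire_work);
}

static void
foo_intr(struct foo_softc *sc)
{
	/* defer work out of the interrupt handler */
	queue_work(sc->sc_wq, &sc->sc_hotplug_work);
	/* and kick the periodic pass roughly one second out */
	queue_delayed_work(sc->sc_wq, &sc->sc_retire_work,
	    round_jiffies_up_relative(HZ));
}

static void
foo_detach(struct foo_softc *sc)
{
	cancel_work_sync(&sc->sc_hotplug_work);
	cancel_delayed_work_sync(&sc->sc_retire_work);
}

Since a workqueue_struct is only ever cast to and from a struct taskq, the forward declaration in drm_linux.h is enough; no Linux-side workqueue state is carried.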
-/* $OpenBSD: i915_drv.c,v 1.83 2015/04/18 14:47:34 jsg Exp $ */
+/* $OpenBSD: i915_drv.c,v 1.84 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
int inteldrm_doioctl(struct drm_device *, u_long, caddr_t, struct drm_file *);
int inteldrm_gmch_match(struct pci_attach_args *);
-void inteldrm_timeout(void *);
void i915_alloc_ifp(struct inteldrm_softc *, struct pci_attach_args *);
void i965_alloc_ifp(struct inteldrm_softc *, struct pci_attach_args *);
return error;
}
- timeout_del(&dev_priv->rps.delayed_resume_to);
- task_del(systq, &dev_priv->rps.delayed_resume_task);
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
intel_modeset_disable(dev);
return;
}
- dev_priv->mm.retire_taskq = taskq_create("intelrel", 1, IPL_TTY, 0);
- if (dev_priv->mm.retire_taskq == NULL) {
+ dev_priv->wq = (struct workqueue_struct *)
+ taskq_create("intelrel", 1, IPL_TTY, 0);
+ if (dev_priv->wq == NULL) {
printf("couldn't create taskq\n");
return;
}
}
}
-void
-inteldrm_timeout(void *arg)
-{
- struct inteldrm_softc *dev_priv = arg;
-
- task_add(dev_priv->mm.retire_taskq, &dev_priv->mm.retire_task);
-}
-
static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-/* $OpenBSD: i915_drv.h,v 1.63 2015/06/04 06:11:21 jsg Exp $ */
+/* $OpenBSD: i915_drv.h,v 1.64 2015/06/24 08:32:39 kettenis Exp $ */
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
*/
/*
};
struct intel_gen6_power_mgmt {
- struct task task;
+ struct work_struct work;
u32 pm_iir;
 * lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
u8 min_delay;
u8 max_delay;
- struct task delayed_resume_task;
- struct timeout delayed_resume_to;
+ struct delayed_work delayed_resume_work;
/*
* Protects RPS/RC6 register access and PCU communication.
struct intel_l3_parity {
u32 *remap_info;
- struct task error_task;
+ struct work_struct error_work;
};
struct inteldrm_softc {
u32 pch_irq_mask;
u32 hotplug_supported_mask;
- struct task hotplug_task;
+ struct work_struct hotplug_work;
int num_pch_pll;
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
- struct task error_task;
+ struct work_struct error_work;
int error_completion;
struct mutex error_completion_lock;
+ struct workqueue_struct *wq;
/* number of ioctls + faults in flight */
int entries;
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
- struct timeout retire_timer;
- struct taskq *retire_taskq;
- struct task retire_task;
+ struct delayed_work retire_work;
/**
* Are we in a non-interruptible section of code like
-/* $OpenBSD: i915_gem.c,v 1.95 2015/06/22 15:20:43 kettenis Exp $ */
+/* $OpenBSD: i915_gem.c,v 1.96 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
DRM_I915_HANGCHECK_PERIOD);
}
if (was_empty) {
- timeout_add_sec(&dev_priv->mm.retire_timer, 1);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
i915_gem_retire_requests_ring(ring);
}
-void
-i915_gem_retire_work_handler(void *arg1)
+static void
+i915_gem_retire_work_handler(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ drm_i915_private_t *dev_priv;
struct drm_device *dev;
struct intel_ring_buffer *ring;
bool idle;
int i;
+ dev_priv = container_of(work, drm_i915_private_t,
+ mm.retire_work.work);
dev = dev_priv->dev;
/* Come back later if the device is busy... */
if (rw_enter(&dev->struct_mutex, RW_NOSLEEP | RW_WRITE)) {
- timeout_add_sec(&dev_priv->mm.retire_timer, 1);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
return;
}
}
if (!dev_priv->mm.suspended && !idle)
- timeout_add_sec(&dev_priv->mm.retire_timer, 1);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);
ret = __wait_seqno(ring, seqno, true, NULL);
if (ret == 0)
- timeout_add_sec(&dev_priv->mm.retire_timer, 0);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
return ret;
}
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
- timeout_del(&dev_priv->mm.retire_timer);
- task_del(dev_priv->mm.retire_taskq, &dev_priv->mm.retire_task);
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
return 0;
}
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
- task_set(&dev_priv->mm.retire_task, i915_gem_retire_work_handler,
- dev_priv);
- timeout_set(&dev_priv->mm.retire_timer, inteldrm_timeout, dev_priv);
+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ i915_gem_retire_work_handler);
#if 0
init_completion(&dev_priv->error_completion);
#else
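
The retire handler above recovers dev_priv with container_of() straight through the nested member path (mm.retire_work.work); the intel_dp.c and intel_pm.c handlers further down call to_delayed_work() first and then container_of() on the delayed_work. Both spellings subtract the same offset. A small sketch with an invented bar_softc:

struct bar_softc {
	struct {
		struct delayed_work	retire_work;
	} mm;
};

static void
bar_retire_work(struct work_struct *work)
{
	/* one step: offset of the embedded work_struct in the softc */
	struct bar_softc *sc = container_of(work, struct bar_softc,
	    mm.retire_work.work);
	/* two steps: work_struct -> delayed_work, delayed_work -> softc */
	struct bar_softc *sc2 = container_of(to_delayed_work(work),
	    struct bar_softc, mm.retire_work);

	/* sc and sc2 are the same pointer */
	(void)sc;
	(void)sc2;
}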
-/* $OpenBSD: i915_irq.c,v 1.25 2015/04/12 17:10:07 kettenis Exp $ */
+/* $OpenBSD: i915_irq.c,v 1.26 2015/06/24 08:32:39 kettenis Exp $ */
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
*/
/*
/*
* Handle hotplug events outside the interrupt handler proper.
*/
-static void i915_hotplug_work_func(void *arg1)
+static void i915_hotplug_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *)arg1;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
}
}
-static void gen6_pm_rps_work(void *arg1)
+static void gen6_pm_rps_work(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;
* this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
*/
-static void ivybridge_parity_work(void *arg1)
+static void ivybridge_parity_work(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ l3_parity.error_work);
u32 error_status, row, bank, subbank;
// char *parity_event[5];
uint32_t misccpctl;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- task_add(systq, &dev_priv->l3_parity.error_task);
+ queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- task_add(systq, &dev_priv->rps.task);
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t valleyview_irq_handler(void *arg)
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
int pipe;
if (pch_iir & SDE_HOTPLUG_MASK)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
int pipe;
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_error_work_func(void *arg1)
+static void i915_error_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ error_work);
struct drm_device *dev = dev_priv->dev;
#if 0
char *error_event[] = { "ERROR=1", NULL };
wake_up_all(ring);
}
- task_add(systq, &dev_priv->error_task);
+ queue_work(dev_priv->wq, &dev_priv->error_work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- task_add(systq, &dev_priv->hotplug_task);
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
{
struct drm_i915_private *dev_priv = dev->dev_private;
- task_set(&dev_priv->hotplug_task, i915_hotplug_work_func, dev_priv);
- task_set(&dev_priv->error_task, i915_error_work_func, dev_priv);
- task_set(&dev_priv->rps.task, gen6_pm_rps_work, dev_priv);
- task_set(&dev_priv->l3_parity.error_task, ivybridge_parity_work,
- dev_priv);
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-/* $OpenBSD: intel_display.c,v 1.50 2015/04/18 14:47:34 jsg Exp $ */
+/* $OpenBSD: intel_display.c,v 1.51 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright © 2006-2007 Intel Corporation
*
spin_unlock_irqrestore(&dev->event_lock, flags);
if (work) {
- task_del(systq, &work->task);
+ cancel_work_sync(&work->work);
kfree(work);
}
kfree(intel_crtc);
}
-static void intel_unpin_work_fn(void *arg1)
+static void intel_unpin_work_fn(struct work_struct *__work)
{
- struct intel_unpin_work *work = arg1;
+ struct intel_unpin_work *work =
+ container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
mutex_lock(&dev->struct_mutex);
&obj->pending_flip);
wake_up(&dev_priv->pending_flip_queue);
- task_add(systq, &work->task);
+ queue_work(dev_priv->wq, &work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
work->event = event;
work->crtc = crtc;
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
- task_set(&work->task, intel_unpin_work_fn, work);
+ INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
/* Disable the irq before mode object teardown, for the irq might
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
- task_del(systq, &dev_priv->hotplug_task);
- task_del(systq, &dev_priv->rps.task);
+ cancel_work_sync(&dev_priv->hotplug_work);
+ cancel_work_sync(&dev_priv->rps.work);
/* flush any delayed tasks or pending work */
#ifdef notyet
-/* $OpenBSD: intel_dp.c,v 1.24 2015/04/12 11:26:54 jsg Exp $ */
+/* $OpenBSD: intel_dp.c,v 1.25 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright © 2008 Intel Corporation
*
}
}
-static void ironlake_panel_vdd_work(void *arg1)
+static void ironlake_panel_vdd_work(struct work_struct *__work)
{
- struct intel_dp *intel_dp = arg1;
+ struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
+ struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
mutex_lock(&dev->mode_config.mutex);
mutex_unlock(&dev->mode_config.mutex);
}
-static void
-ironlake_panel_vdd_tick(void *arg)
-{
- struct intel_dp *intel_dp = arg;
-
- task_add(systq, &intel_dp->panel_vdd_task);
-}
-
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
* time from now (relative to the power down delay)
* to keep the panel power up across a sequence of operations
*/
- timeout_add_msec(&intel_dp->panel_vdd_to, intel_dp->panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->panel_vdd_work,
+ msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
}
}
#endif
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
- timeout_del(&intel_dp->panel_vdd_to);
- task_del(systq, &intel_dp->panel_vdd_task);
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
kfree(intel_dig_port);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- task_set(&intel_dp->panel_vdd_task, ironlake_panel_vdd_work, intel_dp);
- timeout_set(&intel_dp->panel_vdd_to, ironlake_panel_vdd_tick, intel_dp);
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
-/* $OpenBSD: intel_drv.h,v 1.6 2015/04/12 11:26:54 jsg Exp $ */
+/* $OpenBSD: intel_drv.h,v 1.7 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007-2008 Intel Corporation
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
- struct task panel_vdd_task;
- struct timeout panel_vdd_to;
+ struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct intel_connector *attached_connector;
};
}
struct intel_unpin_work {
- struct task task;
+ struct work_struct work;
struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
};
struct intel_fbc_work {
- struct task task;
- struct timeout to;
+ struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int interval;
-/* $OpenBSD: intel_pm.c,v 1.34 2015/04/18 14:47:34 jsg Exp $ */
+/* $OpenBSD: intel_pm.c,v 1.35 2015/06/24 08:32:39 kettenis Exp $ */
/*
* Copyright © 2012 Intel Corporation
*
return dev_priv->display.fbc_enabled(dev);
}
-static void intel_fbc_work_fn(void *arg1)
+static void intel_fbc_work_fn(struct work_struct *__work)
{
- struct intel_fbc_work *work = arg1;
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
struct drm_device *dev = work->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
kfree(work);
}
-static void
-intel_fbc_work_tick(void *arg)
-{
- struct intel_fbc_work *work = arg;
-
- task_add(systq, &work->task);
-}
-
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc_work == NULL)
* dev_priv->fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
- timeout_del(&dev_priv->fbc_work->to);
- if (task_del(systq, &dev_priv->fbc_work->task))
+ if (cancel_delayed_work(&dev_priv->fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc_work);
work->crtc = crtc;
work->fb = crtc->fb;
work->interval = interval;
- task_set(&work->task, intel_fbc_work_fn, work);
- timeout_set(&work->to, intel_fbc_work_tick, work);
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc_work = work;
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*/
- timeout_add_msec(&work->to, 50);
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
- timeout_del(&dev_priv->rps.delayed_resume_to);
- task_del(systq, &dev_priv->rps.delayed_resume_task);
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
-static void intel_gen6_powersave_work(void *arg1)
+static void intel_gen6_powersave_work(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = arg1;
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ rps.delayed_resume_work.work);
struct drm_device *dev = dev_priv->dev;
mutex_lock(&dev_priv->rps.hw_lock);
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void
-intel_gen6_powersave_tick(void *arg)
-{
- drm_i915_private_t *dev_priv = arg;
-
- task_add(systq, &dev_priv->rps.delayed_resume_task);
-}
-
void intel_enable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
* done at any specific time, so do this out of our fast path
* to make resume and init faster.
*/
- timeout_add_sec(&dev_priv->rps.delayed_resume_to, 1);
+ schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+ round_jiffies_up_relative(HZ));
}
}
{
struct drm_i915_private *dev_priv = dev->dev_private;
- task_set(&dev_priv->rps.delayed_resume_task, intel_gen6_powersave_work,
- dev_priv);
- timeout_set(&dev_priv->rps.delayed_resume_to, intel_gen6_powersave_tick,
- dev_priv);
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)