Diffstat (limited to 'sys/dev/pci/drm/i915/i915_irq.c')
-rw-r--r-- | sys/dev/pci/drm/i915/i915_irq.c | 2916
1 file changed, 2916 insertions, 0 deletions
diff --git a/sys/dev/pci/drm/i915/i915_irq.c b/sys/dev/pci/drm/i915/i915_irq.c
new file mode 100644
index 00000000000..f550d346774
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_irq.c
@@ -0,0 +1,2916 @@
+/* $OpenBSD$ */
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+void ironlake_enable_display_irq(drm_i915_private_t *, u32);
+void intel_enable_asle(struct drm_device *);
+int i915_pipeconf_enabled(struct drm_device *, int);
+u32 gm45_get_vblank_counter(struct drm_device *, int);
+int i915_get_crtc_scanoutpos(struct drm_device *, int, int *, int *);
+int i915_get_vblank_timestamp(struct drm_device *, int, int *,
+    struct timeval *, unsigned);
+void ironlake_handle_rps_change(struct drm_device *);
+void notify_ring(struct drm_device *, struct intel_ring_buffer *);
+void ivybridge_handle_parity_error(struct drm_device *);
+void snb_gt_irq_handler(struct drm_device *, struct inteldrm_softc *, u32);
+void gen6_queue_rps_work(struct inteldrm_softc *, u32);
+int valleyview_intr(void *);
+void ibx_irq_handler(struct drm_device *, u32);
+void cpt_irq_handler(struct drm_device *, u32);
+int ivybridge_intr(void *);
+void ilk_gt_irq_handler(struct drm_device *, struct inteldrm_softc *, u32);
+int ironlake_intr(void *);
+void i915_get_extra_instdone(struct drm_device *, uint32_t *);
+void i915_report_and_clear_eir(struct drm_device *);
+void i915_handle_error(struct drm_device *, bool);
+void i915_pageflip_stall_check(struct drm_device *, int);
+int ironlake_enable_vblank(struct drm_device *, int);
+int ivybridge_enable_vblank(struct drm_device *, int);
+int valleyview_enable_vblank(struct drm_device *, int);
+void ironlake_disable_vblank(struct drm_device *, int);
+void ivybridge_disable_vblank(struct drm_device *, int);
+void valleyview_disable_vblank(struct drm_device *, int);
+u32 ring_last_seqno(struct intel_ring_buffer *);
+bool i915_hangcheck_ring_idle(struct intel_ring_buffer *, bool *);
+bool kick_ring(struct intel_ring_buffer *);
+bool i915_hangcheck_hung(struct drm_device *);
+void ironlake_irq_preinstall(struct drm_device *);
+void valleyview_irq_preinstall(struct drm_device *);
+void ironlake_enable_pch_hotplug(struct drm_device *);
+int ironlake_irq_postinstall(struct drm_device *);
+int ivybridge_irq_postinstall(struct drm_device *);
+int valleyview_irq_postinstall(struct drm_device *);
+void valleyview_irq_uninstall(struct drm_device *);
+void ironlake_irq_uninstall(struct drm_device *);
+void i8xx_irq_preinstall(struct drm_device *);
+int i8xx_irq_postinstall(struct drm_device *);
+int i8xx_intr(void *);
+void i8xx_irq_uninstall(struct drm_device *);
+void i915_irq_preinstall(struct drm_device *);
+int i915_irq_postinstall(struct drm_device *);
+int i915_intr(void *);
+void i915_irq_uninstall(struct drm_device *);
+void i965_irq_preinstall(struct drm_device *);
+int i965_irq_postinstall(struct drm_device *);
+int i965_intr(void *);
+void i965_irq_uninstall(struct drm_device *);
+void intel_irq_init(struct drm_device *);
+void i915_hotplug_work_func(void *, void *);
+void i915_error_work_func(void *, void *);
+void ivybridge_parity_work(void *, void *);
+void gen6_pm_rps_work(void *, void *);
+
+/* For display hotplug interrupt */
+void
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
+	}
+}
+
+static inline void
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
+	}
+}
+
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+	if ((dev_priv->pipestat[pipe] & mask) != mask) {
+		u32 reg = PIPESTAT(pipe);
+
+		dev_priv->pipestat[pipe] |= mask;
+		/* Enable the interrupt, clear any pending status */
+		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
+		POSTING_READ(reg);
+	}
+}
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+	if ((dev_priv->pipestat[pipe] & mask) != 0) {
+		u32 reg = PIPESTAT(pipe);
+
+		dev_priv->pipestat[pipe] &= ~mask;
+		I915_WRITE(reg, dev_priv->pipestat[pipe]);
+		POSTING_READ(reg);
+	}
+}
+
+/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void
+intel_enable_asle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* FIXME: opregion/asle for VLV */
+	if (IS_VALLEYVIEW(dev))
+		return;
+
+	mtx_enter(&dev_priv->irq_lock);
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_enable_display_irq(dev_priv, DE_GSE);
+	else {
+		i915_enable_pipestat(dev_priv, 1,
+		    PIPE_LEGACY_BLC_EVENT_ENABLE);
+		if (INTEL_INFO(dev)->gen >= 4)
+			i915_enable_pipestat(dev_priv, 0,
+			    PIPE_LEGACY_BLC_EVENT_ENABLE);
+	}
+
+	mtx_leave(&dev_priv->irq_lock);
+}
+
+/**
+ * i915_pipeconf_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+int
+i915_pipeconf_enabled(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+	    pipe);
+
+	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32
+i915_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long high_frame;
+	unsigned long low_frame;
+	u32 high1, high2, low;
+
+	if (!i915_pipeconf_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+		    "pipe %c\n", pipe_name(pipe));
+		return 0;
+	}
+
+	high_frame = PIPEFRAME(pipe);
+	low_frame = PIPEFRAMEPIXEL(pipe);
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+	} while (high1 != high2);
+
+	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	low >>= PIPE_FRAME_LOW_SHIFT;
+	return (high1 << 8) | low;
+}
+
+u32
+gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int reg = PIPE_FRMCOUNT_GM45(pipe);
+
+	if (!i915_pipeconf_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+		    "pipe %c\n", pipe_name(pipe));
+		return 0;
+	}
+
+	return I915_READ(reg);
+}
+
+int
+i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+    int *vpos, int *hpos)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 vbl = 0, position = 0;
+	int vbl_start, vbl_end, htotal, vtotal;
+	bool in_vbl = true;
+	int ret = 0;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+	    pipe);
+
+	if (!i915_pipeconf_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+		    "pipe %c\n", pipe_name(pipe));
+		return 0;
+	}
+
+	/* Get vtotal. */
+	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* No obvious pixelcount register. Only query vertical
+		 * scanout position from Display scan line register.
+		 */
+		position = I915_READ(PIPEDSL(pipe));
+
+		/* Decode into vertical scanout position. Don't have
+		 * horizontal scanout position.
+		 */
+		*vpos = position & 0x1fff;
+		*hpos = 0;
+	} else {
+		/* Have access to pixelcount since start of frame.
+		 * We can split this into vertical and horizontal
+		 * scanout position.
+		 */
+		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+		*vpos = position / htotal;
+		*hpos = position - (*vpos * htotal);
+	}
+
+	/* Query vblank area. */
+	vbl = I915_READ(VBLANK(cpu_transcoder));
+
+	/* Test position against vblank region. */
+	vbl_start = vbl & 0x1fff;
+	vbl_end = (vbl >> 16) & 0x1fff;
+
+	if ((*vpos < vbl_start) || (*vpos > vbl_end))
+		in_vbl = false;
+
+	/* Inside "upper part" of vblank area? Apply corrective offset: */
+	if (in_vbl && (*vpos >= vbl_start))
+		*vpos = *vpos - vtotal;
+
+	/* Readouts valid? */
+	if (vbl > 0)
+		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/* In vblank?
*/ + if (in_vbl) + ret |= DRM_SCANOUTPOS_INVBL; + + return ret; +} + +int +i915_get_vblank_timestamp(struct drm_device *dev, int pipe, + int *max_error, + struct timeval *vblank_time, + unsigned flags) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + + if (pipe < 0 || pipe >= dev_priv->num_pipe) { + DRM_ERROR("Invalid crtc %d\n", pipe); + return -EINVAL; + } + + /* Get drm_crtc to timestamp: */ + crtc = intel_get_crtc_for_pipe(dev, pipe); + if (crtc == NULL) { + DRM_ERROR("Invalid crtc %d\n", pipe); + return -EINVAL; + } + + if (!crtc->enabled) { + DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); + return -EBUSY; + } + + /* Helper routine in DRM core does all the work: */ + return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, + vblank_time, flags, + crtc); +} + +/* + * Handle hotplug events outside the interrupt handler proper. + */ +void +i915_hotplug_work_func(void *arg1, void *arg2) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *)arg1; + struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; + + rw_enter_write(&mode_config->rwl); + DRM_DEBUG_KMS("running encoder hotplug functions\n"); + + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) + if (encoder->hot_plug) + encoder->hot_plug(encoder); + + rw_exit_write(&mode_config->rwl); + + /* Just fire off a uevent and let userspace tell us what to do */ + drm_helper_hpd_irq_event(dev); +} + +/* defined intel_pm.c */ +extern struct mutex mchdev_lock; + +void +ironlake_handle_rps_change(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 busy_up, busy_down, max_avg, min_avg; + u8 new_delay; + + mtx_enter(&mchdev_lock); + + I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); + + new_delay = dev_priv->ips.cur_delay; + + I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); + busy_up = I915_READ(RCPREVBSYTUPAVG); + busy_down = I915_READ(RCPREVBSYTDNAVG); + max_avg = I915_READ(RCBMAXAVG); + min_avg = I915_READ(RCBMINAVG); + + /* Handle RCS change request from hw */ + if (busy_up > max_avg) { + if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) + new_delay = dev_priv->ips.cur_delay - 1; + if (new_delay < dev_priv->ips.max_delay) + new_delay = dev_priv->ips.max_delay; + } else if (busy_down < min_avg) { + if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) + new_delay = dev_priv->ips.cur_delay + 1; + if (new_delay > dev_priv->ips.min_delay) + new_delay = dev_priv->ips.min_delay; + } + + if (ironlake_set_drps(dev, new_delay)) + dev_priv->ips.cur_delay = new_delay; + + mtx_leave(&mchdev_lock); + + return; +} + +void +notify_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + + if (ring->obj == NULL) + return; + +// trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); + + wakeup(ring); + dev_priv->hangcheck_count = 0; + timeout_add_msec(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD); + +#ifdef notyet + wakeup(&ring->irq_queue); + if (i915_enable_hangcheck) { + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, + round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + } +#endif +} + +void +gen6_pm_rps_work(void *arg1, void *arg2) +{ + drm_i915_private_t *dev_priv = arg1; + struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; + u32 pm_iir, pm_imr; + u8 new_delay; + + mtx_enter(&dev_priv->rps.lock); + pm_iir = dev_priv->rps.pm_iir; 
+ dev_priv->rps.pm_iir = 0; + pm_imr = I915_READ(GEN6_PMIMR); + I915_WRITE(GEN6_PMIMR, 0); + mtx_leave(&dev_priv->rps.lock); + + if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) + return; + + rw_enter_write(&dev_priv->rps.hw_lock); + + if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) + new_delay = dev_priv->rps.cur_delay + 1; + else + new_delay = dev_priv->rps.cur_delay - 1; + + /* sysfs frequency interfaces may have snuck in while servicing the + * interrupt + */ + if (!(new_delay > dev_priv->rps.max_delay || + new_delay < dev_priv->rps.min_delay)) { + gen6_set_rps(dev, new_delay); + } + + rw_exit_write(&dev_priv->rps.hw_lock); +} + +/** + * ivybridge_parity_work - Workqueue called when a parity error interrupt + * occurred. + * @work: workqueue struct + * + * Doesn't actually do anything except notify userspace. As a consequence of + * this event, userspace should try to remap the bad rows since statistically + * it is likely the same row is more likely to go bad again. + */ +void +ivybridge_parity_work(void *arg1, void *arg2) +{ + drm_i915_private_t *dev_priv = arg1; + struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; + u32 error_status, row, bank, subbank; +// char *parity_event[5]; + uint32_t misccpctl; + + /* We must turn off DOP level clock gating to access the L3 registers. + * In order to prevent a get/put style interface, acquire struct mutex + * any time we access those registers. + */ + DRM_LOCK(); + + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + POSTING_READ(GEN7_MISCCPCTL); + + error_status = I915_READ(GEN7_L3CDERRST1); + row = GEN7_PARITY_ERROR_ROW(error_status); + bank = GEN7_PARITY_ERROR_BANK(error_status); + subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); + + I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | + GEN7_L3CDERRST1_ENABLE); + POSTING_READ(GEN7_L3CDERRST1); + + I915_WRITE(GEN7_MISCCPCTL, misccpctl); + + mtx_enter(&dev_priv->irq_lock); + dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + mtx_leave(&dev_priv->irq_lock); + + DRM_UNLOCK(); + +#if 0 + parity_event[0] = "L3_PARITY_ERROR=1"; + parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); + parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); + parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); + parity_event[4] = NULL; +#endif + +#ifdef notyet + kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, + KOBJ_CHANGE, parity_event); +#endif + + DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", + row, bank, subbank); + +#if 0 + free(parity_event[3], M_DRM); + free(parity_event[2], M_DRM); + free(parity_event[1], M_DRM); +#endif +} + +void +ivybridge_handle_parity_error(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + if (!HAS_L3_GPU_CACHE(dev)) + return; + + mtx_enter(&dev_priv->irq_lock); + dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + mtx_leave(&dev_priv->irq_lock); + + workq_queue_task(NULL, &dev_priv->l3_parity.error_task, 0, + ivybridge_parity_work, dev_priv, NULL); + +} + +void +snb_gt_irq_handler(struct drm_device *dev, + struct inteldrm_softc *dev_priv, + u32 gt_iir) +{ + + if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | + GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) + notify_ring(dev, &dev_priv->rings[RCS]); + if (gt_iir & GEN6_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[VCS]); + if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) + 
notify_ring(dev, &dev_priv->rings[BCS]); + + if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_RENDER_CS_ERROR_INTERRUPT)) { + DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); + i915_handle_error(dev, false); + } + + if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) + ivybridge_handle_parity_error(dev); +} + +void +gen6_queue_rps_work(struct inteldrm_softc *dev_priv, + u32 pm_iir) +{ + /* + * IIR bits should never already be set because IMR should + * prevent an interrupt from being shown in IIR. The warning + * displays a case where we've unsafely cleared + * dev_priv->rps.pm_iir. Although missing an interrupt of the same + * type is not a problem, it displays a problem in the logic. + * + * The mask bit in IMR is cleared by dev_priv->rps.work. + */ + + mtx_enter(&dev_priv->rps.lock); + dev_priv->rps.pm_iir |= pm_iir; + I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); + POSTING_READ(GEN6_PMIMR); + mtx_leave(&dev_priv->rps.lock); + + workq_queue_task(NULL, &dev_priv->rps.task, 0, gen6_pm_rps_work, + dev_priv, NULL); +} + +int +valleyview_intr(void *arg) +{ + drm_i915_private_t *dev_priv = arg; + struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; + u32 iir, gt_iir, pm_iir; + int ret = 0; + int pipe; + u32 pipe_stats[I915_MAX_PIPES]; + bool blc_event; + +// atomic_inc(&dev_priv->irq_received); + + while (true) { + iir = I915_READ(VLV_IIR); + gt_iir = I915_READ(GTIIR); + pm_iir = I915_READ(GEN6_PMIIR); + + if (gt_iir == 0 && pm_iir == 0 && iir == 0) + goto out; + + ret = 1; + + snb_gt_irq_handler(dev, dev_priv, gt_iir); + + mtx_enter(&dev_priv->irq_lock); + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + } + } + mtx_leave(&dev_priv->irq_lock); + + for_each_pipe(pipe) { + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) + drm_handle_vblank(dev, pipe); + + if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip(dev, pipe); + } + } + + /* Consume port. 
Then clear IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_status & dev_priv->hotplug_supported_mask) + workq_queue_task(NULL, &dev_priv->hotplug_task, + 0, i915_hotplug_work_func, dev_priv, NULL); + + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + I915_READ(PORT_HOTPLUG_STAT); + } + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); + + I915_WRITE(GTIIR, gt_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + I915_WRITE(VLV_IIR, iir); + } + +out: + return ret; +} + +void +ibx_irq_handler(struct drm_device *dev, u32 pch_iir) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + if (pch_iir & SDE_HOTPLUG_MASK) + workq_queue_task(NULL, &dev_priv->hotplug_task, 0, + i915_hotplug_work_func, dev_priv, NULL); + + if (pch_iir & SDE_AUDIO_POWER_MASK) + DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", + (pch_iir & SDE_AUDIO_POWER_MASK) >> + SDE_AUDIO_POWER_SHIFT); + + if (pch_iir & SDE_GMBUS) + DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); + + if (pch_iir & SDE_AUDIO_HDCP_MASK) + DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); + + if (pch_iir & SDE_AUDIO_TRANS_MASK) + DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); + + if (pch_iir & SDE_POISON) + DRM_ERROR("PCH poison interrupt\n"); + + if (pch_iir & SDE_FDI_MASK) + for_each_pipe(pipe) + DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + I915_READ(FDI_RX_IIR(pipe))); + + if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) + DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); + + if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) + DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); + + if (pch_iir & SDE_TRANSB_FIFO_UNDER) + DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); + if (pch_iir & SDE_TRANSA_FIFO_UNDER) + DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); +} + +void +cpt_irq_handler(struct drm_device *dev, u32 pch_iir) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + if (pch_iir & SDE_HOTPLUG_MASK_CPT) + workq_queue_task(NULL, &dev_priv->hotplug_task, 0, + i915_hotplug_work_func, dev_priv, NULL); + + if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) + DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", + (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> + SDE_AUDIO_POWER_SHIFT_CPT); + + if (pch_iir & SDE_AUX_MASK_CPT) + DRM_DEBUG_DRIVER("AUX channel interrupt\n"); + + if (pch_iir & SDE_GMBUS_CPT) + DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); + + if (pch_iir & SDE_AUDIO_CP_REQ_CPT) + DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); + + if (pch_iir & SDE_AUDIO_CP_CHG_CPT) + DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); + + if (pch_iir & SDE_FDI_MASK_CPT) + for_each_pipe(pipe) + DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + I915_READ(FDI_RX_IIR(pipe))); +} + +int +ivybridge_intr(void *arg) +{ + drm_i915_private_t *dev_priv = arg; + struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; + u32 de_iir, gt_iir, de_ier, pm_iir; + int ret = 0; + int i; + +// atomic_inc(&dev_priv->irq_received); + + /* disable master interrupt before clearing iir */ + de_ier = I915_READ(DEIER); + I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); + + gt_iir = I915_READ(GTIIR); + if (gt_iir) { + snb_gt_irq_handler(dev, dev_priv, gt_iir); 
+ I915_WRITE(GTIIR, gt_iir); + ret = 1; + } + + de_iir = I915_READ(DEIIR); + if (de_iir) { +#ifdef notyet + if (de_iir & DE_GSE_IVB) + intel_opregion_gse_intr(dev); +#endif + + for (i = 0; i < 3; i++) { + if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) + drm_handle_vblank(dev, i); + if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { + intel_prepare_page_flip(dev, i); + intel_finish_page_flip_plane(dev, i); + } + } + + /* check event from PCH */ + if (de_iir & DE_PCH_EVENT_IVB) { + u32 pch_iir = I915_READ(SDEIIR); + + cpt_irq_handler(dev, pch_iir); + + /* clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } + + I915_WRITE(DEIIR, de_iir); + ret = 1; + } + + pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) { + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + ret = 1; + } + + I915_WRITE(DEIER, de_ier); + POSTING_READ(DEIER); + + return ret; +} + +void +ilk_gt_irq_handler(struct drm_device *dev, + struct inteldrm_softc *dev_priv, + u32 gt_iir) +{ + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + notify_ring(dev, &dev_priv->rings[RCS]); + if (gt_iir & GT_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[VCS]); +} + +int +ironlake_intr(void *arg) +{ + drm_i915_private_t *dev_priv = arg; + struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; + int ret = 0; + u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; + +// atomic_inc(&dev_priv->irq_received); + + /* disable master interrupt before clearing iir */ + de_ier = I915_READ(DEIER); + I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); + POSTING_READ(DEIER); + + de_iir = I915_READ(DEIIR); + gt_iir = I915_READ(GTIIR); + pch_iir = I915_READ(SDEIIR); + pm_iir = I915_READ(GEN6_PMIIR); + + if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && + (!IS_GEN6(dev) || pm_iir == 0)) + goto done; + + ret = 1; + + if (IS_GEN5(dev)) + ilk_gt_irq_handler(dev, dev_priv, gt_iir); + else + snb_gt_irq_handler(dev, dev_priv, gt_iir); + +#ifdef notyet + if (de_iir & DE_GSE) + intel_opregion_gse_intr(dev); +#endif + + if (de_iir & DE_PIPEA_VBLANK) + drm_handle_vblank(dev, 0); + + if (de_iir & DE_PIPEB_VBLANK) + drm_handle_vblank(dev, 1); + + if (de_iir & DE_PLANEA_FLIP_DONE) { + intel_prepare_page_flip(dev, 0); + intel_finish_page_flip_plane(dev, 0); + } + + if (de_iir & DE_PLANEB_FLIP_DONE) { + intel_prepare_page_flip(dev, 1); + intel_finish_page_flip_plane(dev, 1); + } + + /* check event from PCH */ + if (de_iir & DE_PCH_EVENT) { + if (HAS_PCH_CPT(dev)) + cpt_irq_handler(dev, pch_iir); + else + ibx_irq_handler(dev, pch_iir); + } + + if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) + ironlake_handle_rps_change(dev); + + if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); + + /* should clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + I915_WRITE(GTIIR, gt_iir); + I915_WRITE(DEIIR, de_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + +done: + I915_WRITE(DEIER, de_ier); + POSTING_READ(DEIER); + + return ret; +} + +/** + * i915_error_work_func - do process context error handling work + * @work: work struct + * + * Fire an error uevent so userspace can see that a hang or error + * was detected. 
+ */ +void +i915_error_work_func(void *arg1, void *arg2) +{ + drm_i915_private_t *dev_priv = arg1; + struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; +#if 0 + char *error_event[] = { "ERROR=1", NULL }; + char *reset_event[] = { "RESET=1", NULL }; + char *reset_done_event[] = { "ERROR=0", NULL }; + + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); +#endif + + if (atomic_read(&dev_priv->mm.wedged)) { + DRM_DEBUG_DRIVER("resetting chip\n"); +// kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); + if (!i915_reset(dev)) { + atomic_set(&dev_priv->mm.wedged, 0); +// kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); + } + mtx_enter(&dev_priv->error_completion_lock); + dev_priv->error_completion++; + wakeup(&dev_priv->error_completion); + mtx_leave(&dev_priv->error_completion_lock); + } +} + +/* NB: please notice the memset */ +void +i915_get_extra_instdone(struct drm_device *dev, + uint32_t *instdone) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); + + switch(INTEL_INFO(dev)->gen) { + case 2: + case 3: + instdone[0] = I915_READ(INSTDONE); + break; + case 4: + case 5: + case 6: + instdone[0] = I915_READ(INSTDONE_I965); + instdone[1] = I915_READ(INSTDONE1); + break; + default: +// WARN_ONCE(1, "Unsupported platform\n"); + case 7: + instdone[0] = I915_READ(GEN7_INSTDONE_1); + instdone[1] = I915_READ(GEN7_SC_INSTDONE); + instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); + instdone[3] = I915_READ(GEN7_ROW_INSTDONE); + break; + } +} + +#ifdef CONFIG_DEBUG_FS +struct drm_i915_error_object * +i915_error_object_create(struct inteldrm_softc *dev_priv, + struct drm_i915_gem_object *src) +{ + struct drm_i915_error_object *dst; + int i, count; + u32 reloc_offset; + + if (src == NULL || src->pages == NULL) + return NULL; + + count = src->base.size / PAGE_SIZE; + + dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); + if (dst == NULL) + return NULL; + + reloc_offset = src->gtt_offset; + for (i = 0; i < count; i++) { + unsigned long flags; + void *d; + + d = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (d == NULL) + goto unwind; + + local_irq_save(flags); + if (reloc_offset < dev_priv->mm.gtt_mappable_end && + src->has_global_gtt_mapping) { + void __iomem *s; + + /* Simply ignore tiling or any overlapping fence. + * It's part of the error state, and this hopefully + * captures what the GPU read. 
+ */ + + s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + reloc_offset); + memcpy_fromio(d, s, PAGE_SIZE); + io_mapping_unmap_atomic(s); + } else { + struct page *page; + void *s; + + page = i915_gem_object_get_page(src, i); + + drm_clflush_pages(&page, 1); + + s = kmap_atomic(page); + memcpy(d, s, PAGE_SIZE); + kunmap_atomic(s); + + drm_clflush_pages(&page, 1); + } + local_irq_restore(flags); + + dst->pages[i] = d; + + reloc_offset += PAGE_SIZE; + } + dst->page_count = count; + dst->gtt_offset = src->gtt_offset; + + return dst; + +unwind: + while (i--) + kfree(dst->pages[i]); + kfree(dst); + return NULL; +} + +void +i915_error_object_free(struct drm_i915_error_object *obj) +{ + int page; + + if (obj == NULL) + return; + + for (page = 0; page < obj->page_count; page++) + kfree(obj->pages[page]); + + kfree(obj); +} + +void +i915_error_state_free(struct kref *error_ref) +{ + struct drm_i915_error_state *error = container_of(error_ref, + typeof(*error), ref); + int i; + + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + i915_error_object_free(error->ring[i].batchbuffer); + i915_error_object_free(error->ring[i].ringbuffer); + kfree(error->ring[i].requests); + } + + kfree(error->active_bo); + kfree(error->overlay); + kfree(error); +} + +void +capture_bo(struct drm_i915_error_buffer *err, + struct drm_i915_gem_object *obj) +{ + err->size = obj->base.size; + err->name = obj->base.name; + err->rseqno = obj->last_read_seqno; + err->wseqno = obj->last_write_seqno; + err->gtt_offset = obj->gtt_offset; + err->read_domains = obj->base.read_domains; + err->write_domain = obj->base.write_domain; + err->fence_reg = obj->fence_reg; + err->pinned = 0; + if (obj->pin_count > 0) + err->pinned = 1; + if (obj->user_pin_count > 0) + err->pinned = -1; + err->tiling = obj->tiling_mode; + err->dirty = obj->dirty; + err->purgeable = obj->madv != I915_MADV_WILLNEED; + err->ring = obj->ring ? 
obj->ring->id : -1; + err->cache_level = obj->cache_level; +} + +u32 +capture_active_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, mm_list) { + capture_bo(err++, obj); + if (++i == count) + break; + } + + return i; +} + +u32 +capture_pinned_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, gtt_list) { + if (obj->pin_count == 0) + continue; + + capture_bo(err++, obj); + if (++i == count) + break; + } + + return i; +} + +void +i915_gem_record_fences(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + int i; + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + + } +} + +struct drm_i915_error_object * +i915_error_first_batchbuffer(struct inteldrm_softc *dev_priv, + struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_object *obj; + u32 seqno; + + if (!ring->get_seqno) + return NULL; + + if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { + u32 acthd = I915_READ(ACTHD); + + if (WARN_ON(ring->id != RCS)) + return NULL; + + obj = ring->private; + if (acthd >= obj->gtt_offset && + acthd < obj->gtt_offset + obj->base.size) + return i915_error_object_create(dev_priv, obj); + } + + seqno = ring->get_seqno(ring, false); + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (obj->ring != ring) + continue; + + if (i915_seqno_passed(seqno, obj->last_read_seqno)) + continue; + + if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) + continue; + + /* We need to copy these to an anonymous buffer as the simplest + * method to avoid being overwritten by userspace. 
+ */ + return i915_error_object_create(dev_priv, obj); + } + + return NULL; +} + +void +i915_record_ring_state(struct drm_device *dev, + struct drm_i915_error_state *error, + struct intel_ring_buffer *ring) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen >= 6) { + error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); + error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); + error->semaphore_mboxes[ring->id][0] + = I915_READ(RING_SYNC_0(ring->mmio_base)); + error->semaphore_mboxes[ring->id][1] + = I915_READ(RING_SYNC_1(ring->mmio_base)); + error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; + error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; + } + + if (INTEL_INFO(dev)->gen >= 4) { + error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); + error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); + error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); + error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); + error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); + if (ring->id == RCS) + error->bbaddr = I915_READ64(BB_ADDR); + } else { + error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); + error->ipeir[ring->id] = I915_READ(IPEIR); + error->ipehr[ring->id] = I915_READ(IPEHR); + error->instdone[ring->id] = I915_READ(INSTDONE); + } + + error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); + error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); + error->seqno[ring->id] = ring->get_seqno(ring, false); + error->acthd[ring->id] = intel_ring_get_active_head(ring); + error->head[ring->id] = I915_READ_HEAD(ring); + error->tail[ring->id] = I915_READ_TAIL(ring); + error->ctl[ring->id] = I915_READ_CTL(ring); + + error->cpu_ring_head[ring->id] = ring->head; + error->cpu_ring_tail[ring->id] = ring->tail; +} + +void +i915_gem_record_rings(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + struct drm_i915_gem_request *request; + int i, count; + + for_each_ring(ring, dev_priv, i) { + i915_record_ring_state(dev, error, ring); + + error->ring[i].batchbuffer = + i915_error_first_batchbuffer(dev_priv, ring); + + error->ring[i].ringbuffer = + i915_error_object_create(dev_priv, ring->obj); + + count = 0; + list_for_each_entry(request, &ring->request_list, list) + count++; + + error->ring[i].num_requests = count; + error->ring[i].requests = + kmalloc(count*sizeof(struct drm_i915_error_request), + GFP_ATOMIC); + if (error->ring[i].requests == NULL) { + error->ring[i].num_requests = 0; + continue; + } + + count = 0; + list_for_each_entry(request, &ring->request_list, list) { + struct drm_i915_error_request *erq; + + erq = &error->ring[i].requests[count++]; + erq->seqno = request->seqno; + erq->jiffies = request->emitted_jiffies; + erq->tail = request->tail; + } + } +} + +/** + * i915_capture_error_state - capture an error record for later analysis + * @dev: drm device + * + * Should be called when an error is detected (either a hang or an error + * interrupt) to capture error state from the time of the error. Fills + * out a structure which becomes available in debugfs for user level tools + * to pick up. 
+ */ +void +i915_capture_error_state(struct drm_device *dev) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + struct drm_i915_error_state *error; + unsigned long flags; + int i, pipe; + + spin_lock_irqsave(&dev_priv->error_lock, flags); + error = dev_priv->first_error; + spin_unlock_irqrestore(&dev_priv->error_lock, flags); + if (error) + return; + + /* Account for pipe specific data like PIPE*STAT */ + error = kzalloc(sizeof(*error), GFP_ATOMIC); + if (!error) { + DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); + return; + } + + DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", + dev->primary->index); + + kref_init(&error->ref); + error->eir = I915_READ(EIR); + error->pgtbl_er = I915_READ(PGTBL_ER); + error->ccid = I915_READ(CCID); + + if (HAS_PCH_SPLIT(dev)) + error->ier = I915_READ(DEIER) | I915_READ(GTIER); + else if (IS_VALLEYVIEW(dev)) + error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); + else if (IS_GEN2(dev)) + error->ier = I915_READ16(IER); + else + error->ier = I915_READ(IER); + + if (INTEL_INFO(dev)->gen >= 6) + error->derrmr = I915_READ(DERRMR); + + if (IS_VALLEYVIEW(dev)) + error->forcewake = I915_READ(FORCEWAKE_VLV); + else if (INTEL_INFO(dev)->gen >= 7) + error->forcewake = I915_READ(FORCEWAKE_MT); + else if (INTEL_INFO(dev)->gen == 6) + error->forcewake = I915_READ(FORCEWAKE); + + for_each_pipe(pipe) + error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); + + if (INTEL_INFO(dev)->gen >= 6) { + error->error = I915_READ(ERROR_GEN6); + error->done_reg = I915_READ(DONE_REG); + } + + if (INTEL_INFO(dev)->gen == 7) + error->err_int = I915_READ(GEN7_ERR_INT); + + i915_get_extra_instdone(dev, error->extra_instdone); + + i915_gem_record_fences(dev, error); + i915_gem_record_rings(dev, error); + + /* Record buffers on the active and pinned lists. 
*/ + error->active_bo = NULL; + error->pinned_bo = NULL; + + i = 0; + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) + i++; + error->active_bo_count = i; + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + if (obj->pin_count) + i++; + error->pinned_bo_count = i - error->active_bo_count; + + error->active_bo = NULL; + error->pinned_bo = NULL; + if (i) { + error->active_bo = kmalloc(sizeof(*error->active_bo)*i, + GFP_ATOMIC); + if (error->active_bo) + error->pinned_bo = + error->active_bo + error->active_bo_count; + } + + if (error->active_bo) + error->active_bo_count = + capture_active_bo(error->active_bo, + error->active_bo_count, + &dev_priv->mm.active_list); + + if (error->pinned_bo) + error->pinned_bo_count = + capture_pinned_bo(error->pinned_bo, + error->pinned_bo_count, + &dev_priv->mm.bound_list); + + do_gettimeofday(&error->time); + + error->overlay = intel_overlay_capture_error_state(dev); + error->display = intel_display_capture_error_state(dev); + + spin_lock_irqsave(&dev_priv->error_lock, flags); + if (dev_priv->first_error == NULL) { + dev_priv->first_error = error; + error = NULL; + } + spin_unlock_irqrestore(&dev_priv->error_lock, flags); + + if (error) + i915_error_state_free(&error->ref); +} + +void +i915_destroy_error_state(struct drm_device *dev) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->error_lock, flags); + error = dev_priv->first_error; + dev_priv->first_error = NULL; + spin_unlock_irqrestore(&dev_priv->error_lock, flags); + + if (error) + kref_put(&error->ref, i915_error_state_free); +} +#else +#define i915_capture_error_state(x) +#endif + +void +i915_report_and_clear_eir(struct drm_device *dev) +{ + struct inteldrm_softc *dev_priv = dev->dev_private; + uint32_t instdone[I915_NUM_INSTDONE_REG]; + u32 eir = I915_READ(EIR); + int pipe, i; + + if (!eir) + return; + + printf("render error detected, EIR: 0x%08x\n", eir); + + i915_get_extra_instdone(dev, instdone); + + if (IS_G4X(dev)) { + if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { + u32 ipeir = I915_READ(IPEIR_I965); + + printf(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); + printf(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); + for (i = 0; i < ARRAY_SIZE(instdone); i++) + printf(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); + printf(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + printf(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); + I915_WRITE(IPEIR_I965, ipeir); + POSTING_READ(IPEIR_I965); + } + if (eir & GM45_ERROR_PAGE_TABLE) { + u32 pgtbl_err = I915_READ(PGTBL_ER); + printf("page table error\n"); + printf(" PGTBL_ER: 0x%08x\n", pgtbl_err); + I915_WRITE(PGTBL_ER, pgtbl_err); + POSTING_READ(PGTBL_ER); + } + } + + if (!IS_GEN2(dev)) { + if (eir & I915_ERROR_PAGE_TABLE) { + u32 pgtbl_err = I915_READ(PGTBL_ER); + printf("page table error\n"); + printf(" PGTBL_ER: 0x%08x\n", pgtbl_err); + I915_WRITE(PGTBL_ER, pgtbl_err); + POSTING_READ(PGTBL_ER); + } + } + + if (eir & I915_ERROR_MEMORY_REFRESH) { + printf("memory refresh error:\n"); + for_each_pipe(pipe) + printf("pipe %c stat: 0x%08x\n", + pipe_name(pipe), I915_READ(PIPESTAT(pipe))); + /* pipestat has already been acked */ + } + if (eir & I915_ERROR_INSTRUCTION) { + printf("instruction error\n"); + printf(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); + for (i = 0; i < ARRAY_SIZE(instdone); i++) + printf(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); + if (INTEL_INFO(dev)->gen < 4) { + u32 ipeir = I915_READ(IPEIR); + + printf(" IPEIR: 0x%08x\n", 
I915_READ(IPEIR));
+			printf("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
+			printf("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
+			I915_WRITE(IPEIR, ipeir);
+			POSTING_READ(IPEIR);
+		} else {
+			u32 ipeir = I915_READ(IPEIR_I965);
+
+			printf("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+			printf("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+			printf("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
+			printf("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
+			I915_WRITE(IPEIR_I965, ipeir);
+			POSTING_READ(IPEIR_I965);
+		}
+	}
+
+	I915_WRITE(EIR, eir);
+	POSTING_READ(EIR);
+	eir = I915_READ(EIR);
+	if (eir) {
+		/*
+		 * some errors might have become stuck,
+		 * mask them.
+		 */
+		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+		I915_WRITE(EMR, I915_READ(EMR) | eir);
+		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+	}
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog.  Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs.  Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void
+i915_handle_error(struct drm_device *dev, bool wedged)
+{
+	struct inteldrm_softc *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
+
+	i915_capture_error_state(dev);
+	i915_report_and_clear_eir(dev);
+
+	if (wedged) {
+//		INIT_COMPLETION(dev_priv->error_completion);
+		atomic_set(&dev_priv->mm.wedged, 1);
+
+		/*
+		 * Wakeup waiting processes so they don't hang
+		 */
+		for_each_ring(ring, dev_priv, i)
+			wakeup(ring);
+	}
+
+	workq_queue_task(NULL, &dev_priv->error_task, 0,
+	    i915_error_work_func, dev_priv, NULL);
+}
+
+void
+i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_gem_object *obj;
+	struct intel_unpin_work *work;
+	bool stall_detected;
+
+	/* Ignore early vblank irqs */
+	if (intel_crtc == NULL)
+		return;
+
+	mtx_enter(&dev->event_lock);
+	work = intel_crtc->unpin_work;
+
+	if (work == NULL ||
+	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+	    !work->enable_stall_check) {
+		/* Either the pending flip IRQ arrived, or we're too early.
Don't check */ + mtx_leave(&dev->event_lock); + return; + } + + /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ + obj = work->pending_flip_obj; + if (INTEL_INFO(dev)->gen >= 4) { + int dspsurf = DSPSURF(intel_crtc->plane); + stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == + obj->gtt_offset; + } else { + int dspaddr = DSPADDR(intel_crtc->plane); + stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + + crtc->y * crtc->fb->pitches[0] + + crtc->x * crtc->fb->bits_per_pixel/8); + } + + mtx_leave(&dev->event_lock); + + if (stall_detected) { + DRM_DEBUG_DRIVER("Pageflip stall detected\n"); + intel_prepare_page_flip(dev, intel_crtc->plane); + } +} + +/* Called from drm generic code, passed 'crtc' which + * we use as a pipe index + */ +int +i915_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + if (!i915_pipeconf_enabled(dev, pipe)) + return -EINVAL; + + mtx_enter(&dev_priv->irq_lock); + if (INTEL_INFO(dev)->gen >= 4) + i915_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); + else + i915_enable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE); + + /* maintain vblank delivery even in deep C-states */ + if (dev_priv->info->gen == 3) + I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); + mtx_leave(&dev_priv->irq_lock); + + return 0; +} + +int +ironlake_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + if (!i915_pipeconf_enabled(dev, pipe)) + return -EINVAL; + + mtx_enter(&dev_priv->irq_lock); + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? + DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); + mtx_leave(&dev_priv->irq_lock); + + return 0; +} + +int +ivybridge_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + if (!i915_pipeconf_enabled(dev, pipe)) + return -EINVAL; + + mtx_enter(&dev_priv->irq_lock); + ironlake_enable_display_irq(dev_priv, + DE_PIPEA_VBLANK_IVB << (5 * pipe)); + mtx_leave(&dev_priv->irq_lock); + + return 0; +} + +int +valleyview_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 imr; + + if (!i915_pipeconf_enabled(dev, pipe)) + return -EINVAL; + + mtx_enter(&dev_priv->irq_lock); + imr = I915_READ(VLV_IMR); + if (pipe == 0) + imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; + else + imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + I915_WRITE(VLV_IMR, imr); + i915_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); + mtx_leave(&dev_priv->irq_lock); + + return 0; +} + +/* Called from drm generic code, passed 'crtc' which + * we use as a pipe index + */ +void +i915_disable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + mtx_enter(&dev_priv->irq_lock); + if (dev_priv->info->gen == 3) + I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); + + i915_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); + mtx_leave(&dev_priv->irq_lock); +} + +void +ironlake_disable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + mtx_enter(&dev_priv->irq_lock); + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); + mtx_leave(&dev_priv->irq_lock); +} + +void +ivybridge_disable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + mtx_enter(&dev_priv->irq_lock); + ironlake_disable_display_irq(dev_priv, + DE_PIPEA_VBLANK_IVB << (pipe * 5)); + mtx_leave(&dev_priv->irq_lock); +} + +void +valleyview_disable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 imr; + + mtx_enter(&dev_priv->irq_lock); + i915_disable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); + imr = I915_READ(VLV_IMR); + if (pipe == 0) + imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; + else + imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + I915_WRITE(VLV_IMR, imr); + mtx_leave(&dev_priv->irq_lock); +} + +u32 +ring_last_seqno(struct intel_ring_buffer *ring) +{ + return list_entry(ring->request_list.prev, + struct drm_i915_gem_request, list)->seqno; +} + +bool +i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) +{ + if (list_empty(&ring->request_list) || + i915_seqno_passed(ring->get_seqno(ring, false), + ring_last_seqno(ring))) { + /* Issue a wake-up to catch stuck h/w. */ + if (wakeup_pending(ring)) { + DRM_ERROR("Hangcheck timer elapsed... %s idle\n", + ring->name); + wakeup(ring); + *err = true; + } + return true; + } + return false; +} + +bool +kick_ring(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct inteldrm_softc *dev_priv = dev->dev_private; + u32 tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) { + DRM_ERROR("Kicking stuck wait on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + return false; +} + +bool +i915_hangcheck_hung(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (dev_priv->hangcheck_count++ > 1) { + bool hung = true; + + DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); + i915_handle_error(dev, true); + + if (!IS_GEN2(dev)) { + struct intel_ring_buffer *ring; + int i; + + /* Is the chip hanging on a WAIT_FOR_EVENT? + * If so we can simply poke the RB_WAIT bit + * and break the hang. This should work on + * all but the second generation chipsets. + */ + for_each_ring(ring, dev_priv, i) + hung &= !kick_ring(ring); + } + + return hung; + } + + return false; +} + +/** + * This is called when the chip hasn't reported back with completed + * batchbuffers in a long time. The first time this is called we simply record + * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses + * again, we assume the chip is wedged and try to fix it. + */ +void +i915_hangcheck_elapsed(void *arg) +{ + struct inteldrm_softc *dev_priv = arg; + struct drm_device *dev = (struct drm_device *)dev_priv->drmdev; + uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; + struct intel_ring_buffer *ring; + bool err = false, idle; + int i; + +#ifdef notyet + if (!i915_enable_hangcheck) + return; +#endif + + memset(acthd, 0, sizeof(acthd)); + idle = true; + for_each_ring(ring, dev_priv, i) { + idle &= i915_hangcheck_ring_idle(ring, &err); + acthd[i] = intel_ring_get_active_head(ring); + } + + /* If all work is done then ACTHD clearly hasn't advanced. 
*/
+	if (idle) {
+		if (err) {
+			if (i915_hangcheck_hung(dev))
+				return;
+
+			goto repeat;
+		}
+
+		dev_priv->hangcheck_count = 0;
+		return;
+	}
+
+	i915_get_extra_instdone(dev, instdone);
+	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
+	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
+		if (i915_hangcheck_hung(dev))
+			return;
+	} else {
+		dev_priv->hangcheck_count = 0;
+
+		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
+		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
+	}
+
+repeat:
+	/* Reset timer in case chip hangs without another request being added */
+	timeout_add_msec(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
+}
+
+/* drm_dma.h hooks
+*/
+void
+ironlake_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+//	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(HWSTAM, 0xeffe);
+
+	/* XXX hotplug from PCH */
+
+	I915_WRITE(DEIMR, 0xffffffff);
+	I915_WRITE(DEIER, 0x0);
+	POSTING_READ(DEIER);
+
+	/* and GT */
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	POSTING_READ(GTIER);
+
+	/* south display irq */
+	I915_WRITE(SDEIMR, 0xffffffff);
+	I915_WRITE(SDEIER, 0x0);
+	POSTING_READ(SDEIER);
+}
+
+void
+valleyview_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+//	atomic_set(&dev_priv->irq_received, 0);
+
+	/* VLV magic */
+	I915_WRITE(VLV_IMR, 0);
+	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+	/* and GT */
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	POSTING_READ(GTIER);
+
+	I915_WRITE(DPINVGTT, 0xff);
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IMR, 0xffffffff);
+	I915_WRITE(VLV_IER, 0x0);
+	POSTING_READ(VLV_IER);
+}
+
+/*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ *
+ * This register is the same on all known PCH chips.
+ */
+
+void
+ironlake_enable_pch_hotplug(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 hotplug;
+
+	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
+int
+ironlake_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	/* enable the interrupts we always want on */
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+	    DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+	u32 render_irqs;
+	u32 hotplug_mask;
+
+	dev_priv->irq_mask = ~display_mask;
+
+	/* should always be able to generate irqs */
+	I915_WRITE(DEIIR, I915_READ(DEIIR));
+	I915_WRITE(DEIMR, dev_priv->irq_mask);
+	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+	POSTING_READ(DEIER);
+
+	dev_priv->gt_irq_mask = ~0;
+
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+	if (IS_GEN6(dev))
+		render_irqs =
+		    GT_USER_INTERRUPT |
+		    GEN6_BSD_USER_INTERRUPT |
+		    GEN6_BLITTER_USER_INTERRUPT;
+	else
+		render_irqs =
+		    GT_USER_INTERRUPT |
+		    GT_PIPE_NOTIFY |
+		    GT_BSD_USER_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
+
+	if (HAS_PCH_CPT(dev)) {
+		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+		    SDE_PORTB_HOTPLUG_CPT |
+		    SDE_PORTC_HOTPLUG_CPT |
+		    SDE_PORTD_HOTPLUG_CPT);
+	} else {
+		hotplug_mask = (SDE_CRT_HOTPLUG |
+		    SDE_PORTB_HOTPLUG |
+		    SDE_PORTC_HOTPLUG |
+		    SDE_PORTD_HOTPLUG |
+		    SDE_AUX_MASK);
+	}
+
+	dev_priv->pch_irq_mask = ~hotplug_mask;
+
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+	I915_WRITE(SDEIER, hotplug_mask);
+	POSTING_READ(SDEIER);
+
+	ironlake_enable_pch_hotplug(dev);
+
+	if (IS_IRONLAKE_M(dev)) {
+		/* Clear & enable PCU event interrupts */
+		I915_WRITE(DEIIR, DE_PCU_EVENT);
+		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+	}
+
+	return 0;
+}
+
+int
+ivybridge_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	/* enable the interrupts we always want on */
+	u32 display_mask =
+	    DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+	    DE_PLANEC_FLIP_DONE_IVB |
+	    DE_PLANEB_FLIP_DONE_IVB |
+	    DE_PLANEA_FLIP_DONE_IVB;
+	u32 render_irqs;
+	u32 hotplug_mask;
+
+	dev_priv->irq_mask = ~display_mask;
+
+	/* should always be able to generate irqs */
+	I915_WRITE(DEIIR, I915_READ(DEIIR));
+	I915_WRITE(DEIMR, dev_priv->irq_mask);
+	I915_WRITE(DEIER,
+	    display_mask |
+	    DE_PIPEC_VBLANK_IVB |
+	    DE_PIPEB_VBLANK_IVB |
+	    DE_PIPEA_VBLANK_IVB);
+	POSTING_READ(DEIER);
+
+	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+	    GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
+
+	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+	    SDE_PORTB_HOTPLUG_CPT |
+	    SDE_PORTC_HOTPLUG_CPT |
+	    SDE_PORTD_HOTPLUG_CPT);
+	dev_priv->pch_irq_mask = ~hotplug_mask;
+
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+	I915_WRITE(SDEIER, hotplug_mask);
+	POSTING_READ(SDEIER);
+
+	ironlake_enable_pch_hotplug(dev);
+
+	return 0;
+}
+
+int
+valleyview_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 enable_mask;
+	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+	u32 render_irqs;
+	u16 msid;
+
+	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+	    I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+	    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+	    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	/*
+	 * Leave vblank interrupts masked initially.  enable/disable will
+	 * toggle them based on usage.
+	 */
+	dev_priv->irq_mask = (~enable_mask) |
+	    I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+	    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+
+	/* Hack for broken MSIs on VLV */
+	pci_conf_write(dev_priv->pc, dev_priv->tag, 0x94, 0xfee00000);
+	msid = pci_conf_read(dev_priv->pc, dev_priv->tag, 0x98);
+	msid &= 0xff; /* mask out delivery bits */
+	msid |= (1<<14);
+	pci_conf_write(dev_priv->pc, dev_priv->tag, 0x98, msid);
+
+	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+	I915_WRITE(VLV_IER, enable_mask);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(PIPESTAT(0), 0xffff);
+	I915_WRITE(PIPESTAT(1), 0xffff);
+	POSTING_READ(VLV_IER);
+
+	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+	    GEN6_BLITTER_USER_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
+
+	/* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+	/* Note HDMI and DP share bits */
+	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMID_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
+		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
+		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+		hotplug_en |= CRT_HOTPLUG_INT_EN;
+		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+	}
+
+	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+	return 0;
+}
+
+void
+valleyview_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IMR, 0xffffffff);
+	I915_WRITE(VLV_IER, 0x0);
+	POSTING_READ(VLV_IER);
+}
+
+void
+ironlake_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	if
+
+void
+ironlake_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+
+	I915_WRITE(DEIMR, 0xffffffff);
+	I915_WRITE(DEIER, 0x0);
+	I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+	I915_WRITE(SDEIMR, 0xffffffff);
+	I915_WRITE(SDEIER, 0x0);
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+}
+
+void
+i8xx_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+//	atomic_set(&dev_priv->irq_received, 0);
+
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE16(IMR, 0xffff);
+	I915_WRITE16(IER, 0x0);
+	POSTING_READ16(IER);
+}
+
+int
+i8xx_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+
+	I915_WRITE16(EMR,
+	    ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask =
+	    ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+	    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+	    I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+	    I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+	    I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+
+	I915_WRITE16(IER,
+	    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+	    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+	    I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+	    I915_USER_INTERRUPT);
+	POSTING_READ16(IER);
+
+	return 0;
+}
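+
+/*
+ * Gen2 exposes 16-bit versions of the interrupt registers, hence the
+ * I915_WRITE16()/I915_READ16() accessors in the i8xx paths below;
+ * later generations use the full 32-bit registers.
+ */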
+
+int
+i8xx_intr(void *arg)
+{
+	drm_i915_private_t *dev_priv = arg;
+	struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+	u16 iir, new_iir;
+	u32 pipe_stats[2];
+	int irq_received;
+	int pipe;
+	u16 flip_mask =
+	    I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+	    I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+//	atomic_inc(&dev_priv->irq_received);
+
+	iir = I915_READ16(IIR);
+	if (iir == 0)
+		return 0;
+
+	while (iir & ~flip_mask) {
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		mtx_enter(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
+
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+					    pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = 1;
+			}
+		}
+		mtx_leave(&dev_priv->irq_lock);
+
+		I915_WRITE16(IIR, iir & ~flip_mask);
+		new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+//		i915_update_dri1_breadcrumb(dev);
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->rings[RCS]);
+
+		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+		    drm_handle_vblank(dev, 0)) {
+			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+				intel_prepare_page_flip(dev, 0);
+				intel_finish_page_flip(dev, 0);
+				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+			}
+		}
+
+		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+		    drm_handle_vblank(dev, 1)) {
+			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+				intel_prepare_page_flip(dev, 1);
+				intel_finish_page_flip(dev, 1);
+				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+			}
+		}
+
+		iir = new_iir;
+	}
+
+	return 1;
+}
+
+void
+i8xx_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	for_each_pipe(pipe) {
+		/* Clear enable bits; then clear status bits */
+		I915_WRITE(PIPESTAT(pipe), 0);
+		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+	}
+	I915_WRITE16(IMR, 0xffff);
+	I915_WRITE16(IER, 0x0);
+	I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+void
+i915_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+//	atomic_set(&dev_priv->irq_received, 0);
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		I915_WRITE(PORT_HOTPLUG_EN, 0);
+		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	}
+
+	I915_WRITE16(HWSTAM, 0xeffe);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+	POSTING_READ(IER);
+}
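+
+/*
+ * The "#ifdef notyet" blocks below keep the ASLE (opregion/backlight)
+ * interrupt masked until opregion support is hooked up in this port;
+ * compare the commented-out intel_opregion_enable_asle() call.
+ */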
+
+int
+i915_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 enable_mask;
+
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+
+	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask =
+#ifdef notyet
+	    ~(I915_ASLE_INTERRUPT |
+#else
+	    ~(
+#endif
+	    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+	    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+	    I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+	    I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+	    I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+	enable_mask =
+#ifdef notyet
+	    I915_ASLE_INTERRUPT |
+#endif
+	    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+	    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+	    I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+	    I915_USER_INTERRUPT;
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		/* Enable in IER... */
+		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+		/* and unmask in IMR */
+		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+	}
+
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	I915_WRITE(IER, enable_mask);
+	POSTING_READ(IER);
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMID_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
+			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
+			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+			hotplug_en |= CRT_HOTPLUG_INT_EN;
+			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+		}
+
+		/* Ignore TV since it's buggy */
+
+		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+	}
+
+//	intel_opregion_enable_asle(dev);
+
+	return 0;
+}
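+
+/*
+ * In i915_intr() below, flip-pending bits are deliberately left set
+ * in IIR (via ~flip_mask) until the matching vblank arrives.  The
+ * plane = !plane swap accounts for mobile parts, where the plane to
+ * pipe pairing appears to be reversed.
+ */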
+
+int
+i915_intr(void *arg)
+{
+	drm_i915_private_t *dev_priv = arg;
+	struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+	u32 flip_mask =
+	    I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+	    I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+	u32 flip[2] = {
+	    I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
+	    I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
+	};
+	int pipe, ret = 0;
+
+//	atomic_inc(&dev_priv->irq_received);
+
+	iir = I915_READ(IIR);
+	do {
+		bool irq_received = (iir & ~flip_mask) != 0;
+		bool blc_event = false;
+
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		mtx_enter(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
+
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/* Clear the PIPE*STAT regs before the IIR */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+					    pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = true;
+			}
+		}
+		mtx_leave(&dev_priv->irq_lock);
+
+		if (!irq_received)
+			break;
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if ((I915_HAS_HOTPLUG(dev)) &&
+		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+			    hotplug_status);
+			if (hotplug_status & dev_priv->hotplug_supported_mask)
+				workq_queue_task(NULL, &dev_priv->hotplug_task,
+				    0, i915_hotplug_work_func, dev_priv, NULL);
+
+			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+			POSTING_READ(PORT_HOTPLUG_STAT);
+		}
+
+		I915_WRITE(IIR, iir & ~flip_mask);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->rings[RCS]);
+
+		for_each_pipe(pipe) {
+			int plane = pipe;
+			if (IS_MOBILE(dev))
+				plane = !plane;
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+			    drm_handle_vblank(dev, pipe)) {
+				if (iir & flip[plane]) {
+					intel_prepare_page_flip(dev, plane);
+					intel_finish_page_flip(dev, pipe);
+					flip_mask &= ~flip[plane];
+				}
+			}
+
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
+		}
+
+#ifdef notyet
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
+#endif
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero.  If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read.  This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		ret = 1;
+		iir = new_iir;
+	} while (iir & ~flip_mask);
+
+//	i915_update_dri1_breadcrumb(dev);
+
+	return ret;
+}
+
+void
+i915_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		I915_WRITE(PORT_HOTPLUG_EN, 0);
+		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	}
+
+	I915_WRITE16(HWSTAM, 0xffff);
+	for_each_pipe(pipe) {
+		/* Clear enable bits; then clear status bits */
+		I915_WRITE(PIPESTAT(pipe), 0);
+		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+	}
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+
+	I915_WRITE(IIR, I915_READ(IIR));
+}
+
+void
+i965_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+//	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+	I915_WRITE(HWSTAM, 0xeffe);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+	POSTING_READ(IER);
+}
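+
+/*
+ * i965_irq_postinstall() below also unmasks the display port (hotplug)
+ * interrupt unconditionally, enables the BSD ring user interrupt on
+ * G4X, and uses the wider GM45_ERROR_* masks in EMR on those parts.
+ */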
+
+int
+i965_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 hotplug_en;
+	u32 enable_mask;
+	u32 error_mask;
+
+	/* Unmask the interrupts that we always want on. */
+#ifdef notyet
+	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+#else
+	dev_priv->irq_mask = ~(
+#endif
+	    I915_DISPLAY_PORT_INTERRUPT |
+	    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+	    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+	    I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+	    I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+	    I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+	enable_mask = ~dev_priv->irq_mask;
+	enable_mask |= I915_USER_INTERRUPT;
+
+	if (IS_G4X(dev))
+		enable_mask |= I915_BSD_USER_INTERRUPT;
+
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+
+	/*
+	 * Enable some error detection, note the instruction error mask
+	 * bit is reserved, so we leave it masked.
+	 */
+	if (IS_G4X(dev)) {
+		error_mask = ~(GM45_ERROR_PAGE_TABLE |
+		    GM45_ERROR_MEM_PRIV |
+		    GM45_ERROR_CP_PRIV |
+		    I915_ERROR_MEMORY_REFRESH);
+	} else {
+		error_mask = ~(I915_ERROR_PAGE_TABLE |
+		    I915_ERROR_MEMORY_REFRESH);
+	}
+	I915_WRITE(EMR, error_mask);
+
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	I915_WRITE(IER, enable_mask);
+	POSTING_READ(IER);
+
+	/* Note HDMI and DP share hotplug bits */
+	hotplug_en = 0;
+	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+		hotplug_en |= HDMID_HOTPLUG_INT_EN;
+	if (IS_G4X(dev)) {
+		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
+			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
+			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+	} else {
+		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
+			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
+			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+	}
+	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+		hotplug_en |= CRT_HOTPLUG_INT_EN;
+
+		/*
+		 * Programming the CRT detection parameters tends to
+		 * generate a spurious hotplug event about three seconds
+		 * later.  So just do it once.
+		 */
+		if (IS_G4X(dev))
+			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+	}
+
+	/* Ignore TV since it's buggy */
+
+	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+#ifdef notyet
+	intel_opregion_enable_asle(dev);
+#endif
+
+	return 0;
+}
+
+int
+i965_intr(void *arg)
+{
+	drm_i915_private_t *dev_priv = arg;
+	struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+	u32 iir, new_iir;
+	u32 pipe_stats[I915_MAX_PIPES];
+	int irq_received;
+	int ret = 0, pipe;
+
+//	atomic_inc(&dev_priv->irq_received);
+
+	iir = I915_READ(IIR);
+
+	for (;;) {
+		bool blc_event = false;
+
+		irq_received = iir != 0;
+
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		mtx_enter(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
+
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+					    pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = 1;
+			}
+		}
+		mtx_leave(&dev_priv->irq_lock);
+
+		if (!irq_received)
+			break;
+
+		ret = 1;
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+			    hotplug_status);
+			if (hotplug_status & dev_priv->hotplug_supported_mask)
+				workq_queue_task(NULL, &dev_priv->hotplug_task,
+				    0, i915_hotplug_work_func, dev_priv, NULL);
+
+			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+			I915_READ(PORT_HOTPLUG_STAT);
+		}
+
+		I915_WRITE(IIR, iir);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->rings[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->rings[VCS]);
+
+		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 0);
+
+		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 1);
+
+		for_each_pipe(pipe) {
+			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+			    drm_handle_vblank(dev, pipe)) {
+				i915_pageflip_stall_check(dev, pipe);
+				intel_finish_page_flip(dev, pipe);
+			}
+
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
+		}
+
+#ifdef notyet
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
+#endif
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero.  If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read.  This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		iir = new_iir;
+	}
+
+//	i915_update_dri1_breadcrumb(dev);
+
+	return ret;
+}
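+
+/*
+ * Unlike the gen2/gen3 handlers, i965_intr() above acks the full IIR
+ * value each pass and completes page flips from the pipe vblank
+ * status together with i915_pageflip_stall_check(), rather than
+ * tracking flip-pending bits in a flip_mask.
+ */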
+
+void
+i965_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe),
+		    I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
+	I915_WRITE(IIR, I915_READ(IIR));
+}
+
+void
+intel_irq_init(struct drm_device *dev)
+{
+//	struct inteldrm_softc *dev_priv = dev->dev_private;
+
+	dev->driver->get_vblank_counter = i915_get_vblank_counter;
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+	else
+		dev->driver->get_vblank_timestamp = NULL;
+	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+	if (IS_VALLEYVIEW(dev)) {
+		dev->driver->irq_handler = valleyview_intr;
+		dev->driver->irq_preinstall = valleyview_irq_preinstall;
+		dev->driver->irq_postinstall = valleyview_irq_postinstall;
+		dev->driver->irq_uninstall = valleyview_irq_uninstall;
+		dev->driver->enable_vblank = valleyview_enable_vblank;
+		dev->driver->disable_vblank = valleyview_disable_vblank;
+	} else if (IS_IVYBRIDGE(dev)) {
+		/* Share pre & uninstall handlers with ILK/SNB */
+		dev->driver->irq_handler = ivybridge_intr;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ivybridge_enable_vblank;
+		dev->driver->disable_vblank = ivybridge_disable_vblank;
+	} else if (IS_HASWELL(dev)) {
+		/* Share interrupt handling with IVB */
+		dev->driver->irq_handler = ivybridge_intr;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ivybridge_enable_vblank;
+		dev->driver->disable_vblank = ivybridge_disable_vblank;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev->driver->irq_handler = ironlake_intr;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ironlake_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ironlake_enable_vblank;
+		dev->driver->disable_vblank = ironlake_disable_vblank;
+	} else {
+		if (INTEL_INFO(dev)->gen == 2) {
+			dev->driver->irq_preinstall = i8xx_irq_preinstall;
+			dev->driver->irq_postinstall = i8xx_irq_postinstall;
+			dev->driver->irq_handler = i8xx_intr;
+			dev->driver->irq_uninstall = i8xx_irq_uninstall;
+		} else if (INTEL_INFO(dev)->gen == 3) {
+			dev->driver->irq_preinstall = i915_irq_preinstall;
+			dev->driver->irq_postinstall = i915_irq_postinstall;
+			dev->driver->irq_uninstall = i915_irq_uninstall;
+			dev->driver->irq_handler = i915_intr;
+		} else {
+			dev->driver->irq_preinstall = i965_irq_preinstall;
+			dev->driver->irq_postinstall = i965_irq_postinstall;
+			dev->driver->irq_uninstall = i965_irq_uninstall;
+			dev->driver->irq_handler = i965_intr;
+		}
+		dev->driver->enable_vblank = i915_enable_vblank;
+		dev->driver->disable_vblank = i915_disable_vblank;
+	}
+}
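+
+/*
+ * Sketch only (not part of this commit): these hooks are consumed by
+ * the attachment code, which wires the bus interrupt to
+ * dev->driver->irq_handler with the softc as its argument, e.g.
+ *
+ *	ih = pci_intr_establish(pa->pa_pc, handle, IPL_TTY,
+ *	    dev->driver->irq_handler, dev_priv, "inteldrm");
+ *
+ * assuming `handle' was previously mapped with pci_intr_map(9).
+ */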