author     Jonathan Gray <jsg@cvs.openbsd.org>    2015-04-03 13:11:00 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>    2015-04-03 13:11:00 +0000
commit     bc3087c72afea4da27dc2816e0de4606381f6375 (patch)
tree       e1daa53b4595618e6206728a0d5da4402a9a9339 /sys/dev/pci/drm
parent     2e1df06368bf1ab89bdcbd423bf3af0402c7b156 (diff)
resync i915_drv.h to make it diffable to linux
ok kettenis@
Diffstat (limited to 'sys/dev/pci/drm')
-rw-r--r--  sys/dev/pci/drm/drmP.h                   |    3
-rw-r--r--  sys/dev/pci/drm/drm_linux.h              |   11
-rw-r--r--  sys/dev/pci/drm/i915/i915_dma.c          |   17
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.c          |   20
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.h          | 1536
-rw-r--r--  sys/dev/pci/drm/i915/intel_i2c.c         |    6
-rw-r--r--  sys/dev/pci/drm/i915/intel_ringbuffer.h  |    4
7 files changed, 916 insertions, 681 deletions
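
Most of the churn in this commit is mechanical: drm_linux.h and drmP.h grow a thin compatibility layer that maps Linux type and struct names onto their OpenBSD equivalents (dma_addr_t, phys_addr_t, spinlock_t, drm_dma_handle_t, the drm_i915_private alias for inteldrm_softc), so i915_drv.h can track the upstream Linux header with a smaller local diff. The standalone sketch below illustrates that aliasing pattern only; bus_addr_t, struct mutex and inteldrm_softc are stand-in definitions here, not the real OpenBSD kernel declarations.

/*
 * Sketch of the Linux-compat aliasing added in drm_linux.h/drmP.h.
 * The bus_addr_t, struct mutex and inteldrm_softc definitions below are
 * placeholders so the example is self-contained; in the tree they come
 * from <machine/bus.h>, <sys/mutex.h> and i915_drv.h respectively.
 */
#include <stdint.h>

typedef uint64_t bus_addr_t;                        /* placeholder */
struct mutex { volatile unsigned int mtx_lock; };   /* placeholder */

/* Linux spellings resolve to the native OpenBSD types. */
typedef bus_addr_t dma_addr_t;
typedef bus_addr_t phys_addr_t;
typedef int        wait_queue_head_t;
typedef struct mutex spinlock_t;

/* Linux annotation macros expand to nothing on OpenBSD. */
#define __force
#define __always_unused
#define __read_mostly

/* The softc keeps its OpenBSD name but is reachable under the Linux one. */
struct inteldrm_softc {
	spinlock_t        irq_lock;
	wait_queue_head_t pending_flip_queue;
};
#define drm_i915_private inteldrm_softc

int
main(void)
{
	struct drm_i915_private dev_priv = { { 0 }, 0 };
	dma_addr_t addr = 0;

	(void)dev_priv;
	(void)addr;
	return 0;
}

With the aliases in place, code imported from Linux that declares a spinlock_t or a dma_addr_t compiles unchanged against the OpenBSD primitives, which is what makes the resynced header "diffable to linux".
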
diff --git a/sys/dev/pci/drm/drmP.h b/sys/dev/pci/drm/drmP.h index 4116c3a3dab..f89e25ae6c4 100644 --- a/sys/dev/pci/drm/drmP.h +++ b/sys/dev/pci/drm/drmP.h @@ -1,4 +1,4 @@ -/* $OpenBSD: drmP.h,v 1.181 2015/02/12 08:48:32 jsg Exp $ */ +/* $OpenBSD: drmP.h,v 1.182 2015/04/03 13:10:59 jsg Exp $ */ /* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com */ @@ -484,6 +484,7 @@ struct drm_dmamem { int nsegs; bus_dma_segment_t segs[1]; }; +typedef struct drm_dmamem drm_dma_handle_t; struct drm_buf_entry { struct drm_dmamem **seglist; diff --git a/sys/dev/pci/drm/drm_linux.h b/sys/dev/pci/drm/drm_linux.h index f41ab4dfb4c..997ff55ba7c 100644 --- a/sys/dev/pci/drm/drm_linux.h +++ b/sys/dev/pci/drm/drm_linux.h @@ -1,4 +1,4 @@ -/* $OpenBSD: drm_linux.h,v 1.7 2015/02/12 11:11:45 jsg Exp $ */ +/* $OpenBSD: drm_linux.h,v 1.8 2015/04/03 13:10:59 jsg Exp $ */ /* * Copyright (c) 2013, 2014 Mark Kettenis * @@ -18,7 +18,14 @@ #define IRQ_NONE 0 #define IRQ_HANDLED 1 +typedef bus_addr_t dma_addr_t; +typedef bus_addr_t phys_addr_t; +typedef int wait_queue_head_t; + #define __force +#define __always_unused +#define __read_mostly + #define KERN_INFO #define KERN_WARNING #define KERN_NOTICE @@ -72,6 +79,8 @@ do { } while(0) #endif +typedef struct mutex spinlock_t; + static inline void spin_lock_irqsave(struct mutex *mtxp, __unused unsigned long flags) { diff --git a/sys/dev/pci/drm/i915/i915_dma.c b/sys/dev/pci/drm/i915/i915_dma.c index c93d79c58fb..eb4a155c26c 100644 --- a/sys/dev/pci/drm/i915/i915_dma.c +++ b/sys/dev/pci/drm/i915/i915_dma.c @@ -1,4 +1,4 @@ -/* $OpenBSD: i915_dma.c,v 1.18 2015/02/12 04:56:03 kettenis Exp $ */ +/* $OpenBSD: i915_dma.c,v 1.19 2015/04/03 13:10:59 jsg Exp $ */ /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- */ /* @@ -34,6 +34,17 @@ #include "intel_drv.h" #include <dev/pci/drm/drm_crtc_helper.h> +#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) + +#define BEGIN_LP_RING(n) \ + intel_ring_begin(LP_RING(dev_priv), (n)) + +#define OUT_RING(x) \ + intel_ring_emit(LP_RING(dev_priv), x) + +#define ADVANCE_LP_RING() \ + intel_ring_advance(LP_RING(dev_priv)) + void i915_kernel_lost_context(struct drm_device * dev) { @@ -311,10 +322,6 @@ i915_load_modeset_init(struct drm_device *dev) intel_register_dsm_handler(); #endif - /* IIR "flip pending" bit means done if this bit is set */ - if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) - dev_priv->flip_pending_is_done = true; - #ifdef notyet ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); if (ret) diff --git a/sys/dev/pci/drm/i915/i915_drv.c b/sys/dev/pci/drm/i915/i915_drv.c index 4479d6113a0..7ed26ace120 100644 --- a/sys/dev/pci/drm/i915/i915_drv.c +++ b/sys/dev/pci/drm/i915/i915_drv.c @@ -1,4 +1,4 @@ -/* $OpenBSD: i915_drv.c,v 1.75 2015/02/12 04:56:03 kettenis Exp $ */ +/* $OpenBSD: i915_drv.c,v 1.76 2015/04/03 13:10:59 jsg Exp $ */ /* * Copyright (c) 2008-2009 Owain G. 
Ainsworth <oga@openbsd.org> * @@ -63,6 +63,17 @@ extern struct mutex mchdev_lock; +#define IS_I9XX(dev) (INTEL_INFO(dev)->gen >= 3) +/* MCH IFP BARs */ +#define I915_IFPADDR 0x60 +#define I965_IFPADDR 0x70 + +struct inteldrm_file { + struct drm_file file_priv; + struct { + } mm; +}; + /* * Override lid status (0=autodetect, 1=autodetect disabled [default], * -1=force lid closed, -2=force lid open) @@ -910,7 +921,7 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux) return; } - intel_detect_pch(dev_priv); + intel_detect_pch(dev); /* * i945G/GM report MSI capability despite not actually supporting it. @@ -1000,7 +1011,7 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux) intel_opregion_setup(dev); intel_setup_bios(dev); - intel_setup_gmbus(dev_priv); + intel_setup_gmbus(dev); /* XXX would be a lot nicer to get agp info before now */ uvm_page_physload(atop(dev_priv->mm.gtt_base_addr), @@ -2026,8 +2037,9 @@ intel_pch_match(struct pci_attach_args *pa) } void -intel_detect_pch(struct inteldrm_softc *dev_priv) +intel_detect_pch(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; struct pci_attach_args pa; unsigned short id; if (pci_find_device(&pa, intel_pch_match) == 0) { diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h index 9c4aca4624e..a40b14cf4fd 100644 --- a/sys/dev/pci/drm/i915/i915_drv.h +++ b/sys/dev/pci/drm/i915/i915_drv.h @@ -1,4 +1,4 @@ -/* $OpenBSD: i915_drv.h,v 1.56 2015/02/12 04:56:03 kettenis Exp $ */ +/* $OpenBSD: i915_drv.h,v 1.57 2015/04/03 13:10:59 jsg Exp $ */ /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- */ /* @@ -37,6 +37,10 @@ #include "intel_ringbuffer.h" #include "acpi.h" +#if NACPI > 0 +#define CONFIG_ACPI +#endif + #include "drm.h" #include "vga.h" @@ -110,6 +114,8 @@ enum port { }; #define port_name(p) ((p) + 'A') +#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ @@ -146,6 +152,10 @@ struct intel_ddi_plls { #define DRIVER_MINOR 6 #define DRIVER_PATCHLEVEL 0 +#define WATCH_COHERENCY 0 +#define WATCH_LISTS 0 +#define WATCH_GTT 0 + #define I915_GEM_PHYS_CURSOR_0 1 #define I915_GEM_PHYS_CURSOR_1 2 #define I915_GEM_PHYS_OVERLAY_REGS 3 @@ -153,11 +163,128 @@ struct intel_ddi_plls { struct drm_i915_gem_phys_object { int id; - struct drm_dmamem *handle; +#ifdef __linux__ + struct page **page_list; +#endif + drm_dma_handle_t *handle; struct drm_i915_gem_object *cur_obj; }; +struct opregion_header; +struct opregion_acpi; +struct opregion_swsci; +struct opregion_asle; struct inteldrm_softc; +#define drm_i915_private inteldrm_softc + +struct intel_opregion { + struct opregion_header __iomem *header; + struct opregion_acpi __iomem *acpi; + struct opregion_swsci __iomem *swsci; + struct opregion_asle __iomem *asle; + void __iomem *vbt; + u32 __iomem *lid_state; +}; +#define OPREGION_SIZE (8*1024) + +struct intel_overlay; +struct intel_overlay_error_state; + +#ifndef __OpenBSD__ +struct drm_i915_master_private { + drm_local_map_t *sarea; + struct _drm_i915_sarea *sarea_priv; +}; +#endif +#define I915_FENCE_REG_NONE -1 +#define I915_MAX_NUM_FENCES 16 +/* 16 fences + sign bit for FENCE_REG_NONE */ +#define I915_MAX_NUM_FENCE_BITS 5 + +struct drm_i915_fence_reg { + struct list_head lru_list; + struct drm_i915_gem_object *obj; + int pin_count; +}; + +struct sdvo_device_mapping { + u8 
initialized; + u8 dvo_port; + u8 slave_addr; + u8 dvo_wiring; + u8 i2c_pin; + u8 ddc_pin; +}; + +struct intel_display_error_state; + +struct drm_i915_error_state { + int ref; + u32 eir; + u32 pgtbl_er; + u32 ier; + u32 ccid; + u32 derrmr; + u32 forcewake; + bool waiting[I915_NUM_RINGS]; + u32 pipestat[I915_MAX_PIPES]; + u32 tail[I915_NUM_RINGS]; + u32 head[I915_NUM_RINGS]; + u32 ctl[I915_NUM_RINGS]; + u32 ipeir[I915_NUM_RINGS]; + u32 ipehr[I915_NUM_RINGS]; + u32 instdone[I915_NUM_RINGS]; + u32 acthd[I915_NUM_RINGS]; + u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; + u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1]; + u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ + /* our own tracking of ring head and tail */ + u32 cpu_ring_head[I915_NUM_RINGS]; + u32 cpu_ring_tail[I915_NUM_RINGS]; + u32 error; /* gen6+ */ + u32 err_int; /* gen7 */ + u32 instpm[I915_NUM_RINGS]; + u32 instps[I915_NUM_RINGS]; + u32 extra_instdone[I915_NUM_INSTDONE_REG]; + u32 seqno[I915_NUM_RINGS]; + u64 bbaddr; + u32 fault_reg[I915_NUM_RINGS]; + u32 done_reg; + u32 faddr[I915_NUM_RINGS]; + u64 fence[I915_MAX_NUM_FENCES]; + struct timeval time; + struct drm_i915_error_ring { + struct drm_i915_error_object { + int page_count; + u32 gtt_offset; + u32 *pages[0]; + } *ringbuffer, *batchbuffer; + struct drm_i915_error_request { + long jiffies; + u32 seqno; + u32 tail; + } *requests; + int num_requests; + } ring[I915_NUM_RINGS]; + struct drm_i915_error_buffer { + u32 size; + u32 name; + u32 rseqno, wseqno; + u32 gtt_offset; + u32 read_domains; + u32 write_domain; + s32 fence_reg:I915_MAX_NUM_FENCE_BITS; + s32 pinned:2; + u32 tiling:2; + u32 dirty:1; + u32 purgeable:1; + s32 ring:4; + u32 cache_level:2; + } *active_bo, *pinned_bo; + u32 active_bo_count, pinned_bo_count; + struct intel_overlay_error_state *overlay; + struct intel_display_error_state *display; +}; struct drm_i915_display_funcs { bool (*fbc_enabled)(struct drm_device *dev); @@ -196,10 +323,36 @@ struct drm_i915_display_funcs { }; struct drm_i915_gt_funcs { - void (*force_wake_get)(struct inteldrm_softc *dev_priv); - void (*force_wake_put)(struct inteldrm_softc *dev_priv); + void (*force_wake_get)(struct drm_i915_private *dev_priv); + void (*force_wake_put)(struct drm_i915_private *dev_priv); }; +#define DEV_INFO_FLAGS \ + DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \ + DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \ + DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \ + DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \ + DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_llc) + struct intel_device_info { u8 num_pipes:3; u8 gen; @@ -229,6 +382,18 @@ struct intel_device_info { u8 has_llc:1; }; +#define I915_PPGTT_PD_ENTRIES 512 +#define I915_PPGTT_PT_ENTRIES 1024 +struct 
i915_hw_ppgtt { + struct drm_device *dev; + unsigned num_pd_entries; + struct vm_page **pt_pages; + uint32_t pd_offset; + dma_addr_t *pt_dma_addr; + dma_addr_t scratch_page_dma_addr; +}; + + /* This must match up with the value previously used for execbuf2.rsvd1. */ #define DEFAULT_CONTEXT_ID 0 struct i915_hw_context { @@ -256,42 +421,6 @@ enum no_fbc_reason { FBC_MODULE_PARAM, }; -struct opregion_header; -struct opregion_acpi; -struct opregion_swsci; -struct opregion_asle; - -struct intel_opregion { - struct opregion_header *header; - struct opregion_acpi *acpi; - struct opregion_swsci *swsci; - struct opregion_asle *asle; - void *vbt; - u32 *lid_state; -}; -#define OPREGION_SIZE (8*1024) - - -#define I915_FENCE_REG_NONE -1 -#define I915_MAX_NUM_FENCES 16 -/* 16 fences + sign bit for FENCE_REG_NONE */ -#define I915_MAX_NUM_FENCE_BITS 5 - -struct drm_i915_fence_reg { - struct list_head lru_list; - struct drm_i915_gem_object *obj; - int pin_count; -}; - -struct sdvo_device_mapping { - u8 initialized; - u8 dvo_port; - u8 slave_addr; - u8 dvo_wiring; - u8 i2c_pin; - u8 ddc_pin; -}; - enum intel_pch { PCH_NONE = 0, /* No PCH present */ PCH_IBX, /* Ibexpeak PCH */ @@ -304,12 +433,13 @@ enum intel_sbi_destination { SBI_MPHY, }; -#define QUIRK_PIPEA_FORCE (1<<0) -#define QUIRK_LVDS_SSC_DISABLE (1<<1) -#define QUIRK_INVERT_BRIGHTNESS (1<<2) -#define QUIRK_NO_PCH_PWM_ENABLE (1<<3) +#define QUIRK_PIPEA_FORCE (1<<0) +#define QUIRK_LVDS_SSC_DISABLE (1<<1) +#define QUIRK_INVERT_BRIGHTNESS (1<<2) +#define QUIRK_NO_PCH_PWM_ENABLE (1<<3) struct intel_fbdev; +struct intel_fbc_work; struct intel_gmbus { struct i2c_controller controller; @@ -318,7 +448,7 @@ struct intel_gmbus { u32 force_bit; u32 reg0; u32 gpio_reg; - struct inteldrm_softc *dev_priv; + struct drm_i915_private *dev_priv; }; struct i915_suspend_saved_registers { @@ -518,49 +648,52 @@ struct intel_ilk_power_mgmt { struct drm_i915_gem_object *renderctx; }; +struct i915_dri1_state { + unsigned allow_batchbuffer : 1; + u32 __iomem *gfx_hws_cpu_addr; + + unsigned int cpp; + int back_offset; + int front_offset; + int current_page; + int page_flipping; + + uint32_t counter; +}; + struct intel_l3_parity { u32 *remap_info; struct task error_task; }; -/* - * lock ordering: - * exec lock, - * request lock - * list lock. 
- * - * XXX fence lock ,object lock - */ struct inteldrm_softc { - struct device sc_dev; - struct drm_device *dev; - bus_dma_tag_t dmat; - bus_space_tag_t bst; - struct agp_map *agph; - bus_space_handle_t opregion_ioh; - - u_long flags; + struct device sc_dev; + struct drm_device *dev; + bus_dma_tag_t dmat; + bus_space_tag_t bst; + struct agp_map *agph; + bus_space_handle_t opregion_ioh; - struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; + const struct intel_device_info *info; - pci_chipset_tag_t pc; - pcitag_t tag; - pci_intr_handle_t ih; - void *irqh; + int relative_constants_mode; - struct vga_pci_bar *regs; + pci_chipset_tag_t pc; + pcitag_t tag; + pci_intr_handle_t ih; + void *irqh; - int nscreens; - void (*switchcb)(void *, int, int); - void *switchcbarg; - void *switchcookie; - struct task switchtask; - struct rasops_info ro; + struct vga_pci_bar *regs; - int sc_offset; - int (*sc_copyrows)(void *, int, int, int); + int nscreens; + void (*switchcb)(void *, int, int); + void *switchcbarg; + void *switchcookie; + struct task switchtask; + struct rasops_info ro; - uint32_t gpio_mmio_base; + int sc_offset; + int (*sc_copyrows)(void *, int, int, int); struct drm_i915_gt_funcs gt; /** gt_fifo_count and the subsequent register write are synchronized @@ -571,12 +704,25 @@ struct inteldrm_softc { /** gt_lock is also taken in irq contexts. */ struct mutex gt_lock; - drm_i915_sarea_t *sarea_priv; + struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; + + /** gmbus_mutex protects against concurrent usage of the single hw gmbus + * controller on different i2c buses. */ + struct mutex gmbus_mutex; + + /** + * Base address of the gmbus and gpio block. + */ + uint32_t gpio_mmio_base; + + struct pci_dev *bridge_dev; struct intel_ring_buffer ring[I915_NUM_RINGS]; uint32_t next_seqno; - struct drm_dmamem *status_page_dmah; - + drm_dma_handle_t *status_page_dmah; +#ifdef notyet + struct resource mch_res; +#endif union flush { struct { bus_space_tag_t bst; @@ -587,33 +733,47 @@ struct inteldrm_softc { caddr_t kva; } i8xx; } ifp; - struct vm_page *pgs; + struct vm_page *pgs; + + atomic_t irq_received; - /* Protects user_irq_refcount and irq_mask reg */ - struct mutex irq_lock; - /* Refcount for user irq, only enabled when needed */ - int user_irq_refcount; - u_int32_t irq_mask; + /* protects the irq masks */ + spinlock_t irq_lock; /* DPIO indirect register protection */ - struct mutex dpio_lock; - /* Cached value of IMR to avoid reads in updating the bitfield */ - u_int32_t pipestat[2]; - /* these two ironlake only, we should union this with pipestat XXX */ - u_int32_t gt_irq_mask; - u_int32_t pch_irq_mask; + spinlock_t dpio_lock; - u_int32_t hotplug_supported_mask; - struct task hotplug_task; + /** Cached value of IMR to avoid reads in updating the bitfield */ + u32 pipestat[2]; + u32 irq_mask; + u32 gt_irq_mask; + u32 pch_irq_mask; - int num_pch_pll; + u32 hotplug_supported_mask; + struct task hotplug_task; - struct intel_opregion opregion; + int num_pch_pll; - int crt_ddc_pin; + /* For hangcheck timer */ +#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ +#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) + struct timeout hangcheck_timer; + int hangcheck_count; + uint32_t last_acthd[I915_NUM_RINGS]; + uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; + + unsigned int stop_rings; + + unsigned long cfb_size; + unsigned int cfb_fb; + enum plane cfb_plane; + int cfb_y; + struct intel_fbc_work *fbc_work; + + struct intel_opregion opregion; /* overlay */ - struct intel_overlay *overlay; + struct 
intel_overlay *overlay; bool sprite_scaling_enabled; /* LVDS info */ @@ -646,33 +806,30 @@ struct inteldrm_softc { } edp; bool no_aux_handshake; - struct intel_pch_pll pch_plls[I915_NUM_PLLS]; - struct intel_ddi_plls ddi_plls; + int crt_ddc_pin; + struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ + int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ + int num_fence_regs; /* 8 on pre-965, 16 otherwise */ - /* Reclocking support */ - bool render_reclock_avail; - bool lvds_downclock_avail; - int lvds_downclock; - bool busy; - int child_dev_num; - struct child_device_config *child_dev; + unsigned int fsb_freq, mem_freq, is_ddr3; - struct drm_i915_fence_reg fence_regs[16]; /* 965 */ - int fence_reg_start; /* 4 by default */ - int num_fence_regs; /* 8 pre-965, 16 post */ + spinlock_t error_lock; + /* Protected by dev->error_lock. */ + struct drm_i915_error_state *first_error; + struct task error_task; + int error_completion; + struct mutex error_completion_lock; /* number of ioctls + faults in flight */ - int entries; + int entries; - struct task error_task; + /* Display functions */ + struct drm_i915_display_funcs display; + /* PCH chipset type */ enum intel_pch pch_type; unsigned short pch_id; - struct drm_i915_display_funcs display; - - struct timeout idle_timeout; - unsigned long quirks; /* Register state */ @@ -681,6 +838,8 @@ struct inteldrm_softc { struct { /** Bridge to intel-gtt-ko */ struct intel_gtt *gtt; + /** Memory allocator for GTT stolen memory */ + struct drm_mm stolen; /** Memory allocator for GTT */ struct drm_mm gtt_space; /** List of all objects in gtt_space. Used to restore gtt @@ -697,15 +856,25 @@ struct inteldrm_softc { unsigned long gtt_start; unsigned long gtt_mappable_end; unsigned long gtt_end; + unsigned long stolen_base; /* limited to low memory (32-bit) */ - bus_addr_t gtt_base_addr; + struct io_mapping *gtt_mapping; + phys_addr_t gtt_base_addr; + int gtt_mtrr; + + /** PPGTT used for aliasing the PPGTT with the GTT */ + struct i915_hw_ppgtt *aliasing_ppgtt; + +#ifdef notyet + struct shrinker inactive_shrinker; +#endif + bool shrinker_no_lock_stealing; /** - * List of objects currently involved in rendering from the - * ringbuffer. + * List of objects currently involved in rendering. * * Includes buffers having the contents of their GPU caches - * flushed, not necessarily primitives. last_rendering_seqno + * flushed, not necessarily primitives. last_rendering_seqno * represents when the rendering involved will be completed. * * A reference is held on the buffer while on this list. @@ -716,7 +885,7 @@ struct inteldrm_softc { * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. * - * last_rendering_seqno is 0 while an object is in this list + * last_rendering_seqno is 0 while an object is in this list. * * A reference is not held on the buffer while on this list, * as merely being GTT-bound shouldn't prevent its being @@ -724,7 +893,7 @@ struct inteldrm_softc { */ struct list_head inactive_list; - /* Fence LRU */ + /** LRU list of objects with fence regs on them. */ struct list_head fence_list; /** @@ -732,9 +901,11 @@ struct inteldrm_softc { * but this means that requests will finish and never * be retired once the system goes idle. Set a timer to * fire periodically while the ring is running. When it - * fires, go retire requests in a workq. + * fires, go retire requests. 
*/ struct timeout retire_timer; + struct taskq *retire_taskq; + struct task retire_task; /** * Are we in a non-interruptible section of code like @@ -756,10 +927,10 @@ struct inteldrm_softc { * Flag if the hardware appears to be wedged. * * This is set when attempts to idle the device timeout. - * It prevents command submission from occuring and makes + * It prevents command submission from occurring and makes * every pending request fail */ - int wedged; + atomic_t wedged; /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; @@ -774,22 +945,9 @@ struct inteldrm_softc { size_t mappable_gtt_total; size_t object_memory; u32 object_count; - - /* for gem retire handler */ - struct taskq *retire_taskq; - struct task retire_task; } mm; - /* for hangcheck */ -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ - struct timeout hangcheck_timer; - int hangcheck_count; - uint32_t last_acthd[I915_NUM_RINGS]; - uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; - - const struct intel_device_info *info; - - int relative_constants_mode; + /* Kernel Modesetting */ struct sdvo_device_mapping sdvo_mappings[2]; /* indicate whether the LVDS_BORDER should be enabled or not */ @@ -799,28 +957,21 @@ struct inteldrm_softc { struct drm_crtc *plane_to_crtc_mapping[3]; struct drm_crtc *pipe_to_crtc_mapping[3]; - int pending_flip_queue; + wait_queue_head_t pending_flip_queue; - bool flip_pending_is_done; + struct intel_pch_pll pch_plls[I915_NUM_PLLS]; + struct intel_ddi_plls ddi_plls; - struct drm_connector *int_lvds_connector; - struct drm_connector *int_edp_connector; + /* Reclocking support */ + bool render_reclock_avail; + bool lvds_downclock_avail; + /* indicates the reduced downclock for LVDS*/ + int lvds_downclock; + u16 orig_clock; + int child_dev_num; + struct child_device_config *child_dev; - u8 cur_delay; - u8 min_delay; - u8 max_delay; - u8 fmax; - u8 fstart; - - u64 last_count1; - unsigned long last_time1; - unsigned long chipset_power; - u64 last_count2; -// struct timespec last_time2; - unsigned long gfx_power; - int c_m; - int r_t; - u8 corr; + bool mchbar_need_disable; struct intel_l3_parity l3_parity; @@ -833,22 +984,23 @@ struct inteldrm_softc { enum no_fbc_reason no_fbc_reason; - unsigned int stop_rings; + struct drm_mm_node *compressed_fb; + struct drm_mm_node *compressed_llb; - unsigned long cfb_size; - unsigned int cfb_fb; - int cfb_plane; - int cfb_y; - struct intel_fbc_work *fbc_work; + unsigned long last_gpu_reset; - unsigned int fsb_freq, mem_freq, is_ddr3; + /* list of fbdev register on this device */ + struct intel_fbdev *fbdev; - int error_completion; - struct mutex error_completion_lock; + /* + * The console may be contended at resume, but we don't + * want it to block on it. + */ +#ifdef notyet + struct work_struct console_resume_work; +#endif - time_t last_gpu_reset; - - struct intel_fbdev *fbdev; + struct backlight_device *backlight; struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; @@ -859,8 +1011,11 @@ struct inteldrm_softc { u32 fdi_rx_config; struct i915_suspend_saved_registers regfile; + + /* Old dri1 support infrastructure, beware the dragons ya fools entering + * here! 
*/ + struct i915_dri1_state dri1; }; -#define drm_i915_private inteldrm_softc typedef struct drm_i915_private drm_i915_private_t; /* Iterate over initialised rings */ @@ -876,9 +1031,9 @@ enum hdmi_force_audio { }; enum i915_cache_level { - I915_CACHE_NONE, + I915_CACHE_NONE = 0, I915_CACHE_LLC, - I915_CACHE_LLC_MLC, /* gen6+ */ + I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ }; struct drm_i915_gem_object_ops { @@ -899,42 +1054,6 @@ struct drm_i915_gem_object_ops { void (*put_pages)(struct drm_i915_gem_object *); }; -struct inteldrm_file { - struct drm_file file_priv; - struct { - } mm; -}; - -/* chip type flags */ -#define CHIP_I830 0x00001 -#define CHIP_I845G 0x00002 -#define CHIP_I85X 0x00004 -#define CHIP_I865G 0x00008 -#define CHIP_I9XX 0x00010 -#define CHIP_I915G 0x00020 -#define CHIP_I915GM 0x00040 -#define CHIP_I945G 0x00080 -#define CHIP_I945GM 0x00100 -#define CHIP_I965 0x00200 -#define CHIP_I965GM 0x00400 -#define CHIP_G33 0x00800 -#define CHIP_GM45 0x01000 -#define CHIP_G4X 0x02000 -#define CHIP_M 0x04000 -#define CHIP_HWS 0x08000 -#define CHIP_GEN2 0x10000 -#define CHIP_GEN3 0x20000 -#define CHIP_GEN4 0x40000 -#define CHIP_GEN6 0x80000 -#define CHIP_PINEVIEW 0x100000 -#define CHIP_IRONLAKE 0x200000 -#define CHIP_IRONLAKE_D 0x400000 -#define CHIP_IRONLAKE_M 0x800000 -#define CHIP_SANDYBRIDGE 0x1000000 -#define CHIP_IVYBRIDGE 0x2000000 -#define CHIP_GEN7 0x4000000 - -/** driver private structure attached to each drm_gem_object */ struct drm_i915_gem_object { struct drm_gem_object base; @@ -944,7 +1063,7 @@ struct drm_i915_gem_object { struct drm_mm_node *gtt_space; struct list_head gtt_list; - /** This object's place on the active/flushing/inactive lists */ + /** This object's place on the active/inactive lists */ struct list_head ring_list; struct list_head mm_list; /** This object's place in the batchbuffer or on the eviction list */ @@ -1029,6 +1148,10 @@ struct drm_i915_gem_object { struct vm_page **pages; int pages_pin_count; + /* prime dma-buf support */ + void *dma_buf_vmapping; + int vmapping_count; + /** * Used for performing relocations during execbuffer insertion. */ @@ -1069,21 +1192,12 @@ struct drm_i915_gem_object { * will be page flipped away on the next vblank. When it * reaches 0, dev_priv->pending_flip_queue will be woken up. */ - int pending_flip; + atomic_t pending_flip; }; #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) -struct drm_i915_file_private { - struct { - struct mutex lock; - struct list_head request_list; - } mm; - SPLAY_HEAD(i915_ctx_tree, i915_ctx_handle) ctx_tree; - uint32_t ctx_id; -}; - /** * Request queue structure. * @@ -1095,163 +1209,460 @@ struct drm_i915_file_private { * an emission time with seqnos for tracking how far ahead of the GPU we are. */ struct drm_i915_gem_request { - struct list_head list; /** On Which ring this request was generated */ - struct intel_ring_buffer *ring; + struct intel_ring_buffer *ring; + /** GEM sequence number associated with this request. */ - uint32_t seqno; + uint32_t seqno; + /** Postion in the ringbuffer of the end of the request */ - uint32_t tail; - /** Time at which this request was emitted, in ticks. */ - unsigned long emitted_ticks; - struct drm_i915_file_private *file_priv; + u32 tail; + + /** Time at which this request was emitted, in jiffies. 
*/ + unsigned long emitted_ticks; + + /** global list entry for this request */ + struct list_head list; + + struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ - struct list_head client_list; + struct list_head client_list; }; -u_int32_t inteldrm_read_hws(struct inteldrm_softc *, int); -int intel_ring_begin(struct intel_ring_buffer *, int); -void intel_ring_emit(struct intel_ring_buffer *, u_int32_t); -void intel_ring_advance(struct intel_ring_buffer *); -void inteldrm_update_ring(struct intel_ring_buffer *); -int inteldrm_pipe_enabled(struct inteldrm_softc *, int); -int i915_init_phys_hws(struct inteldrm_softc *, bus_dma_tag_t); +struct drm_i915_file_private { + struct { + struct mutex lock; + struct list_head request_list; + } mm; + SPLAY_HEAD(i915_ctx_tree, i915_ctx_handle) ctx_tree; + uint32_t ctx_id; +}; + +#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) + +#define IS_I830(dev) ((dev)->pci_device == 0x3577) +#define IS_845G(dev) ((dev)->pci_device == 0x2562) +#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) +#define IS_I865G(dev) ((dev)->pci_device == 0x2572) +#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) +#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) +#define IS_I945G(dev) ((dev)->pci_device == 0x2772) +#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) +#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) +#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) +#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) +#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) +#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) +#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) +#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) +#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) +#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) +#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) +#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) +#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ + (dev)->pci_device == 0x0152 || \ + (dev)->pci_device == 0x015a) +#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ + (dev)->pci_device == 0x0106 || \ + (dev)->pci_device == 0x010A) +#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) +#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) +#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) +#define IS_ULT(dev) (IS_HASWELL(dev) && \ + ((dev)->pci_device & 0xFF00) == 0x0A00) + +/* + * The genX designation typically refers to the render engine, so render + * capability related checks should use IS_GEN, while display and other checks + * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular + * chips, etc.). 
+ */ +#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) +#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) +#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) +#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) +#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) +#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) + +#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) +#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) +#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) +#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) -unsigned long i915_chipset_val(struct inteldrm_softc *dev_priv); -unsigned long i915_mch_val(struct inteldrm_softc *dev_priv); -unsigned long i915_gfx_val(struct inteldrm_softc *dev_priv); -void i915_update_gfx_val(struct inteldrm_softc *); +#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) +#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) -int intel_init_render_ring_buffer(struct drm_device *); -void intel_cleanup_ring_buffer(struct intel_ring_buffer *); +#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) +#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) -/* i915_irq.c */ +/* Early gen2 have a totally busted CS tlb and require pinned batches. */ +#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) + +/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte + * rows, which changed the alignment requirements and fence programming. + */ +#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ + IS_I915GM(dev))) +#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) +#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) +#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) +#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) +/* dsparb controlled by hw only */ +#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) + +#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) +#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) +#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) + +#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) + +#define HAS_DDI(dev) (IS_HASWELL(dev)) + +#define INTEL_PCH_DEVICE_ID_MASK 0xff00 +#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 +#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 +#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 +#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 +#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 + +#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) +#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) +#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) +#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) +#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) + +#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) + +#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) -extern int i915_driver_irq_install(struct drm_device * dev); -extern void i915_driver_irq_uninstall(struct drm_device * dev); -extern void i915_user_irq_get(struct inteldrm_softc *); -extern void i915_user_irq_put(struct inteldrm_softc *); -void i915_enable_pipestat(struct inteldrm_softc *, int, u_int32_t); -void i915_disable_pipestat(struct inteldrm_softc *, int, u_int32_t); -void intel_irq_init(struct drm_device *dev); -void i915_hangcheck_elapsed(void *); -void 
i915_handle_error(struct drm_device *dev, bool wedged); -void intel_enable_asle(struct drm_device *); +#define GT_FREQUENCY_MULTIPLIER 50 +/** + * RC6 is a special power stage which allows the GPU to enter an very + * low-voltage mode when idle, using down to 0V while at this stage. This + * stage is entered automatically when the GPU is idle when RC6 support is + * enabled, and as soon as new workload arises GPU wakes up automatically as well. + * + * There are different RC6 modes available in Intel GPU, which differentiate + * among each other with the latency required to enter and leave RC6 and + * voltage consumed by the GPU in different states. + * + * The combination of the following flags define which states GPU is allowed + * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and + * RC6pp is deepest RC6. Their support by hardware varies according to the + * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one + * which brings the most power savings; deeper states save more power, but + * require higher latency to switch to and wake up. + */ +#define INTEL_RC6_ENABLE (1<<0) +#define INTEL_RC6p_ENABLE (1<<1) +#define INTEL_RC6pp_ENABLE (1<<2) + +#ifdef notyet +extern struct drm_ioctl_desc i915_ioctls[]; +#endif +extern int i915_max_ioctl; +extern unsigned int i915_fbpercrtc __always_unused; +extern int i915_panel_ignore_lid __read_mostly; +extern unsigned int i915_powersave __read_mostly; +extern int i915_semaphores __read_mostly; +extern unsigned int i915_lvds_downclock __read_mostly; +extern int i915_lvds_channel_mode __read_mostly; +extern int i915_panel_use_ssc __read_mostly; +extern int i915_vbt_sdvo_panel_type __read_mostly; +extern int i915_enable_rc6 __read_mostly; +extern int i915_enable_fbc __read_mostly; +extern bool i915_enable_hangcheck __read_mostly; +extern int i915_enable_ppgtt __read_mostly; +extern unsigned int i915_preliminary_hw_support __read_mostly; + +#ifdef __linux__ +extern int i915_suspend(struct drm_device *dev, pm_message_t state); +extern int i915_resume(struct drm_device *dev); +extern int i915_master_create(struct drm_device *dev, struct drm_master *master); +extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); +#endif + + /* i915_dma.c */ +void i915_update_dri1_breadcrumb(struct drm_device *dev); +extern void i915_kernel_lost_context(struct drm_device * dev); +extern int i915_driver_load(struct drm_device *, unsigned long flags); +extern int i915_driver_unload(struct drm_device *); +extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); +extern void i915_driver_lastclose(struct drm_device * dev); +extern void i915_driver_preclose(struct drm_device *dev, + struct drm_file *file_priv); +extern void i915_driver_postclose(struct drm_device *dev, + struct drm_file *file_priv); +extern void i915_driver_close(struct drm_device *dev, + struct drm_file *file_priv); +extern int i915_driver_device_is_agp(struct drm_device * dev); +#ifdef CONFIG_COMPAT +extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); +#endif +extern int i915_emit_box(struct drm_device *dev, + struct drm_clip_rect *box, + int DR1, int DR4); +extern int intel_gpu_reset(struct drm_device *dev); +extern int i915_reset(struct drm_device *dev); +extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); +extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); +extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); +extern 
void i915_update_gfx_val(struct drm_i915_private *dev_priv); + +#ifdef __linux__ +extern void intel_console_resume(struct work_struct *work); +#endif +int intel_setup_mchbar(struct inteldrm_softc *, + struct pci_attach_args *); +int i915_getparam(struct drm_device *, void *, struct drm_file *); +int i915_setparam(struct drm_device *, void *, struct drm_file *); + +/* i915_irq.c */ +void i915_hangcheck_elapsed(void *); +void i915_handle_error(struct drm_device *dev, bool wedged); + +extern void intel_irq_init(struct drm_device *dev); extern void intel_pm_init(struct drm_device *dev); extern void intel_gt_init(struct drm_device *dev); extern void intel_gt_sanitize(struct drm_device *dev); -/* gem */ -/* Ioctls */ -int i915_gem_init_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_create_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_pread_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_pwrite_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_set_domain_ioctl(struct drm_device *, void *, - struct drm_file *); -int i915_gem_execbuffer2(struct drm_device *, void *, struct drm_file *); -int i915_gem_pin_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_unpin_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_busy_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_entervt_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_leavevt_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_get_aperture_ioctl(struct drm_device *, void *, - struct drm_file *); -int i915_gem_set_tiling(struct drm_device *, void *, struct drm_file *); -int i915_gem_get_tiling(struct drm_device *, void *, struct drm_file *); -int i915_gem_mmap_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_mmap_gtt_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_madvise_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_sw_finish_ioctl(struct drm_device *, void *, struct drm_file *); -int i915_gem_get_caching_ioctl(struct drm_device *, void *, - struct drm_file *); -int i915_gem_set_caching_ioctl(struct drm_device *, void *, - struct drm_file *); -int i915_gem_wait_ioctl(struct drm_device *, void *, struct drm_file *); - -/* GEM memory manager functions */ -int i915_gem_init_object(struct drm_gem_object *); -void i915_gem_free_object(struct drm_gem_object *); -int i915_gem_object_pin(struct drm_i915_gem_object *, uint32_t, bool, bool); -void i915_gem_object_unpin(struct drm_i915_gem_object *); -void i915_gem_retire_requests(struct drm_device *); -void i915_gem_retire_requests_ring(struct intel_ring_buffer *); -int i915_gem_check_wedge(struct inteldrm_softc *, - bool interruptible); - -void i915_gem_retire_work_handler(void *); -int i915_gem_idle(struct drm_device *); -void i915_gem_object_move_to_active(struct drm_i915_gem_object *, - struct intel_ring_buffer *); -int i915_add_request(struct intel_ring_buffer *, struct drm_file *, u32 *); -void i915_gem_init_swizzling(struct drm_device *); -void i915_gem_cleanup_ringbuffer(struct drm_device *); -int i915_gem_ring_throttle(struct drm_device *, struct drm_file *); -void i915_dispatch_gem_execbuffer(struct intel_ring_buffer *, - struct drm_i915_gem_execbuffer2 *, uint64_t); - -extern int i915_gem_get_seqno(struct drm_device *, u32 *); - -int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *, - bool); -int i915_gem_object_pin_to_display_plane(struct 
drm_i915_gem_object *, - u32, struct intel_ring_buffer *); -int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *, - bool); - -int i915_gem_init(struct drm_device *); -int i915_gem_mmap_gtt(struct drm_file *, struct drm_device *, - uint32_t, uint64_t *); -int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level); - -int i915_gem_object_finish_gpu(struct drm_i915_gem_object *); -int i915_gem_init_hw(struct drm_device *); - -/* Debug functions, mostly called from ddb */ -void i915_gem_seqno_info(int); -void i915_interrupt_info(int); -void i915_gem_fence_regs_info(int); -void i915_hws_info(int); -void i915_batchbuffer_info(int); -void i915_ringbuffer_data(int); -void i915_ringbuffer_info(int); - -int i915_gem_object_unbind(struct drm_i915_gem_object *); -int i915_wait_seqno(struct intel_ring_buffer *, uint32_t); -#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) +#ifdef notyet +void i915_error_state_free(struct kref *error_ref); +#endif + +void +i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); -void i915_gem_detach_phys_object(struct drm_device *, - struct drm_i915_gem_object *); +void +i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); + +void intel_enable_asle(struct drm_device *dev); + +#ifdef CONFIG_DEBUG_FS +extern void i915_destroy_error_state(struct drm_device *dev); +#else +#define i915_destroy_error_state(x) +#endif + + +/* i915_gem.c */ +int i915_gem_init_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_pread_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_execbuffer2(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_busy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_set_tiling(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_get_tiling(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, + struct drm_file 
*file_priv); +int i915_gem_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +void i915_gem_load(struct drm_device *dev); +int i915_gem_init_object(struct drm_gem_object *obj); +void i915_gem_object_init(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_object_ops *ops); +struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, + size_t size); +void i915_gem_free_object(struct drm_gem_object *obj); +int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, + bool map_and_fenceable, + bool nonblocking); +void i915_gem_object_unpin(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); +void i915_gem_release_mmap(struct drm_i915_gem_object *obj); +void i915_gem_lastclose(struct drm_device *dev); +int i915_gem_ring_throttle(struct drm_device *, struct drm_file *); + +#ifdef __linux +int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); +static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) +{ + struct scatterlist *sg = obj->pages->sgl; + int nents = obj->pages->nents; + while (nents > SG_MAX_SINGLE_ALLOC) { + if (n < SG_MAX_SINGLE_ALLOC - 1) + break; + + sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1); + n -= SG_MAX_SINGLE_ALLOC - 1; + nents -= SG_MAX_SINGLE_ALLOC - 1; + } + return sg_page(sg+n); +} +#else +int i915_gem_object_get_pages(struct drm_i915_gem_object *obj); +#endif + +static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pages == NULL); + obj->pages_pin_count++; +} +static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pages_pin_count == 0); + obj->pages_pin_count--; +} + +int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); +int i915_gem_object_sync(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to); +void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring); + +int i915_gem_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle, uint64_t *offset); +int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle); +/** + * Returns true if seq1 is later than seq2. 
+ */ +static inline bool +i915_seqno_passed(uint32_t seq1, uint32_t seq2) +{ + return (int32_t)(seq1 - seq2) >= 0; +} + +extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); + +int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); + +static inline bool +i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) +{ + if (obj->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + dev_priv->fence_regs[obj->fence_reg].pin_count++; + return true; + } else + return false; +} + +static inline void +i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) +{ + if (obj->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + dev_priv->fence_regs[obj->fence_reg].pin_count--; + } +} + +static inline int +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) +{ + return obj->madv == I915_MADV_DONTNEED; +} + +void i915_gem_retire_requests(struct drm_device *dev); +void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); +int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, + bool interruptible); + +void i915_gem_reset(struct drm_device *dev); +void i915_gem_clflush_object(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, + uint32_t read_domains, + uint32_t write_domain); +int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); +int __must_check i915_gem_init(struct drm_device *dev); +int __must_check i915_gem_init_hw(struct drm_device *dev); +void i915_gem_l3_remap(struct drm_device *dev); +void i915_gem_init_swizzling(struct drm_device *dev); +void i915_gem_init_ppgtt(struct drm_device *dev); +void i915_gem_cleanup_ringbuffer(struct drm_device *dev); +int __must_check i915_gpu_idle(struct drm_device *dev); +int __must_check i915_gem_idle(struct drm_device *dev); +int i915_add_request(struct intel_ring_buffer *ring, + struct drm_file *file, + u32 *seqno); +int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, + uint32_t seqno); +int i915_gem_fault(struct drm_gem_object *, struct uvm_faultinfo *, off_t, + vaddr_t, vm_page_t *, int, int, vm_prot_t, int ); +int __must_check +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, + bool write); +int __must_check +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); +int __must_check +i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, + u32 alignment, + struct intel_ring_buffer *pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *, int, int); - -int i915_gem_dumb_create(struct drm_file *, struct drm_device *, - struct drm_mode_create_dumb *); -int i915_gem_mmap_gtt(struct drm_file *, struct drm_device *, - uint32_t, uint64_t *); -int i915_gem_dumb_destroy(struct drm_file *, struct drm_device *, - uint32_t); - -/* i915_dma.c */ -void i915_driver_lastclose(struct drm_device *); -int intel_setup_mchbar(struct inteldrm_softc *, - struct pci_attach_args *); -void intel_teardown_mchbar(struct inteldrm_softc *, - struct pci_attach_args *, int); -int i915_getparam(struct drm_device *, void *, struct drm_file *); -int i915_setparam(struct drm_device *, void *, struct drm_file *); -void i915_kernel_lost_context(struct drm_device *); -int i915_driver_open(struct drm_device *, struct drm_file *); -void 
i915_driver_close(struct drm_device *, struct drm_file *); + struct drm_i915_gem_object *obj, + int id, + int align); +void i915_gem_detach_phys_object(struct drm_device *dev, + struct drm_i915_gem_object *obj); +void i915_gem_free_all_phys_object(struct drm_device *dev); +void i915_gem_release(struct drm_device *dev, struct drm_file *file); + +uint32_t +i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, + uint32_t size, + int tiling_mode); + +int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); + +#ifdef notyet +struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +struct dma_buf *i915_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gem_obj, int flags); +#endif /* i915_drv.c */ -void i915_gem_chipset_flush(struct drm_device *); -int intel_gpu_reset(struct drm_device *); -int i915_reset(struct drm_device *); -void inteldrm_timeout(void *); -bool i915_semaphore_is_enabled(struct drm_device *); +void i915_gem_chipset_flush(struct drm_device *); +int intel_gpu_reset(struct drm_device *); +int i915_reset(struct drm_device *); +void inteldrm_timeout(void *); +bool i915_semaphore_is_enabled(struct drm_device *); /* i915_gem_context.c */ void i915_gem_context_init(struct drm_device *dev); @@ -1264,6 +1675,36 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +/* i915_gem_gtt.c */ +int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); +void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); +void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, + struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); +void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, + struct drm_i915_gem_object *obj); + +void i915_gem_restore_gtt_mappings(struct drm_device *dev); +int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); +void i915_gem_init_global_gtt(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end); +int i915_gem_gtt_init(struct drm_device *dev); +void i915_gem_gtt_fini(struct drm_device *dev); +#ifdef notyet +static inline void i915_gem_chipset_flush(struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen < 6) + intel_gtt_chipset_flush(); +} +#endif + + /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, @@ -1272,74 +1713,67 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, bool nonblock); int i915_gem_evict_everything(struct drm_device *dev); +/* i915_gem_stolen.c */ +int i915_gem_init_stolen(struct drm_device *dev); +void i915_gem_cleanup_stolen(struct drm_device *dev); + /* i915_gem_tiling.c */ -void i915_gem_detect_bit_6_swizzle(struct drm_device *); -void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *); -void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *); +void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); +void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); +void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_debug.c 
*/ +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, + const char *where, uint32_t mark); +#if WATCH_LISTS +int i915_verify_lists(struct drm_device *dev); +#else #define i915_verify_lists(dev) 0 +#endif +void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, + int handle); +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, + const char *where, uint32_t mark); + +/* i915_debugfs.c */ +#ifdef __linux__ +int i915_debugfs_init(struct drm_minor *minor); +void i915_debugfs_cleanup(struct drm_minor *minor); +#endif /* i915_suspend.c */ -extern int i915_save_state(struct drm_device *); -extern int i915_restore_state(struct drm_device *); +extern int i915_save_state(struct drm_device *dev); +extern int i915_restore_state(struct drm_device *dev); + +/* i915_suspend.c */ +extern int i915_save_state(struct drm_device *dev); +extern int i915_restore_state(struct drm_device *dev); + +/* i915_sysfs.c */ +void i915_setup_sysfs(struct drm_device *dev_priv); +void i915_teardown_sysfs(struct drm_device *dev_priv); /* intel_i2c.c */ -extern int intel_setup_gmbus(struct inteldrm_softc *); -static inline bool intel_gmbus_is_port_valid(unsigned port) +extern int intel_setup_gmbus(struct drm_device *dev); +extern void intel_teardown_gmbus(struct drm_device *dev); +extern inline bool intel_gmbus_is_port_valid(unsigned port) { return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); } -struct i2c_controller *intel_gmbus_get_adapter(drm_i915_private_t *, unsigned); -extern void intel_gmbus_force_bit(struct i2c_controller *, bool); -static inline bool intel_gmbus_is_forced_bit(struct i2c_controller *i2c) +extern struct i2c_controller *intel_gmbus_get_adapter( + struct drm_i915_private *dev_priv, unsigned port); +extern void intel_gmbus_set_speed(struct i2c_controller *adapter, int speed); +extern void intel_gmbus_force_bit(struct i2c_controller *adapter, bool force_bit); +extern inline bool intel_gmbus_is_forced_bit(struct i2c_controller *i2c) { return container_of(i2c, struct intel_gmbus, controller)->force_bit; } - -/* i915_gem.c */ -int i915_gem_fault(struct drm_gem_object *, struct uvm_faultinfo *, off_t, - vaddr_t, vm_page_t *, int, int, vm_prot_t, int ); -void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops); -struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, - size_t size); -int i915_gpu_idle(struct drm_device *); -int i915_gem_object_get_fence(struct drm_i915_gem_object *); -int i915_gem_object_put_fence(struct drm_i915_gem_object *); -void i915_gem_reset(struct drm_device *); -void i915_gem_clflush_object(struct drm_i915_gem_object *); -void i915_gem_release(struct drm_device *, struct drm_file *); -void i915_gem_release_mmap(struct drm_i915_gem_object *); -void i915_gem_load(struct drm_device *dev); - -uint32_t -i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, - uint32_t size, - int tiling_mode); - -int i915_gem_object_sync(struct drm_i915_gem_object *, - struct intel_ring_buffer *); - -int i915_gem_object_get_pages(struct drm_i915_gem_object *obj); - -static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) -{ - BUG_ON(obj->pages == NULL); - obj->pages_pin_count++; -} -static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) -{ - BUG_ON(obj->pages_pin_count == 0); - obj->pages_pin_count--; -} - -int i915_mutex_lock_interruptible(struct drm_device *dev); +extern void intel_i2c_reset(struct drm_device *dev); /* intel_opregion.c 
*/ -int intel_opregion_setup(struct drm_device *dev); -#if NACPI > 0 +extern int intel_opregion_setup(struct drm_device *dev); +#ifdef CONFIG_ACPI extern void intel_opregion_init(struct drm_device *dev); extern void intel_opregion_fini(struct drm_device *dev); extern void intel_opregion_asle_intr(struct drm_device *dev); @@ -1353,20 +1787,14 @@ static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } #endif -/* i915_gem_gtt.c */ -void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); -void i915_gem_restore_gtt_mappings(struct drm_device *dev); -int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); -void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level); -void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); -void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); -void i915_gem_init_global_gtt(struct drm_device *dev, - unsigned long start, - unsigned long mappable_end, - unsigned long end); -int i915_gem_gtt_init(struct drm_device *dev); -void i915_gem_gtt_fini(struct drm_device *dev); +/* intel_acpi.c */ +#ifdef CONFIG_ACPI +extern void intel_register_dsm_handler(void); +extern void intel_unregister_dsm_handler(void); +#else +static inline void intel_register_dsm_handler(void) { return; } +static inline void intel_unregister_dsm_handler(void) { return; } +#endif /* CONFIG_ACPI */ /* modesetting */ extern void intel_modeset_init_hw(struct drm_device *dev); @@ -1381,34 +1809,36 @@ extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); extern void gen6_set_rps(struct drm_device *dev, u8 val); -extern void intel_detect_pch(struct inteldrm_softc *dev_priv); +extern void intel_detect_pch(struct drm_device *dev); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); -int i915_load_modeset_init(struct drm_device *dev); extern int intel_enable_rc6(const struct drm_device *dev); +int i915_load_modeset_init(struct drm_device *dev); -extern struct intel_overlay_error_state *intel_overlay_capture_error_state( - struct drm_device *dev); -#ifdef notyet -extern void intel_overlay_print_error_state(struct sbuf *m, - struct intel_overlay_error_state *error); -#endif -extern struct intel_display_error_state *intel_display_capture_error_state( - struct drm_device *dev); -#ifdef notyet -extern void intel_display_print_error_state(struct sbuf *m, - struct drm_device *dev, struct intel_display_error_state *error); +extern bool i915_semaphore_is_enabled(struct drm_device *dev); +int i915_reg_read_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + +/* overlay */ +#ifdef CONFIG_DEBUG_FS +extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); +extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); + +extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern void intel_display_print_error_state(struct seq_file *m, + struct drm_device *dev, + struct intel_display_error_state *error); #endif /* On SNB platform, before reading ring registers forcewake bit * must be set to prevent GT core from power down and stale values being * returned. 
*/ -void gen6_gt_force_wake_get(struct inteldrm_softc *dev_priv); -void gen6_gt_force_wake_put(struct inteldrm_softc *dev_priv); -int __gen6_gt_wait_for_fifo(struct inteldrm_softc *dev_priv); +void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); +void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); +int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); -int sandybridge_pcode_read(struct inteldrm_softc *dev_priv, u8 mbox, u32 *val); -int sandybridge_pcode_write(struct inteldrm_softc *dev_priv, u8 mbox, u32 val); +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); static __inline void write8(struct inteldrm_softc *dev_priv, bus_size_t reg, uint8_t val) @@ -1517,231 +1947,5 @@ __i915_write(64, q) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) -#define INTELDRM_VERBOSE 0 -#if INTELDRM_VERBOSE > 0 -#define INTELDRM_VPRINTF(fmt, args...) DRM_INFO(fmt, ##args) -#else -#define INTELDRM_VPRINTF(fmt, args...) -#endif - -#define LP_RING(d) (&((struct inteldrm_softc *)(d))->ring[RCS]) - -#define BEGIN_LP_RING(n) \ - intel_ring_begin(LP_RING(dev_priv), (n)) - -#define OUT_RING(x) \ - intel_ring_emit(LP_RING(dev_priv), (x)) - -#define ADVANCE_LP_RING() \ - intel_ring_advance(LP_RING(dev_priv)) - -/* MCH IFP BARs */ -#define I915_IFPADDR 0x60 -#define I965_IFPADDR 0x70 - -/** - * Reads a dword out of the status page, which is written to from the command - * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or - * MI_STORE_DATA_IMM. - * - * The following dwords have a reserved meaning: - * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. - * 0x04: ring 0 head pointer - * 0x05: ring 1 head pointer (915-class) - * 0x06: ring 2 head pointer (915-class) - * 0x10-0x1b: Context status DWords (GM45) - * 0x1f: Last written status offset. (GM45) - * - * The area from dword 0x20 to 0x3ff is available for driver usage. 
- */ -#define READ_HWSP(dev_priv, reg) inteldrm_read_hws(dev_priv, reg) -#define I915_GEM_HWS_INDEX 0x20 - -#define INTEL_INFO(dev) (((struct inteldrm_softc *) (dev)->dev_private)->info) - -/* Chipset type macros */ - -#define IS_I830(dev) ((dev)->pci_device == 0x3577) -#define IS_845G(dev) ((dev)->pci_device == 0x2562) -#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) -#define IS_I865G(dev) ((dev)->pci_device == 0x2572) -#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) -#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) -#define IS_I945G(dev) ((dev)->pci_device == 0x2772) -#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) -#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) -#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) -#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) -#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) -#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) -#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) -#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) -#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) -#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) -#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) -#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) -#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ - (dev)->pci_device == 0x0152 || \ - (dev)->pci_device == 0x015a) -#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ - (dev)->pci_device == 0x0106 || \ - (dev)->pci_device == 0x010A) -#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) - -#define IS_I9XX(dev) (INTEL_INFO(dev)->gen >= 3) -#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->gen == 5) - -#define IS_SANDYBRIDGE(dev) (INTEL_INFO(dev)->gen == 6) -#define IS_SANDYBRIDGE_D(dev) (IS_SANDYBRIDGE(dev) && \ - (INTEL_INFO(dev)->is_mobile == 0)) -#define IS_SANDYBRIDGE_M(dev) (IS_SANDYBRIDGE(dev) && \ - (INTEL_INFO(dev)->is_mobile == 1)) -#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) -#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) -#define IS_ULT(dev) (IS_HASWELL(dev) && \ - ((dev)->pci_device & 0xFF00) == 0x0A00) - -/* - * The genX designation typically refers to the render engine, so render - * capability related checks should use IS_GEN, while display and other checks - * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular - * chips, etc.). - */ -#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) -#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) -#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) -#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) -#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) -#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) - -#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) -#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) -#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) -#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) - -#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) - -#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) -#define OVERLAY_NEEDS_PHYSICAL(dev) \ - (INTEL_INFO(dev)->overlay_needs_physical) - -/* Early gen2 have a totally busted CS tlb and require pinned batches. */ -#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) - -/* - * With the 945 and later, Y tiling got adjusted so that it was 32 128-byte - * rows, which changes the alignment requirements and fence programming. 
- */ -#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && \ - !(IS_I915G(dev) || IS_I915GM(dev))) - -#define HAS_RESET(dev) (INTEL_INFO(dev)->gen >= 4 && \ - (!IS_GEN6(dev)) && (!IS_GEN7(dev))) - -#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) -#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) -#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) -#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) -#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) -#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) - -#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) -#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) -#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) - -#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) - -#define HAS_DDI(dev) (IS_HASWELL(dev)) - -#define INTEL_PCH_DEVICE_ID_MASK 0xff00 -#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 -#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 -#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 -#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 -#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 - -#define INTEL_PCH_TYPE(dev) (((struct inteldrm_softc *) (dev)->dev_private)->pch_type) -#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) -#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) -#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) -#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) - -#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) - -#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - -#define PRIMARY_RINGBUFFER_SIZE (128*1024) - -#define __EXEC_OBJECT_HAS_FENCE (1<<30) - -/** - * RC6 is a special power stage which allows the GPU to enter an very - * low-voltage mode when idle, using down to 0V while at this stage. This - * stage is entered automatically when the GPU is idle when RC6 support is - * enabled, and as soon as new workload arises GPU wakes up automatically as well. - * - * There are different RC6 modes available in Intel GPU, which differentiate - * among each other with the latency required to enter and leave RC6 and - * voltage consumed by the GPU in different states. - * - * The combination of the following flags define which states GPU is allowed - * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and - * RC6pp is deepest RC6. Their support by hardware varies according to the - * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one - * which brings the most power savings; deeper states save more power, but - * require higher latency to switch to and wake up. - */ -#define INTEL_RC6_ENABLE (1<<0) -#define INTEL_RC6p_ENABLE (1<<1) -#define INTEL_RC6pp_ENABLE (1<<2) - -extern unsigned int i915_lvds_downclock; -extern int i915_lvds_channel_mode; -extern int i915_panel_use_ssc; -extern int i915_panel_ignore_lid; -extern unsigned int i915_powersave; -extern int i915_semaphores; -extern int i915_vbt_sdvo_panel_type; -extern int i915_enable_rc6; -extern int i915_enable_fbc; -extern bool i915_enable_hangcheck; - -/* Inlines */ - -/** - * Returns true if seq1 is later than seq2. 
- */ -static __inline int -i915_seqno_passed(uint32_t seq1, uint32_t seq2) -{ - return ((int32_t)(seq1 - seq2) >= 0); -} - -static inline bool -i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) -{ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - dev_priv->fence_regs[obj->fence_reg].pin_count++; - return true; - } else - return false; -} - -static inline void -i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) -{ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - dev_priv->fence_regs[obj->fence_reg].pin_count--; - } -} - -static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) -{ - return obj->madv == I915_MADV_DONTNEED; -} #endif diff --git a/sys/dev/pci/drm/i915/intel_i2c.c b/sys/dev/pci/drm/i915/intel_i2c.c index d29fada4eae..e01a24ad0e7 100644 --- a/sys/dev/pci/drm/i915/intel_i2c.c +++ b/sys/dev/pci/drm/i915/intel_i2c.c @@ -1,4 +1,4 @@ -/* $OpenBSD: intel_i2c.c,v 1.4 2015/02/12 04:56:03 kettenis Exp $ */ +/* $OpenBSD: intel_i2c.c,v 1.5 2015/04/03 13:10:59 jsg Exp $ */ /* * Copyright (c) 2012, 2013 Mark Kettenis <kettenis@openbsd.org> * @@ -274,9 +274,9 @@ intel_gpio_write_byte(void *cookie, u_int8_t byte, int flags) } int -intel_setup_gmbus(struct inteldrm_softc *dev_priv) +intel_setup_gmbus(struct drm_device *dev) { - struct drm_device *dev = dev_priv->dev; + struct drm_i915_private *dev_priv = dev->dev_private; int i; if (HAS_PCH_SPLIT(dev)) diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.h b/sys/dev/pci/drm/i915/intel_ringbuffer.h index 6a90d3527cd..22459af08e1 100644 --- a/sys/dev/pci/drm/i915/intel_ringbuffer.h +++ b/sys/dev/pci/drm/i915/intel_ringbuffer.h @@ -1,4 +1,4 @@ -/* $OpenBSD: intel_ringbuffer.h,v 1.3 2014/03/30 00:58:24 jsg Exp $ */ +/* $OpenBSD: intel_ringbuffer.h,v 1.4 2015/04/03 13:10:59 jsg Exp $ */ #ifndef _INTEL_RINGBUFFER_H_ #define _INTEL_RINGBUFFER_H_ @@ -206,6 +206,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring, iowrite32(data, ring->virtual_start + ring->tail); ring->tail += 4; } +#else +void intel_ring_emit(struct intel_ring_buffer *, u_int32_t); #endif void intel_ring_advance(struct intel_ring_buffer *ring); int intel_ring_idle(struct intel_ring_buffer *ring); |
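
Editorial sketch (not part of the commit): the i915_seqno_passed() inline removed in the i915_drv.h hunk above compares hardware sequence numbers with a signed 32-bit subtraction so the "has this seqno been reached" test keeps working after the counter wraps. A minimal standalone illustration of that idiom, with made-up test values and a plain C harness:

/*
 * Wraparound-safe comparison, same shape as the removed
 * i915_seqno_passed() inline.  Casting the unsigned difference to a
 * signed 32-bit value keeps "seq1 is at or after seq2" true even once
 * the counter has wrapped past 0xffffffff.
 */
#include <assert.h>
#include <stdint.h>

static int
seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return ((int32_t)(seq1 - seq2) >= 0);
}

int
main(void)
{
	assert(seqno_passed(10, 5));		/* plain case */
	assert(!seqno_passed(5, 10));
	/* 3 counts as "after" 0xfffffffe once the counter wraps. */
	assert(seqno_passed(3, 0xfffffffeU));
	return 0;
}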
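
Editorial sketch (not part of the commit): intel_gmbus_is_forced_bit() in the header diff steps from a pointer to an embedded i2c_controller back to the struct intel_gmbus that contains it via container_of(). The structs below (fake_gmbus, fake_controller) are invented for illustration and are not driver types:

/*
 * container_of() pattern: recover the enclosing struct from a pointer
 * to one of its embedded members.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_controller {
	int speed;
};

struct fake_gmbus {
	int force_bit;
	struct fake_controller controller;	/* embedded, not a pointer */
};

int
main(void)
{
	struct fake_gmbus bus = { .force_bit = 1 };
	struct fake_controller *ctl = &bus.controller;

	/* Walk back from the member to its containing struct. */
	struct fake_gmbus *owner =
	    container_of(ctl, struct fake_gmbus, controller);

	printf("force_bit = %d\n", owner->force_bit);
	return 0;
}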
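
Editorial sketch (not part of the commit): the intel_ringbuffer.h hunk at the end keeps intel_ring_emit() as an inline that writes one dword at the current tail and bumps the tail by four, with intel_ring_advance() finishing the submission. A toy version of that emit/advance pattern over an ordinary memory buffer; the names, sizes and wrap handling here are simplifications, and the real ring also tracks head/space and writes through iowrite32() to mapped graphics memory:

/* Toy emit/advance mirroring the shape of the ring helpers. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_RING_SIZE 4096		/* power of two, so masking wraps */

struct toy_ring {
	uint8_t buf[TOY_RING_SIZE];
	uint32_t tail;
};

static void
toy_ring_emit(struct toy_ring *ring, uint32_t data)
{
	memcpy(ring->buf + ring->tail, &data, sizeof(data));
	ring->tail += 4;
}

static void
toy_ring_advance(struct toy_ring *ring)
{
	/* Wrap; the real driver would also write the tail register here. */
	ring->tail &= TOY_RING_SIZE - 1;
	printf("tail now 0x%x\n", ring->tail);
}

int
main(void)
{
	struct toy_ring ring = { .tail = 0 };

	toy_ring_emit(&ring, 0x02000000);	/* an example command dword */
	toy_ring_emit(&ring, 0);		/* padding dword */
	toy_ring_advance(&ring);
	return 0;
}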