author	Jonathan Gray <jsg@cvs.openbsd.org>	2013-03-18 12:36:53 +0000
committer	Jonathan Gray <jsg@cvs.openbsd.org>	2013-03-18 12:36:53 +0000
commit	1e83ebc015e3e708142fa767510ae65e6892c20d (patch)
tree	d1f5f7f40202ef7b50c703a9c3ac746a96dd93ed /sys/dev/pci
parent	c48141c1d8bd41bf9e7298c54917eaa386e56689 (diff)
Significantly increase the wordlist for ddb hangman,
and update our device independent DRM code and the Intel DRM code to be mostly in sync with Linux 3.8.3. Among other things this brings support for kernel modesetting and enables use of the rings on gen6+ Intel hardware. Based on some earlier work from matthieu@ with some hints from FreeBSD and with lots of help from kettenis@ (including a beautiful accelerated wscons framebuffer console!) Thanks to M:Tier and the OpenBSD Foundation for sponsoring this work.
Diffstat (limited to 'sys/dev/pci')
-rw-r--r--  sys/dev/pci/drm/drmP.h  275
-rw-r--r--  sys/dev/pci/drm/drm_atomic.h  20
-rw-r--r--  sys/dev/pci/drm/drm_crtc.c  4112
-rw-r--r--  sys/dev/pci/drm/drm_crtc.h  1080
-rw-r--r--  sys/dev/pci/drm/drm_crtc_helper.c  1137
-rw-r--r--  sys/dev/pci/drm/drm_crtc_helper.h  167
-rw-r--r--  sys/dev/pci/drm/drm_dp_helper.c  373
-rw-r--r--  sys/dev/pci/drm/drm_dp_helper.h  362
-rw-r--r--  sys/dev/pci/drm/drm_drv.c  152
-rw-r--r--  sys/dev/pci/drm/drm_edid.c  2183
-rw-r--r--  sys/dev/pci/drm/drm_edid.h  258
-rw-r--r--  sys/dev/pci/drm/drm_edid_modes.h  774
-rw-r--r--  sys/dev/pci/drm/drm_fb_helper.c  1533
-rw-r--r--  sys/dev/pci/drm/drm_fb_helper.h  122
-rw-r--r--  sys/dev/pci/drm/drm_fourcc.h  136
-rw-r--r--  sys/dev/pci/drm/drm_irq.c  1464
-rw-r--r--  sys/dev/pci/drm/drm_linux_list.h  175
-rw-r--r--  sys/dev/pci/drm/drm_mm.c  562
-rw-r--r--  sys/dev/pci/drm/drm_mm.h  184
-rw-r--r--  sys/dev/pci/drm/drm_mode.h  25
-rw-r--r--  sys/dev/pci/drm/drm_modes.c  1359
-rw-r--r--  sys/dev/pci/drm/files.drm  48
-rw-r--r--  sys/dev/pci/drm/i915/dvo.h  152
-rw-r--r--  sys/dev/pci/drm/i915/dvo_ch7017.c  443
-rw-r--r--  sys/dev/pci/drm/i915/dvo_ch7xxx.c  371
-rw-r--r--  sys/dev/pci/drm/i915/dvo_ivch.c  458
-rw-r--r--  sys/dev/pci/drm/i915/dvo_ns2501.c  615
-rw-r--r--  sys/dev/pci/drm/i915/dvo_sil164.c  304
-rw-r--r--  sys/dev/pci/drm/i915/dvo_tfp410.c  345
-rw-r--r--  sys/dev/pci/drm/i915/i915_dma.c  406
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.c  2583
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.h  1567
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem.c  3229
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_evict.c  182
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_execbuffer.c  517
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_gtt.c  74
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_tiling.c  574
-rw-r--r--  sys/dev/pci/drm/i915/i915_irq.c  2923
-rw-r--r--  sys/dev/pci/drm/i915/i915_reg.h (renamed from sys/dev/pci/drm/i915_reg.h)  1085
-rw-r--r--  sys/dev/pci/drm/i915/i915_suspend.c  939
-rw-r--r--  sys/dev/pci/drm/i915/intel_bios.c  852
-rw-r--r--  sys/dev/pci/drm/i915/intel_bios.h  620
-rw-r--r--  sys/dev/pci/drm/i915/intel_crt.c  887
-rw-r--r--  sys/dev/pci/drm/i915/intel_ddi.c  1582
-rw-r--r--  sys/dev/pci/drm/i915/intel_display.c  10065
-rw-r--r--  sys/dev/pci/drm/i915/intel_dp.c  3085
-rw-r--r--  sys/dev/pci/drm/i915/intel_drv.h  646
-rw-r--r--  sys/dev/pci/drm/i915/intel_dvo.c  559
-rw-r--r--  sys/dev/pci/drm/i915/intel_fb.c  309
-rw-r--r--  sys/dev/pci/drm/i915/intel_hdmi.c  1156
-rw-r--r--  sys/dev/pci/drm/i915/intel_i2c.c  220
-rw-r--r--  sys/dev/pci/drm/i915/intel_lvds.c  1207
-rw-r--r--  sys/dev/pci/drm/i915/intel_modes.c  128
-rw-r--r--  sys/dev/pci/drm/i915/intel_opregion.c  570
-rw-r--r--  sys/dev/pci/drm/i915/intel_overlay.c  1631
-rw-r--r--  sys/dev/pci/drm/i915/intel_panel.c  516
-rw-r--r--  sys/dev/pci/drm/i915/intel_pm.c  4752
-rw-r--r--  sys/dev/pci/drm/i915/intel_ringbuffer.c  2007
-rw-r--r--  sys/dev/pci/drm/i915/intel_ringbuffer.h  242
-rw-r--r--  sys/dev/pci/drm/i915/intel_sdvo.c  3047
-rw-r--r--  sys/dev/pci/drm/i915/intel_sdvo_regs.h  731
-rw-r--r--  sys/dev/pci/drm/i915/intel_sprite.c  767
-rw-r--r--  sys/dev/pci/drm/i915/intel_tv.c  1708
-rw-r--r--  sys/dev/pci/drm/i915_drm.h  26
-rw-r--r--  sys/dev/pci/drm/i915_drv.c  5434
-rw-r--r--  sys/dev/pci/drm/i915_drv.h  765
-rw-r--r--  sys/dev/pci/drm/i915_irq.c  380
-rw-r--r--  sys/dev/pci/drm/i915_suspend.c  927
-rw-r--r--  sys/dev/pci/drm/radeon_drv.c  7
-rw-r--r--  sys/dev/pci/drm/radeon_irq.c  4
70 files changed, 70138 insertions(+), 7960 deletions(-)
diff --git a/sys/dev/pci/drm/drmP.h b/sys/dev/pci/drm/drmP.h
index 08261e61b4c..7c4035dad30 100644
--- a/sys/dev/pci/drm/drmP.h
+++ b/sys/dev/pci/drm/drmP.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drmP.h,v 1.133 2012/12/06 15:05:21 mpi Exp $ */
+/* $OpenBSD: drmP.h,v 1.134 2013/03/18 12:36:51 jsg Exp $ */
/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
*/
@@ -70,10 +70,15 @@
#include <machine/param.h>
#include <machine/bus.h>
+#include "drm_linux_list.h"
#include "drm.h"
#include "drm_atomic.h"
#include "agp.h"
+#define DRM_DEBUGBITS_DEBUG 0x1
+#define DRM_DEBUGBITS_KMS 0x2
+#define DRM_DEBUGBITS_FAILED_IOCTL 0x4
+
#define __OS_HAS_AGP (NAGP > 0)
#if BYTE_ORDER == BIG_ENDIAN
@@ -110,14 +115,81 @@
#define DRM_MTRR_WC MDF_WRITECOMBINE
#define PAGE_ALIGN(addr) (((addr) + PAGE_MASK) & ~PAGE_MASK)
+#define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is a power of two */
+#define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz)
+#define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000)
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0)
+#define drm_msleep(x, msg) delay((x) * 1000)
extern struct cfdriver drm_cd;
+/* linux compat */
typedef u_int64_t u64;
typedef u_int32_t u32;
typedef u_int16_t u16;
typedef u_int8_t u8;
+typedef int64_t s64;
+
+typedef uint16_t __le16;
+
+#define EXPORT_SYMBOL(x)
+#define ARRAY_SIZE nitems
+
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+#define BUG() \
+do { \
+ panic("BUG at %s:%d", __FILE__, __LINE__); \
+} while (0)
+
+#define BUG_ON(x) KASSERT(!(x))
+
+#define WARN(condition, fmt...) ({ \
+ if (condition) \
+ printf(fmt); \
+ (condition); \
+})
+
+#define _WARN_STR(x) #x
+
+#define WARN_ON(condition) ({ \
+ if (condition) \
+ printf("WARNING %s failed at %s:%d\n", \
+ _WARN_STR(condition), __FILE__, __LINE__); \
+ (condition); \
+})
+
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-ELAST)
+
+static inline void *
+ERR_PTR(long error)
+{
+ return (void *) error;
+}
+
+static inline long
+IS_ERR(const void *ptr)
+{
+ return IS_ERR_VALUE((unsigned long)ptr);
+}
+
+static inline long
+IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
+#define container_of(ptr, type, member) ({ \
+ __typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+#ifndef __DECONST
+#define __DECONST(type, var) ((type)(__uintptr_t)(const void *)(var))
+#endif
+
+
/* DRM_READMEMORYBARRIER() prevents reordering of reads.
* DRM_WRITEMEMORYBARRIER() prevents reordering of writes.
* DRM_MEMORYBARRIER() prevents reordering of reads and writes.
@@ -190,6 +262,30 @@ do { \
#define DRM_DEBUG(fmt, arg...) do { } while(/* CONSTCOND */ 0)
#endif
+#ifdef DRMDEBUG
+#undef DRM_DEBUG_KMS
+#define DRM_DEBUG_KMS(fmt, arg...) do { \
+ if (drm_debug_flag) \
+ printf("[" DRM_NAME ":pid%d:%s] " fmt, curproc->p_pid, \
+ __func__ , ## arg); \
+} while (0)
+#else
+#define DRM_DEBUG_KMS(fmt, arg...) do { } while(/* CONSTCOND */ 0)
+#endif
+
+#define DRM_LOG_KMS DRM_DEBUG_KMS
+
+#ifdef DRMDEBUG
+#undef DRM_DEBUG_DRIVER
+#define DRM_DEBUG_DRIVER(fmt, arg...) do { \
+ if (drm_debug_flag) \
+ printf("[" DRM_NAME ":pid%d:%s] " fmt, curproc->p_pid, \
+ __func__ , ## arg); \
+} while (0)
+#else
+#define DRM_DEBUG_DRIVER(fmt, arg...) do { } while(/* CONSTCOND */ 0)
+#endif
+
struct drm_pcidev {
int vendor;
int device;
@@ -232,11 +328,13 @@ struct drm_pending_event {
TAILQ_ENTRY(drm_pending_event) link;
struct drm_event *event;
struct drm_file *file_priv;
+ pid_t pid;
void (*destroy)(struct drm_pending_event *);
};
struct drm_pending_vblank_event {
struct drm_pending_event base;
+ int pipe;
struct drm_event_vblank event;
};
@@ -257,6 +355,8 @@ struct drm_file {
int master;
int minor;
u_int obj_id; /*next gem id*/
+ struct list_head fbs;
+ void *driver_priv;
};
struct drm_lock_data {
@@ -329,21 +429,6 @@ struct drm_local_map {
enum drm_map_type type; /* Type of memory mapped */
};
-struct drm_vblank_info {
- struct mutex vb_lock; /* VBLANK data lock */
- struct timeout vb_disable_timer; /* timer for disable */
- int vb_num; /* number of crtcs */
- u_int32_t vb_max; /* counter reg size */
- struct drm_vblank {
- struct drmevlist vbl_events; /* vblank events */
- u_int32_t vbl_last; /* Last received */
- u_int32_t vbl_count; /* interrupt no. */
- int vbl_refs; /* Number of users */
- int vbl_enabled; /* Enabled? */
- int vbl_inmodeset; /* in a modeset? */
- } vb_crtcs[1];
-};
-
/* Heap implementation for radeon and i915 legacy */
TAILQ_HEAD(drm_heap, drm_mem);
@@ -429,6 +514,27 @@ struct drm_handle {
uint32_t handle;
};
+struct drm_mode_handle {
+ SPLAY_ENTRY(drm_mode_handle) entry;
+ struct drm_mode_object *obj;
+ uint32_t handle;
+};
+
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID (1 << 0)
+#define DRM_SCANOUTPOS_INVBL (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+
struct drm_driver_info {
int (*firstopen)(struct drm_device *);
int (*open)(struct drm_device *, struct drm_file *);
@@ -438,12 +544,19 @@ struct drm_driver_info {
void (*lastclose)(struct drm_device *);
int (*dma_ioctl)(struct drm_device *, struct drm_dma *,
struct drm_file *);
+ int (*irq_handler)(void *);
+ void (*irq_preinstall) (struct drm_device *);
int (*irq_install)(struct drm_device *);
+ int (*irq_postinstall) (struct drm_device *);
void (*irq_uninstall)(struct drm_device *);
int vblank_pipes;
u_int32_t (*get_vblank_counter)(struct drm_device *, int);
int (*enable_vblank)(struct drm_device *, int);
void (*disable_vblank)(struct drm_device *, int);
+ int (*get_scanout_position)(struct drm_device *, int, int *, int *);
+ int (*get_vblank_timestamp)(struct drm_device *, int, int *,
+	    struct timeval *, unsigned);
+
/*
* driver-specific constructor for gem objects to set up private data.
* returns 0 on success.
@@ -453,6 +566,13 @@ struct drm_driver_info {
int (*gem_fault)(struct drm_obj *, struct uvm_faultinfo *, off_t,
vaddr_t, vm_page_t *, int, int, vm_prot_t, int);
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev, struct drm_mode_create_dumb *args);
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle, uint64_t *offset);
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle);
+
size_t gem_size;
size_t buf_priv_size;
size_t file_priv_size;
@@ -472,17 +592,40 @@ struct drm_driver_info {
#define DRIVER_SG 0x20
#define DRIVER_IRQ 0x40
#define DRIVER_GEM 0x80
+#define DRIVER_MODESET 0x100
u_int flags;
};
+#include "drm_crtc.h"
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+ bool specified;
+ bool refresh_specified;
+ bool bpp_specified;
+ int xres, yres;
+ int bpp;
+ int refresh;
+ bool rb;
+ bool interlace;
+ bool cvt;
+ bool margins;
+ enum drm_connector_force force;
+};
+
/**
* DRM device functions structure
*/
struct drm_device {
struct device device; /* softc is an extension of struct device */
- const struct drm_driver_info *driver;
+ struct drm_driver_info *driver;
+
+ u_int16_t pci_device;
+ u_int16_t pci_vendor;
+ u_int16_t pci_subdevice;
+ u_int16_t pci_subvendor;
bus_dma_tag_t dmat;
bus_space_tag_t bst;
@@ -516,9 +659,27 @@ struct drm_device {
int irq_enabled; /* True if the irq handler is enabled */
/* VBLANK support */
- struct drm_vblank_info *vblank; /* One per ctrc */
+ struct drmevlist vbl_events; /* vblank events */
+ int vblank_disable_allowed;
+ /**< size of vblank counter register */
+ uint32_t max_vblank_count;
struct mutex event_lock;
+ int *vbl_queue;
+ atomic_t *_vblank_count;
+ struct timeval *_vblank_time;
+ struct mutex vblank_time_lock;
+ struct mutex vbl_lock;
+ atomic_t *vblank_refcount;
+ uint32_t *last_vblank;
+
+ int *vblank_enabled;
+ int *vblank_inmodeset;
+ u32 *last_vblank_wait;
+ struct timeout vblank_disable_timer;
+
+ int num_crtcs;
+
pid_t buf_pgid;
struct drm_agp_head *agp;
@@ -527,6 +688,8 @@ struct drm_device {
void *dev_private;
struct drm_local_map *agp_buffer_map;
+ struct drm_mode_config mode_config; /* Current mode config */
+
/* GEM info */
struct mutex obj_name_lock;
atomic_t obj_count;
@@ -537,27 +700,68 @@ struct drm_device {
atomic_t gtt_count;
atomic_t gtt_memory;
uint32_t gtt_total;
- uint32_t invalidate_domains;
- uint32_t flush_domains;
SPLAY_HEAD(drm_name_tree, drm_obj) name_tree;
struct pool objpl;
+
+ /* mode stuff */
};
struct drm_attach_args {
- const struct drm_driver_info *driver;
+ struct drm_driver_info *driver;
char *busid;
bus_dma_tag_t dmat;
bus_space_tag_t bst;
size_t busid_len;
int is_agp;
u_int8_t irq;
+ u_int16_t pci_vendor;
+ u_int16_t pci_device;
+ u_int16_t pci_subvendor;
+ u_int16_t pci_subdevice;
};
extern int drm_debug_flag;
+enum dmi_field {
+ DMI_NONE,
+ DMI_BIOS_VENDOR,
+ DMI_BIOS_VERSION,
+ DMI_BIOS_DATE,
+ DMI_SYS_VENDOR,
+ DMI_PRODUCT_NAME,
+ DMI_PRODUCT_VERSION,
+ DMI_PRODUCT_SERIAL,
+ DMI_PRODUCT_UUID,
+ DMI_BOARD_VENDOR,
+ DMI_BOARD_NAME,
+ DMI_BOARD_VERSION,
+ DMI_BOARD_SERIAL,
+ DMI_BOARD_ASSET_TAG,
+ DMI_CHASSIS_VENDOR,
+ DMI_CHASSIS_TYPE,
+ DMI_CHASSIS_VERSION,
+ DMI_CHASSIS_SERIAL,
+ DMI_CHASSIS_ASSET_TAG,
+ DMI_STRING_MAX,
+};
+
+struct dmi_strmatch {
+ unsigned char slot;
+ char substr[79];
+};
+
+struct dmi_system_id {
+ int (*callback)(const struct dmi_system_id *);
+ const char *ident;
+ struct dmi_strmatch matches[4];
+};
+#define DMI_MATCH(a, b) {(a), (b)}
+int dmi_check_system(const struct dmi_system_id *);
+
+
/* Device setup support (drm_drv.c) */
int drm_pciprobe(struct pci_attach_args *, const struct drm_pcidev * );
-struct device *drm_attach_pci(const struct drm_driver_info *,
+struct device *drm_attach_pci(struct drm_driver_info *,
struct pci_attach_args *, int, struct device *);
dev_type_ioctl(drmioctl);
dev_type_read(drmread);
@@ -635,8 +839,14 @@ int drm_vblank_init(struct drm_device *, int);
u_int32_t drm_vblank_count(struct drm_device *, int);
int drm_vblank_get(struct drm_device *, int);
void drm_vblank_put(struct drm_device *, int);
+void drm_vblank_off(struct drm_device *, int);
+void drm_vblank_pre_modeset(struct drm_device *, int);
+void drm_vblank_post_modeset(struct drm_device *, int);
int drm_modeset_ctl(struct drm_device *, void *, struct drm_file *);
-void drm_handle_vblank(struct drm_device *, int);
+bool drm_handle_vblank(struct drm_device *, int);
+void drm_calc_timestamping_constants(struct drm_crtc *);
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *,
+ int, int *, struct timeval *, unsigned, struct drm_crtc *);
/* AGP/PCI Express/GART support (drm_agpsupport.c) */
struct drm_agp_head *drm_agp_init(void);
@@ -678,6 +888,8 @@ int drm_rmctx(struct drm_device *, void *, struct drm_file *);
int drm_control(struct drm_device *, void *, struct drm_file *);
int drm_wait_vblank(struct drm_device *, void *, struct drm_file *);
int drm_irq_by_busid(struct drm_device *, void *, struct drm_file *);
+void drm_send_vblank_event(struct drm_device *, int,
+ struct drm_pending_vblank_event *);
/* AGP/GART support (drm_agpsupport.c) */
int drm_agp_acquire_ioctl(struct drm_device *, void *, struct drm_file *);
@@ -725,6 +937,16 @@ drm_gem_object_unreference(struct drm_obj *obj)
drm_unref(&obj->uobj);
}
+static __inline void
+drm_gem_object_unreference_unlocked(struct drm_obj *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ DRM_LOCK();
+ drm_unref(&obj->uobj);
+ DRM_UNLOCK();
+}
+
static __inline void
drm_lock_obj(struct drm_obj *obj)
{
@@ -736,6 +958,13 @@ drm_unlock_obj(struct drm_obj *obj)
{
simple_unlock(&obj->uobj);
}
+
+static __inline__ int drm_core_check_feature(struct drm_device *dev,
+ int feature)
+{
+ return ((dev->driver->flags & feature) ? 1 : 0);
+}
+
#ifdef DRMLOCKDEBUG
#define DRM_ASSERT_HELD(obj) \
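
The Linux compatibility shims added to drmP.h above (container_of(), ERR_PTR()/IS_ERR(), WARN_ON() and friends) let code ported from Linux keep its idioms. A minimal sketch of how ported code typically uses them; the demo_* names are hypothetical, for illustration only:

	/* Hypothetical driver object embedding a DRM base object. */
	struct demo_crtc {
		struct drm_crtc base;	/* embedded base object */
		int dpms_mode;
	};

	static struct demo_crtc *
	to_demo_crtc(struct drm_crtc *crtc)
	{
		/* recover the containing struct from a pointer to a member */
		return container_of(crtc, struct demo_crtc, base);
	}

	static int demo_object;

	static void *
	demo_lookup(int fail)
	{
		if (fail)
			return ERR_PTR(-ENOMEM); /* encode errno in the pointer */
		return &demo_object;
	}

Callers test the result with IS_ERR() or IS_ERR_OR_NULL() instead of comparing against NULL, which is the convention the ported Linux code expects.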
diff --git a/sys/dev/pci/drm/drm_atomic.h b/sys/dev/pci/drm/drm_atomic.h
index 0a9df51de2e..111d7b50672 100644
--- a/sys/dev/pci/drm/drm_atomic.h
+++ b/sys/dev/pci/drm/drm_atomic.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_atomic.h,v 1.6 2011/06/02 18:22:00 weerd Exp $ */
+/* $OpenBSD: drm_atomic.h,v 1.7 2013/03/18 12:36:51 jsg Exp $ */
/**
* \file drm_atomic.h
* Atomic operations used in the DRM which may or may not be provided by the OS.
@@ -52,6 +52,24 @@ typedef u_int32_t atomic_t;
#define atomic_inc(p) (*(p) += 1)
#define atomic_dec(p) (*(p) -= 1)
#define atomic_add(n, p) (*(p) += (n))
+
+static inline int
+atomic_add_return(int n, atomic_t *p)
+{
+ *(p) += (n);
+ return (*p);
+}
+
+static inline int
+atomic_inc_not_zero(atomic_t *p)
+{
+ if (*p == 0)
+ return (0);
+
+ *(p) += 1;
+ return (*p);
+}
+
#define atomic_sub(n, p) (*(p) -= (n))
/* FIXME */
#define atomic_add_int(p, v) *(p) += v
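
Note that these "atomic" helpers are plain, unlocked memory operations (hence the FIXME above); callers must serialize access themselves. atomic_inc_not_zero() supports the common refcount pattern where an object must not be revived once its count has dropped to zero. A small sketch of that pattern, assuming an external lock is held and using hypothetical demo_* names:

	/* Refcount built on the helpers above; not truly atomic, so the
	 * caller is assumed to hold a lock around these operations. */
	static atomic_t demo_refs = 1;

	static int
	demo_get(void)
	{
		/* returns 0 if the object is already being torn down */
		return atomic_inc_not_zero(&demo_refs);
	}

	static void
	demo_put(void)
	{
		if (atomic_add_return(-1, &demo_refs) == 0) {
			/* last reference dropped: free the object here */
		}
	}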
diff --git a/sys/dev/pci/drm/drm_crtc.c b/sys/dev/pci/drm/drm_crtc.c
new file mode 100644
index 00000000000..5178413ceb3
--- /dev/null
+++ b/sys/dev/pci/drm/drm_crtc.c
@@ -0,0 +1,4112 @@
+/* $OpenBSD: drm_crtc.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "drm_fourcc.h"
+
+/* Avoid boilerplate. I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list) \
+ char *fnname(int); \
+ \
+ char *fnname(int val) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(list); i++) { \
+ if (list[i].type == val) \
+ return list[i].name; \
+ } \
+ return "(unknown)"; \
+ }
+
+char *drm_get_connector_status_name(enum drm_connector_status);
+int drm_mode_group_init(struct drm_device *, struct drm_mode_group *);
+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *,
+ const struct drm_display_mode *);
+int drm_mode_handle_cmp(struct drm_mode_handle *,
+ struct drm_mode_handle *);
+int drm_crtc_convert_umode(struct drm_display_mode *,
+ const struct drm_mode_modeinfo *);
+int drm_mode_object_get(struct drm_device *, struct drm_mode_object *,
+ uint32_t);
+void drm_mode_object_put(struct drm_device *, struct drm_mode_object *);
+int drm_mode_create_standard_connector_properties(struct drm_device *);
+void drm_mode_attachmode(struct drm_device *, struct drm_connector *,
+ struct drm_display_mode *);
+int drm_mode_detachmode(struct drm_device *, struct drm_connector *,
+ struct drm_display_mode *);
+struct drm_property_blob *drm_property_create_blob(struct drm_device *,
+ int, void *);
+void drm_property_destroy_blob(struct drm_device *,
+ struct drm_property_blob *);
+int format_check(const struct drm_mode_fb_cmd2 *);
+int framebuffer_check(const struct drm_mode_fb_cmd2 *);
+bool drm_property_change_is_valid(struct drm_property *, uint64_t);
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *,
+ struct drm_property *, uint64_t);
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *,
+ struct drm_property *, uint64_t);
+int drm_mode_plane_set_obj_prop(struct drm_mode_object *,
+ struct drm_property *, uint64_t);
+void drm_framebuffer_free(struct drm_framebuffer *);
+
+SPLAY_PROTOTYPE(drm_mode_tree, drm_mode_handle, entry, drm_mode_handle_cmp);
+
+/*
+ * Global properties
+ */
+static struct drm_prop_enum_list drm_dpms_enum_list[] =
+{ { DRM_MODE_DPMS_ON, "On" },
+ { DRM_MODE_DPMS_STANDBY, "Standby" },
+ { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+ { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/*
+ * Optional properties
+ */
+static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+ { DRM_MODE_SCALE_NONE, "None" },
+ { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+ { DRM_MODE_SCALE_CENTER, "Center" },
+ { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+ { DRM_MODE_DITHERING_OFF, "Off" },
+ { DRM_MODE_DITHERING_ON, "On" },
+ { DRM_MODE_DITHERING_AUTO, "Automatic" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+ drm_dvi_i_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+ drm_tv_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+ { DRM_MODE_DIRTY_OFF, "Off" },
+ { DRM_MODE_DIRTY_ON, "On" },
+ { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+ drm_dirty_info_enum_list)
+
+struct drm_conn_prop_enum_list {
+ int type;
+ char *name;
+ int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+ { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+ { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+ { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
+ { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+};
+
+static struct drm_prop_enum_list drm_encoder_enum_list[] =
+{ { DRM_MODE_ENCODER_NONE, "None" },
+ { DRM_MODE_ENCODER_DAC, "DAC" },
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
+ { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+};
+
+char *drm_get_encoder_name(struct drm_encoder *encoder)
+{
+ static char buf[32];
+
+ snprintf(buf, 32, "%s-%d",
+ drm_encoder_enum_list[encoder->encoder_type].name,
+ encoder->base.id);
+ return buf;
+}
+EXPORT_SYMBOL(drm_get_encoder_name);
+
+char *drm_get_connector_name(struct drm_connector *connector)
+{
+ static char buf[32];
+
+ snprintf(buf, 32, "%s-%d",
+ drm_connector_enum_list[connector->connector_type].name,
+ connector->connector_type_id);
+ return buf;
+}
+EXPORT_SYMBOL(drm_get_connector_name);
+
+char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+ if (status == connector_status_connected)
+ return "connected";
+ else if (status == connector_status_disconnected)
+ return "disconnected";
+ else
+ return "unknown";
+}
+
+/**
+ * drm_mode_object_get - allocate a new identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @type: object type
+ *
+ * LOCKING:
+ *
+ * Create a unique identifier based on @obj in @dev's identifier space. Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+int
+drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
+{
+ struct drm_mode_handle *han;
+ int new_id = 0;
+
+ if ((han = drm_calloc(1, sizeof(*han))) == NULL)
+ return ENOMEM;
+ han->obj = obj;
+ rw_enter_write(&dev->mode_config.idr_rwl);
+
+again:
+ new_id = han->handle = ++dev->mode_config.mode_obj_id;
+ /*
+	 * Make sure we have no duplicates. This will hurt once we wrap;
+	 * 0 is reserved.
+ */
+ obj->id = new_id;
+ obj->type = obj_type;
+ if (han->handle == 0 || SPLAY_INSERT(drm_mode_tree,
+ &dev->mode_config.mode_tree, han))
+ goto again;
+ rw_exit_write(&dev->mode_config.idr_rwl);
+ return 0;
+}
+
+/**
+ * drm_mode_object_put - free an identifier
+ * @dev: DRM device
+ * @id: ID to free
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Free @id from @dev's unique identifier pool.
+ */
+void
+drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object)
+{
+ struct drm_mode_handle han;
+ han.obj = object;
+ han.handle = object->id;
+
+ rw_enter_write(&dev->mode_config.idr_rwl);
+ SPLAY_REMOVE(drm_mode_tree, &dev->mode_config.mode_tree, &han);
+ rw_exit_write(&dev->mode_config.idr_rwl);
+}
+
+struct drm_mode_object *
+drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj = NULL;
+ struct drm_mode_handle *han, search;
+
+ rw_enter_write(&dev->mode_config.idr_rwl);
+ search.handle = id;
+ han = SPLAY_FIND(drm_mode_tree, &dev->mode_config.mode_tree, &search);
+ if (han == NULL) {
+ rw_exit_write(&dev->mode_config.idr_rwl);
+ return NULL;
+ }
+
+	obj = han->obj;
+	if (obj->type != type)
+		obj = NULL;	/* wrong type: still drop the lock below */
+	rw_exit_write(&dev->mode_config.idr_rwl);
+
+	return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int
+drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ int ret;
+
+ fb->refcount = 0;
+
+ ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+ if (ret)
+ return ret;
+
+ fb->dev = dev;
+ fb->funcs = funcs;
+ dev->mode_config.num_fb++;
+ list_add(&fb->head, &dev->mode_config.fb_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+void
+drm_framebuffer_free(struct drm_framebuffer *fb)
+{
+ fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ */
+void
+drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+// struct drm_device *dev = fb->dev;
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+#ifdef notyet
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+#endif
+ if (--fb->refcount == 0)
+ drm_framebuffer_free(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_unreference);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ */
+void
+drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ fb->refcount++;
+}
+EXPORT_SYMBOL(drm_framebuffer_reference);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Removes @fb's mode object ID and takes it off the mode_config fb list.
+ * It does not detach @fb from any CRTC; drm_framebuffer_remove() does that.
+ */
+void
+drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ /*
+ * This could be moved to drm_framebuffer_remove(), but for
+ * debugging is nice to keep around the list of fb's that are
+ * no longer associated w/ a drm_file but are not unreferenced
+ * yet. (i915 and omapdrm have debugfs files which will show
+ * this.)
+ */
+ drm_mode_object_put(dev, &fb->base);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config. If they're
+ * using @fb, removes it, setting it to NULL.
+ */
+void
+drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_mode_set set;
+ int ret;
+
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb) {
+ /* should turn off the crtc */
+ memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = crtc->funcs->set_config(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
+ }
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb) {
+ /* should turn off the crtc */
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
+ }
+
+ list_del(&fb->filp_head);
+
+ drm_framebuffer_unreference(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_remove);
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * LOCKING:
+ * Takes mode_config lock.
+ *
+ * Inits a new object created as the base part of a driver crtc object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int
+drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs)
+{
+ int ret;
+
+ crtc->dev = dev;
+ crtc->funcs = funcs;
+ crtc->invert_dimensions = false;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+ if (ret)
+ goto out;
+
+ crtc->base.properties = &crtc->properties;
+
+ list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+ dev->mode_config.num_crtc++;
+
+ out:
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_crtc_init);
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Cleans up @crtc and removes it from the DRM mode setting core. Does
+ * NOT free the object; the caller does that.
+ */
+void
+drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ if (crtc->gamma_store) {
+ free(crtc->gamma_store, M_DRM);
+ crtc->gamma_store = NULL;
+ }
+
+ drm_mode_object_put(dev, &crtc->base);
+ list_del(&crtc->head);
+ dev->mode_config.num_crtc--;
+}
+EXPORT_SYMBOL(drm_crtc_cleanup);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode is added to
+ * @mode: mode data
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void
+drm_mode_probed_add(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_add(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void
+drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_del(&mode->head);
+ drm_mode_destroy(connector->dev, mode);
+}
+EXPORT_SYMBOL(drm_mode_remove);
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int
+drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
+{
+ int ret;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ if (ret)
+ goto out;
+
+ connector->base.properties = &connector->properties;
+ connector->dev = dev;
+ connector->funcs = funcs;
+ connector->connector_type = connector_type;
+ connector->connector_type_id =
+ ++drm_connector_enum_list[connector_type].count; /* TODO */
+ INIT_LIST_HEAD(&connector->user_modes);
+ INIT_LIST_HEAD(&connector->probed_modes);
+ INIT_LIST_HEAD(&connector->modes);
+ connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
+
+ list_add_tail(&connector->head, &dev->mode_config.connector_list);
+ dev->mode_config.num_connector++;
+
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.edid_property,
+ 0);
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.dpms_property, 0);
+
+ out:
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void
+drm_connector_cleanup(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, &connector->modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, &connector->user_modes, head)
+ drm_mode_remove(connector, mode);
+
+ rw_enter_write(&dev->mode_config.rwl);
+ drm_mode_object_put(dev, &connector->base);
+ list_del(&connector->head);
+ dev->mode_config.num_connector--;
+ rw_exit_write(&dev->mode_config.rwl);
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+void
+drm_connector_unplug_all(struct drm_device *dev)
+{
+#ifdef notyet
+ struct drm_connector *connector;
+
+ /* taking the mode config mutex ends up in a clash with sysfs */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_sysfs_connector_remove(connector);
+#endif
+}
+EXPORT_SYMBOL(drm_connector_unplug_all);
+
+int
+drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type)
+{
+ int ret;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ if (ret)
+ goto out;
+
+ encoder->dev = dev;
+ encoder->encoder_type = encoder_type;
+ encoder->funcs = funcs;
+
+ list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+ dev->mode_config.num_encoder++;
+
+ out:
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+void
+drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ rw_enter_write(&dev->mode_config.rwl);
+ drm_mode_object_put(dev, &encoder->base);
+ list_del(&encoder->head);
+ dev->mode_config.num_encoder--;
+ rw_exit_write(&dev->mode_config.rwl);
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+int
+drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv)
+{
+ int ret;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ if (ret)
+ goto out;
+
+ plane->base.properties = &plane->properties;
+ plane->dev = dev;
+ plane->funcs = funcs;
+ plane->format_types = malloc(sizeof(uint32_t) * format_count,
+ M_DRM, M_WAITOK);
+
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_put(dev, &plane->base);
+ ret = ENOMEM;
+ goto out;
+ }
+
+ memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = possible_crtcs;
+
+ /* private planes are not exposed to userspace, but depending on
+ * display hardware, might be convenient to allow sharing programming
+ * for the scanout engine with the crtc implementation.
+ */
+ if (!priv) {
+ list_add_tail(&plane->head, &dev->mode_config.plane_list);
+ dev->mode_config.num_plane++;
+ } else {
+ INIT_LIST_HEAD(&plane->head);
+ }
+
+ out:
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+void
+drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ free(plane->format_types, M_DRM);
+ drm_mode_object_put(dev, &plane->base);
+ /* if not added to a list, it must be a private plane */
+ if (!list_empty(&plane->head)) {
+ list_del(&plane->head);
+ dev->mode_config.num_plane--;
+ }
+ rw_exit_write(&dev->mode_config.rwl);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *
+drm_mode_create(struct drm_device *dev)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = malloc(sizeof(struct drm_display_mode), M_DRM, M_WAITOK|M_ZERO);
+ if (!nmode)
+ return NULL;
+
+ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+ free(nmode, M_DRM);
+ return NULL;
+ }
+
+ return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void
+drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ if (!mode)
+ return;
+
+ drm_mode_object_put(dev, &mode->base);
+
+ free(mode, M_DRM);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+int
+drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+ struct drm_property *edid;
+ struct drm_property *dpms;
+
+ /*
+ * Standard properties (apply to all connectors)
+ */
+ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "EDID", 0);
+ dev->mode_config.edid_property = edid;
+
+ dpms = drm_property_create_enum(dev, 0,
+ "DPMS", drm_dpms_enum_list,
+ ARRAY_SIZE(drm_dpms_enum_list));
+ dev->mode_config.dpms_property = dpms;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int
+drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+ struct drm_property *dvi_i_selector;
+ struct drm_property *dvi_i_subconnector;
+
+ if (dev->mode_config.dvi_i_select_subconnector_property)
+ return 0;
+
+ dvi_i_selector =
+ drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_dvi_i_select_enum_list,
+ ARRAY_SIZE(drm_dvi_i_select_enum_list));
+ dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+ dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_dvi_i_subconnector_enum_list,
+ ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+ dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device. Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int
+drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+ char *modes[])
+{
+ struct drm_property *tv_selector;
+ struct drm_property *tv_subconnector;
+ int i;
+
+ if (dev->mode_config.tv_select_subconnector_property)
+ return 0;
+
+ /*
+ * Basic connector properties
+ */
+ tv_selector = drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_tv_select_enum_list,
+ ARRAY_SIZE(drm_tv_select_enum_list));
+ dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+ tv_subconnector =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_tv_subconnector_enum_list,
+ ARRAY_SIZE(drm_tv_subconnector_enum_list));
+ dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+ /*
+ * Other, TV specific properties: margins & TV modes.
+ */
+ dev->mode_config.tv_left_margin_property =
+ drm_property_create_range(dev, 0, "left margin", 0, 100);
+
+ dev->mode_config.tv_right_margin_property =
+ drm_property_create_range(dev, 0, "right margin", 0, 100);
+
+ dev->mode_config.tv_top_margin_property =
+ drm_property_create_range(dev, 0, "top margin", 0, 100);
+
+ dev->mode_config.tv_bottom_margin_property =
+ drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+
+ dev->mode_config.tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ for (i = 0; i < num_modes; i++)
+ drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+ i, modes[i]);
+
+ dev->mode_config.tv_brightness_property =
+ drm_property_create_range(dev, 0, "brightness", 0, 100);
+
+ dev->mode_config.tv_contrast_property =
+ drm_property_create_range(dev, 0, "contrast", 0, 100);
+
+ dev->mode_config.tv_flicker_reduction_property =
+ drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+
+ dev->mode_config.tv_overscan_property =
+ drm_property_create_range(dev, 0, "overscan", 0, 100);
+
+ dev->mode_config.tv_saturation_property =
+ drm_property_create_range(dev, 0, "saturation", 0, 100);
+
+ dev->mode_config.tv_hue_property =
+ drm_property_create_range(dev, 0, "hue", 0, 100);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int
+drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+ struct drm_property *scaling_mode;
+
+ if (dev->mode_config.scaling_mode_property)
+ return 0;
+
+ scaling_mode =
+ drm_property_create_enum(dev, 0, "scaling mode",
+ drm_scaling_mode_enum_list,
+ ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+ dev->mode_config.scaling_mode_property = scaling_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int
+drm_mode_create_dithering_property(struct drm_device *dev)
+{
+ struct drm_property *dithering_mode;
+
+ if (dev->mode_config.dithering_mode_property)
+ return 0;
+
+ dithering_mode =
+ drm_property_create_enum(dev, 0, "dithering",
+ drm_dithering_mode_enum_list,
+ ARRAY_SIZE(drm_dithering_mode_enum_list));
+ dev->mode_config.dithering_mode_property = dithering_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dithering_property);
+
+/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int
+drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+ struct drm_property *dirty_info;
+
+ if (dev->mode_config.dirty_info_property)
+ return 0;
+
+ dirty_info =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "dirty",
+ drm_dirty_info_enum_list,
+ ARRAY_SIZE(drm_dirty_info_enum_list));
+ dev->mode_config.dirty_info_property = dirty_info;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None, should happen single threaded at init time.
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ */
+void
+drm_mode_config_init(struct drm_device *dev)
+{
+ rw_init(&dev->mode_config.rwl, "mcrwl");
+ rw_init(&dev->mode_config.idr_rwl, "idrwl");
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ SPLAY_INIT(&dev->mode_config.mode_tree);
+
+ rw_enter_write(&dev->mode_config.rwl);
+ drm_mode_create_standard_connector_properties(dev);
+ rw_exit_write(&dev->mode_config.rwl);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+int
+drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+ uint32_t total_objects = 0;
+
+ total_objects += dev->mode_config.num_crtc;
+ total_objects += dev->mode_config.num_connector;
+ total_objects += dev->mode_config.num_encoder;
+
+ group->id_list = malloc(total_objects * sizeof(uint32_t),
+ M_DRM, M_WAITOK|M_ZERO);
+ if (!group->id_list)
+ return ENOMEM;
+
+ group->num_crtcs = 0;
+ group->num_connectors = 0;
+ group->num_encoders = 0;
+ return 0;
+}
+
+int
+drm_mode_group_init_legacy_group(struct drm_device *dev,
+ struct drm_mode_group *group)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ if ((ret = drm_mode_group_init(dev, group)))
+ return ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ group->id_list[group->num_crtcs++] = crtc->base.id;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders++] =
+ encoder->base.id;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders +
+ group->num_connectors++] = connector->base.id;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void
+drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_remove(fb);
+ }
+
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+ // XXX destroy idr/SPLAY tree
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+void
+drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+ const struct drm_display_mode *in)
+{
+#ifdef notyet
+ WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
+ in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
+ in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
+ in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
+ in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
+ "timing values too large for mode info\n");
+#endif
+
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to return to the user
+ * @in: drm_mode_modeinfo to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_crtc_convert_umode(struct drm_display_mode *out,
+ const struct drm_mode_modeinfo *in)
+{
+ if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+ return ERANGE;
+
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+
+ return 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_card_res)
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_getresources(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0;
+#if 0
+ int i;
+#endif
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+#if 0
+ struct drm_mode_group *mode_group;
+#endif
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file_priv->fbs)
+ fb_count++;
+
+#if 0
+ mode_group = &file_priv->master->minor->mode_group;
+ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+#endif
+
+ list_for_each(lh, &dev->mode_config.crtc_list)
+ crtc_count++;
+
+ list_for_each(lh, &dev->mode_config.connector_list)
+ connector_count++;
+
+ list_for_each(lh, &dev->mode_config.encoder_list)
+ encoder_count++;
+#if 0
+ } else {
+
+ crtc_count = mode_group->num_crtcs;
+ connector_count = mode_group->num_connectors;
+ encoder_count = mode_group->num_encoders;
+ }
+#endif
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (copyout(&fb->base.id, fb_id + copied,
+ sizeof(fb->base.id)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+ /* if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ head) {
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ if (copyout(&crtc->base.id, crtc_id + copied,
+ sizeof(crtc->base.id)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ /* } else {
+ for (i = 0; i < mode_group->num_crtcs; i++) {
+ if (copyout(&mode_group->id_list[i],
+ crtc_id + copied,
+ sizeof(mode_group->id_list[i])) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } */
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+#if 0
+ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+#endif
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ head) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ drm_get_encoder_name(encoder));
+ if (copyout(&encoder->base.id, encoder_id +
+ copied, sizeof(encoder->base.id)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+#if 0
+ } else {
+ for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+			if (copyout(&mode_group->id_list[i],
+			    encoder_id + copied, sizeof(uint32_t)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+
+ }
+#endif
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+#if 0
+ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+#endif
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+ if (copyout(&connector->base.id,
+ connector_id + copied,
+ sizeof(connector->base.id)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+#if 0
+ } else {
+ int start = mode_group->num_crtcs +
+ mode_group->num_encoders;
+ for (i = start; i < start + mode_group->num_connectors; i++) {
+ if (copyout(&mode_group->id_list[i],
+ connector_id + copied,
+ sizeof(uint32_t)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+#endif
+ }
+ card_res->count_connectors = connector_count;
+
+ DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
+ card_res->count_connectors, card_res->count_encoders);
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc *crtc_resp = data;
+ struct drm_crtc *crtc;
+ struct drm_mode_object *obj;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ crtc_resp->x = crtc->x;
+ crtc_resp->y = crtc->y;
+ crtc_resp->gamma_size = crtc->gamma_size;
+ if (crtc->fb)
+ crtc_resp->fb_id = crtc->fb->base.id;
+ else
+ crtc_resp->fb_id = 0;
+
+ if (crtc->enabled) {
+
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+ crtc_resp->mode_valid = 1;
+
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a connector configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_getconnector(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_connector *out_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ int mode_count = 0;
+ int props_count = 0;
+ int encoders_count = 0;
+ int ret = 0;
+ int copied = 0;
+ int i;
+ struct drm_mode_modeinfo u_mode;
+ struct drm_mode_modeinfo __user *mode_ptr;
+ uint32_t __user *prop_ptr;
+ uint64_t __user *prop_values;
+ uint32_t __user *encoder_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ obj = drm_mode_object_find(dev, out_resp->connector_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ props_count = connector->properties.count;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ encoders_count++;
+ }
+ }
+
+ if (out_resp->count_modes == 0) {
+ connector->funcs->fill_modes(connector,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ }
+
+ /* delayed so we get modes regardless of pre-fill_modes state */
+ list_for_each_entry(mode, &connector->modes, head)
+ mode_count++;
+
+ out_resp->connector_id = connector->base.id;
+ out_resp->connector_type = connector->connector_type;
+ out_resp->connector_type_id = connector->connector_type_id;
+ out_resp->mm_width = connector->display_info.width_mm;
+ out_resp->mm_height = connector->display_info.height_mm;
+ out_resp->subpixel = connector->display_info.subpixel_order;
+ out_resp->connection = connector->status;
+ if (connector->encoder)
+ out_resp->encoder_id = connector->encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if ((out_resp->count_modes >= mode_count) && mode_count) {
+ copied = 0;
+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+ list_for_each_entry(mode, &connector->modes, head) {
+ drm_crtc_convert_to_umode(&u_mode, mode);
+ if (copyout(&u_mode, mode_ptr + copied,
+ sizeof(u_mode)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_modes = mode_count;
+
+ if ((out_resp->count_props >= props_count) && props_count) {
+ copied = 0;
+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
+ for (i = 0; i < connector->properties.count; i++) {
+ if (copyout(&connector->properties.ids[i],
+ prop_ptr + copied,
+ sizeof(uint32_t)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ if (copyout(&connector->properties.values[i],
+ prop_values + copied,
+ sizeof(uint64_t)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_props = props_count;
+
+ if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ copied = 0;
+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ if (copyout(&connector->encoder_ids[i],
+ encoder_ptr + copied,
+ sizeof(connector->encoder_ids[i])) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_encoders = encoders_count;
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
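+/**
+ * drm_mode_getencoder - get encoder configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct an encoder configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */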
+int
+drm_mode_getencoder(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_encoder *enc_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ encoder = obj_to_encoder(obj);
+
+ if (encoder->crtc)
+ enc_resp->crtc_id = encoder->crtc->base.id;
+ else
+ enc_resp->crtc_id = 0;
+ enc_resp->encoder_type = encoder->encoder_type;
+ enc_resp->encoder_id = encoder->base.id;
+ enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane_res - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Return a plane count and set of IDs.
+ */
+int
+drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+ int copied = 0, ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ config = &dev->mode_config;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (config->num_plane &&
+ (plane_resp->count_planes >= config->num_plane)) {
+ plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+ list_for_each_entry(plane, &config->plane_list, head) {
+ if (copyout(&plane->base.id, plane_ptr + copied,
+ sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ plane_resp->count_planes = config->num_plane;
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int
+drm_mode_getplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ uint32_t __user *format_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, plane_resp->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = plane->gamma_size;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+ if (copyout(plane->format_types,
+ format_ptr,
+ sizeof(uint32_t) * plane->format_count)) {
+ ret = EFAULT;
+ goto out;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int
+drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ ret = ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ /* No fb means shut it down */
+ if (!plane_req->fb_id) {
+ plane->funcs->disable_plane(plane);
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ ret = ENOENT;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ obj = drm_mode_object_find(dev, plane_req->fb_id,
+ DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ ret = ENOENT;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+ ret = EINVAL;
+ goto out;
+ }
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (plane_req->src_w > fb_width ||
+ plane_req->src_x > fb_width - plane_req->src_w ||
+ plane_req->src_h > fb_height ||
+ plane_req->src_y > fb_height - plane_req->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ plane_req->src_w >> 16,
+ ((plane_req->src_w & 0xffff) * 15625) >> 10,
+ plane_req->src_h >> 16,
+ ((plane_req->src_h & 0xffff) * 15625) >> 10,
+ plane_req->src_x >> 16,
+ ((plane_req->src_x & 0xffff) * 15625) >> 10,
+ plane_req->src_y >> 16,
+ ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ ret = ENOSPC;
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ ret = ERANGE;
+ goto out;
+ }
+
+ ret = plane->funcs->update_plane(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+ if (!ret) {
+ plane->crtc = crtc;
+ plane->fb = fb;
+ }
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Build a new CRTC configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_setcrtc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_mode_crtc *crtc_req = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct drm_mode_set set;
+ uint32_t __user *set_connectors_ptr;
+ int ret;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ /* For some reason crtc x/y offsets are signed internally. */
+ if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ return ERANGE;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+ ret = EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ if (crtc_req->mode_valid) {
+ int hdisplay, vdisplay;
+ /* If we have a mode we need a framebuffer. */
+ /* If we pass -1, set the mode with the currently bound fb */
+ if (crtc_req->fb_id == -1) {
+ if (!crtc->fb) {
+ DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+ ret = EINVAL;
+ goto out;
+ }
+ fb = crtc->fb;
+ } else {
+ obj = drm_mode_object_find(dev, crtc_req->fb_id,
+ DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown FB ID%d\n",
+ crtc_req->fb_id);
+ ret = EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = ENOMEM;
+ goto out;
+ }
+
+ ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+
+ hdisplay = mode->hdisplay;
+ vdisplay = mode->vdisplay;
+
+ if (crtc->invert_dimensions) {
+ int tmp = hdisplay;
+ hdisplay = vdisplay;
+ vdisplay = tmp;
+ }
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc_req->x > fb->width - hdisplay ||
+ crtc_req->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height,
+ hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ ret = ENOSPC;
+ goto out;
+ }
+ }
+
+ if (crtc_req->count_connectors == 0 && mode) {
+ DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+ DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+ crtc_req->count_connectors);
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0) {
+ u32 out_id;
+
+ /* Avoid unbounded kernel memory allocation */
+ if (crtc_req->count_connectors > config->num_connector) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ connector_set = malloc(crtc_req->count_connectors *
+ sizeof(struct drm_connector *),
+ M_DRM, M_WAITOK);
+ if (!connector_set) {
+ ret = ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < crtc_req->count_connectors; i++) {
+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
+ if (copyin(&set_connectors_ptr[i], &out_id,
+ sizeof(out_id)) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, out_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ DRM_DEBUG_KMS("Connector id %d unknown\n",
+ out_id);
+ ret = EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+
+ connector_set[i] = connector;
+ }
+ }
+
+ set.crtc = crtc;
+ set.x = crtc_req->x;
+ set.y = crtc_req->y;
+ set.mode = mode;
+ set.connectors = connector_set;
+ set.num_connectors = crtc_req->count_connectors;
+ set.fb = fb;
+ ret = crtc->funcs->set_config(&set);
+
+out:
+ if (connector_set != NULL)
+ free(connector_set, M_DRM);
+ drm_mode_destroy(dev, mode);
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
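+/**
+ * drm_mode_cursor_ioctl - set CRTC cursor configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Set the cursor buffer and/or position for a CRTC, depending on the
+ * flags in the request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */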
+int
+drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_cursor *req = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+ ret = EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set) {
+ ret = ENXIO;
+ goto out;
+ }
+ /* Turns off the cursor if handle is 0 */
+ ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+ req->width, req->height);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ if (crtc->funcs->cursor_move) {
+ ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+ } else {
+ ret = EFAULT;
+ goto out;
+ }
+ }
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t
+drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_C8;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r = {};
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ /* Use new struct with format internally */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ if ((config->min_width > r.width) || (r.width > config->max_width))
+ return EINVAL;
+
+ if ((config->min_height > r.height) || (r.height > config->max_height))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ /* TODO check buffer is sufficiently large */
+ /* TODO setup destructor callback */
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ ret = EINVAL;
+ goto out;
+ }
+
+ or->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
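+/*
+ * Check that a pixel format from userspace is one the core knows about.
+ * Returns 0 for a recognized format, EINVAL otherwise.
+ */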
+int
+format_check(const struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ return EINVAL;
+ }
+}
+
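+/*
+ * Validate an addfb2 request: recognized pixel format, dimensions
+ * compatible with the format's chroma subsampling, a buffer handle for
+ * every plane, and pitches/offsets that cannot overflow.
+ */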
+int
+framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+ int ret, hsub, vsub, num_planes, i;
+
+ ret = format_check(r);
+ if (ret) {
+ DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+ return ret;
+ }
+
+ hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ return EINVAL;
+ }
+
+ if (r->height == 0 || r->height % vsub) {
+ DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ return EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+ if (!r->handles[i]) {
+ DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ return EINVAL;
+ }
+
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
+ DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ return EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return EINVAL;
+ }
+
+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
+ return EINVAL;
+ }
+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
+ return EINVAL;
+ }
+
+ ret = framebuffer_check(r);
+ if (ret)
+ return ret;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ ret = EINVAL;
+ goto out;
+ }
+
+ r->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fbl = NULL;
+ uint32_t *id = data;
+ int ret = 0;
+ int found = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+ /* TODO check that we really get a framebuffer back. */
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+ if (fb == fbl)
+ found = 1;
+
+ if (!found) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ drm_framebuffer_remove(fb);
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd *r = data;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->depth = fb->depth;
+ r->bpp = fb->bits_per_pixel;
+ r->pitch = fb->pitches[0];
+ ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
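+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Pass the list of dirty rectangles from userspace to the driver's
+ * dirty handler so manual-update displays can be flushed.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */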
+int
+drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_clip_rect __user *clips_ptr;
+ struct drm_clip_rect *clips = NULL;
+ struct drm_mode_fb_dirty_cmd *r = data;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned flags;
+ int num_clips;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ ret = EINVAL;
+ goto out_err1;
+ }
+ fb = obj_to_fb(obj);
+
+ num_clips = r->num_clips;
+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+ if (!num_clips != !clips_ptr) {
+ ret = EINVAL;
+ goto out_err1;
+ }
+
+ flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+ /* If userspace annotates copy, clips must come in pairs */
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+ ret = EINVAL;
+ goto out_err1;
+ }
+
+ if (num_clips && clips_ptr) {
+ if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+ ret = EINVAL;
+ goto out_err1;
+ }
+ clips = malloc(num_clips * sizeof(*clips), M_DRM,
+ M_WAITOK|M_ZERO);
+ if (!clips) {
+ ret = ENOMEM;
+ goto out_err1;
+ }
+
+ ret = copyin(clips_ptr, clips,
+ num_clips * sizeof(*clips));
+
+ if (ret) {
+ ret = EFAULT;
+ goto out_err2;
+ }
+ }
+
+ if (fb->funcs->dirty) {
+ ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+ clips, num_clips);
+ } else {
+ ret = ENOSYS;
+ goto out_err2;
+ }
+
+out_err2:
+ free(clips, M_DRM);
+out_err1:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @dev: DRM device
+ * @priv: DRM file whose framebuffers are being released
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Destroy all the FBs associated with @priv, typically when the file
+ * is closed.
+ */
+void
+drm_fb_release(struct drm_device *dev, struct drm_file *priv)
+{
+ struct drm_framebuffer *fb, *tfb;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+ drm_framebuffer_remove(fb);
+ }
+ rw_exit_write(&dev->mode_config.rwl);
+}
+
+/**
+ * drm_mode_attachmode - add a mode to the user mode list
+ * @dev: DRM device
+ * @connector: connector to add the mode to
+ * @mode: mode to add
+ *
+ * Add @mode to @connector's user mode list.
+ */
+void
+drm_mode_attachmode(struct drm_device *dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_add_tail(&mode->head, &connector->user_modes);
+}
+
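+/*
+ * Duplicate @mode once for every connector currently driven by @crtc
+ * and move the copies onto those connectors' user mode lists.  Any
+ * copies left over after a partial failure are destroyed on the way out.
+ */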
+int
+drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ int ret = 0;
+ struct drm_display_mode *dup_mode, *next;
+ DRM_LIST_HEAD(list);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->encoder->crtc == crtc) {
+ dup_mode = drm_mode_duplicate(dev, mode);
+ if (!dup_mode) {
+ ret = ENOMEM;
+ goto out;
+ }
+ list_add_tail(&dup_mode->head, &list);
+ }
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->encoder->crtc == crtc)
+ list_move_tail(list.next, &connector->user_modes);
+ }
+
+/* WARN_ON(!list_empty(&list)); */
+
+ out:
+ list_for_each_entry_safe(dup_mode, next, &list, head)
+ drm_mode_destroy(dev, dup_mode);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
+
+int
+drm_mode_detachmode(struct drm_device *dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int found = 0;
+ int ret = 0;
+ struct drm_display_mode *match_mode, *t;
+
+ list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
+ if (drm_mode_equal(match_mode, mode)) {
+ list_del(&match_mode->head);
+ drm_mode_destroy(dev, match_mode);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ ret = EINVAL;
+
+ return ret;
+}
+
+int
+drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_mode_detachmode(dev, connector, mode);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
+
+/**
+ * drm_mode_attachmode_ioctl - attach a user mode to a connector
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This attaches a user specified mode to a connector.
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_attachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_mode_cmd *mode_cmd = data;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ struct drm_mode_object *obj;
+ struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = ENOMEM;
+ goto out;
+ }
+
+ ret = drm_crtc_convert_umode(mode, umode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ drm_mode_destroy(dev, mode);
+ goto out;
+ }
+
+ drm_mode_attachmode(dev, connector, mode);
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+/**
+ * drm_mode_detachmode_ioctl - detach a user specified mode from a connector
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int
+drm_mode_detachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_mode_cmd *mode_cmd = data;
+ struct drm_connector *connector;
+ struct drm_display_mode mode;
+ struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ ret = drm_crtc_convert_umode(&mode, umode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
+ ret = drm_mode_detachmode(dev, connector, &mode);
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
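+/**
+ * drm_property_create - create a new property
+ * @dev: DRM device
+ * @flags: property type flags (DRM_MODE_PROP_*)
+ * @name: property name, truncated to DRM_PROP_NAME_LEN
+ * @num_values: size of the value array to allocate
+ *
+ * Allocate a property object, register it as a mode object and add it
+ * to the device's property list.
+ *
+ * RETURNS:
+ * Pointer to the new property, or NULL on failure.
+ */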
+struct drm_property *
+drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values)
+{
+ struct drm_property *property = NULL;
+ int ret;
+
+ property = malloc(sizeof(struct drm_property), M_DRM, M_WAITOK|M_ZERO);
+ if (!property)
+ return NULL;
+
+ if (num_values) {
+ property->values = malloc(sizeof(uint64_t)*num_values,
+ M_DRM, M_WAITOK|M_ZERO);
+ if (!property->values)
+ goto fail;
+ }
+
+ ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ if (ret)
+ goto fail;
+
+ property->flags = flags;
+ property->num_values = num_values;
+ INIT_LIST_HEAD(&property->enum_blob_list);
+
+ if (name) {
+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
+ property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ }
+
+ list_add_tail(&property->head, &dev->mode_config.property_list);
+ return property;
+fail:
+ free(property->values, M_DRM);
+ free(property, M_DRM);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+struct drm_property *
+drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_ENUM;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_enum);
+
+struct drm_property *
+drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_BITMASK;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
+
+struct drm_property *
+drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max)
+{
+ struct drm_property *property;
+
+ flags |= DRM_MODE_PROP_RANGE;
+
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+
+ property->values[0] = min;
+ property->values[1] = max;
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_range);
+
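+/*
+ * Add a name/value pair to an enum or bitmask property.  If the value
+ * already exists only its name is updated; bitmask property values must
+ * fall in the range 0-63 so they fit a 64-bit valid mask.
+ */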
+int
+drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name)
+{
+ struct drm_property_enum *prop_enum;
+
+ if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ return EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+ if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
+ return EINVAL;
+
+ if (!list_empty(&property->enum_blob_list)) {
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ if (prop_enum->value == value) {
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ return 0;
+ }
+ }
+ }
+
+ prop_enum = malloc(sizeof(struct drm_property_enum), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!prop_enum)
+ return ENOMEM;
+
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ prop_enum->value = value;
+
+ property->values[index] = value;
+ list_add_tail(&prop_enum->head, &property->enum_blob_list);
+ return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+void
+drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+ struct drm_property_enum *prop_enum, *pt;
+
+ list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+ list_del(&prop_enum->head);
+ free(prop_enum, M_DRM);
+ }
+
+ if (property->num_values)
+ free(property->values, M_DRM);
+ drm_mode_object_put(dev, &property->base);
+ list_del(&property->head);
+ free(property, M_DRM);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+void
+drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+{
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ printf("Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
+ }
+
+ obj->properties->ids[count] = property->base.id;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+int
+drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+{
+ int i;
+
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ obj->properties->values[i] = val;
+ return 0;
+ }
+ }
+
+ return EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_set_value);
+
+int
+drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ int i;
+
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ *val = obj->properties->values[i];
+ return 0;
+ }
+ }
+
+ return EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_get_value);
+
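+/**
+ * drm_mode_getproperty_ioctl - get property metadata
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Return a property's name, flags and values and, depending on its
+ * type, its enum entries or blob IDs and lengths.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */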
+int
+drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_property *out_resp = data;
+ struct drm_property *property;
+ int enum_count = 0;
+ int blob_count = 0;
+ int value_count = 0;
+ int ret = 0, i;
+ int copied;
+ struct drm_property_enum *prop_enum;
+ struct drm_mode_property_enum __user *enum_ptr;
+ struct drm_property_blob *prop_blob;
+ uint32_t __user *blob_id_ptr;
+ uint64_t __user *values_ptr;
+ uint32_t __user *blob_length_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+ if (!obj) {
+ ret = EINVAL;
+ goto done;
+ }
+ property = obj_to_property(obj);
+
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+ enum_count++;
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ list_for_each_entry(prop_blob, &property->enum_blob_list, head)
+ blob_count++;
+ }
+
+ value_count = property->num_values;
+
+ strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+ out_resp->flags = property->flags;
+
+ if ((out_resp->count_values >= value_count) && value_count) {
+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+ for (i = 0; i < value_count; i++) {
+ if (copyout(&property->values[i], values_ptr + i, sizeof(uint64_t)) != 0) {
+ ret = EFAULT;
+ goto done;
+ }
+ }
+ }
+ out_resp->count_values = value_count;
+
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ copied = 0;
+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+
+ if (copyout(&prop_enum->value,
+ &enum_ptr[copied].value,
+ sizeof(uint64_t)) != 0) {
+ ret = EFAULT;
+ goto done;
+ }
+
+ if (copyout(&prop_enum->name,
+ &enum_ptr[copied].name,
+ DRM_PROP_NAME_LEN) != 0) {
+ ret = EFAULT;
+ goto done;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = enum_count;
+ }
+
+ if (property->flags & DRM_MODE_PROP_BLOB) {
+ if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+ copied = 0;
+ blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
+
+ list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+ if (copyout(&prop_blob->base.id,
+ blob_id_ptr + copied,
+ sizeof(prop_blob->base.id)) != 0) {
+ ret = EFAULT;
+ goto done;
+ }
+
+ if (copyout(&prop_blob->length,
+ blob_length_ptr + copied,
+ sizeof(prop_blob->length)) != 0) {
+ ret = EFAULT;
+ goto done;
+ }
+
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = blob_count;
+ }
+done:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
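+/*
+ * Allocate a blob property object large enough to hold @length bytes,
+ * copy @data into it and add it to the device's blob list.  Returns
+ * NULL if @length or @data is missing or registration fails.
+ */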
+struct drm_property_blob *
+drm_property_create_blob(struct drm_device *dev, int length,
+ void *data)
+{
+ struct drm_property_blob *blob;
+ int ret;
+
+ if (!length || !data)
+ return NULL;
+
+ blob = malloc(sizeof(struct drm_property_blob)+length,
+ M_DRM, M_WAITOK|M_ZERO);
+ if (!blob)
+ return NULL;
+
+ ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+ if (ret) {
+ free(blob, M_DRM);
+ return NULL;
+ }
+
+ blob->length = length;
+
+ memcpy(blob->data, data, length);
+
+ list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+ return blob;
+}
+
+void
+drm_property_destroy_blob(struct drm_device *dev,
+ struct drm_property_blob *blob)
+{
+ drm_mode_object_put(dev, &blob->base);
+ list_del(&blob->head);
+ free(blob, M_DRM);
+}
+
+int
+drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ int ret = 0;
+ void __user *blob_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+ if (!obj) {
+ ret = EINVAL;
+ goto done;
+ }
+ blob = obj_to_blob(obj);
+
+ if (out_resp->length == blob->length) {
+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
+ if (copyout(blob->data, blob_ptr, blob->length) != 0) {
+ ret = EFAULT;
+ goto done;
+ }
+ }
+ out_resp->length = blob->length;
+
+done:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
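+/**
+ * drm_mode_connector_update_edid_property - replace a connector's EDID blob
+ * @connector: connector to update
+ * @edid: new EDID data, or NULL to clear the property
+ *
+ * Destroy any existing EDID blob, wrap the new EDID in a blob property
+ * and point the connector's edid property at it.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */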
+int
+drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+
+ if (connector->edid_blob_ptr)
+ drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+ /* Delete edid, when there is none. */
+ if (!edid) {
+ connector->edid_blob_ptr = NULL;
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
+ return ret;
+ }
+
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
+ if (!connector->edid_blob_ptr)
+ return EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.edid_property,
+ connector->edid_blob_ptr->base.id);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+
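+/*
+ * Check a proposed property value against the property's type:
+ * immutable properties never change, range values must fall between
+ * min and max, bitmask values must stay within the valid bit mask and
+ * enum values must match one of the listed values.  Blob changes are
+ * left to the driver to validate.
+ */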
+bool
+drm_property_change_is_valid(struct drm_property *property,
+ uint64_t value)
+{
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else if (property->flags & DRM_MODE_PROP_BITMASK) {
+ int i;
+ uint64_t valid_mask = 0;
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ /* Only the driver knows */
+ return true;
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+ }
+}
+
+int
+drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+int
+drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int)value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ drm_object_property_set_value(&connector->base, property, value);
+ return ret;
+}
+
+int
+drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = EINVAL;
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->set_property)
+ ret = crtc->funcs->set_property(crtc, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+int
+drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = EINVAL;
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
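+/**
+ * drm_mode_obj_get_properties_ioctl - get properties attached to an object
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Return the IDs and current values of all properties attached to the
+ * given mode object.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */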
+int
+drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
+ int i;
+ int copied = 0;
+ int props_count = 0;
+ uint32_t __user *props_ptr;
+ uint64_t __user *prop_values_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ props_count = obj->properties->count;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if ((arg->count_props >= props_count) && props_count) {
+ copied = 0;
+ props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ prop_values_ptr = (uint64_t __user *)(unsigned long)
+ (arg->prop_values_ptr);
+ for (i = 0; i < props_count; i++) {
+ if (copyout(&obj->properties->ids[i],
+ props_ptr + copied,
+ sizeof(obj->properties->ids[i])) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ if (copyout(&obj->properties->values[i],
+ prop_values_ptr + copied,
+ sizeof(obj->properties->values[i])) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ arg->count_props = props_count;
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
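+/**
+ * drm_mode_obj_set_property_ioctl - set a property on a mode object
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Look up the object and property, validate the new value and dispatch
+ * to the per-object-type set_property handler.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */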
+int
+drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_mode_object *prop_obj;
+ struct drm_property *property;
+ int ret = EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj)
+ goto out;
+ if (!arg_obj->properties)
+ goto out;
+
+ for (i = 0; i < arg_obj->properties->count; i++)
+ if (arg_obj->properties->ids[i] == arg->prop_id)
+ break;
+
+ if (i == arg_obj->properties->count)
+ goto out;
+
+ prop_obj = drm_mode_object_find(dev, arg->prop_id,
+ DRM_MODE_OBJECT_PROPERTY);
+ if (!prop_obj)
+ goto out;
+ property = obj_to_property(prop_obj);
+
+ if (!drm_property_change_is_valid(property, arg->value))
+ goto out;
+
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ }
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+int
+drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0) {
+ connector->encoder_ids[i] = encoder->base.id;
+ return 0;
+ }
+ }
+ return ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+void
+drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == encoder->base.id) {
+ connector->encoder_ids[i] = 0;
+ if (connector->encoder == encoder)
+ connector->encoder = NULL;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+
+int
+drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size)
+{
+ crtc->gamma_size = gamma_size;
+
+ crtc->gamma_store = malloc(gamma_size * sizeof(uint16_t) * 3,
+ M_DRM, M_WAITOK|M_ZERO);
+ if (!crtc->gamma_store) {
+ crtc->gamma_size = 0;
+ return ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+
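+/**
+ * drm_mode_gamma_set_ioctl - set a CRTC's gamma ramp
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Copy the red, green and blue lookup tables from userspace into the
+ * CRTC's gamma store and hand them to the driver.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */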
+int
+drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = ENOSYS;
+ goto out;
+ }
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (copyin((void __user *)(unsigned long)crtc_lut->red,
+ r_base, size) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ g_base = r_base + size;
+ if (copyin((void __user *)(unsigned long)crtc_lut->green,
+ g_base, size) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ b_base = g_base + size;
+ if (copyin((void __user *)(unsigned long)crtc_lut->blue,
+ b_base, size) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+int
+drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (copyout(r_base,
+ (void __user *)(unsigned long)crtc_lut->red, size) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ g_base = r_base + size;
+ if (copyout(g_base,
+ (void __user *)(unsigned long)crtc_lut->green, size) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ b_base = g_base + size;
+ if (copyout(b_base,
+ (void __user *)(unsigned long)crtc_lut->blue, size) != 0) {
+ ret = EFAULT;
+ goto out;
+ }
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
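+/**
+ * drm_mode_page_flip_ioctl - schedule a page flip on a CRTC
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Validate the requested framebuffer against the current CRTC viewport
+ * and ask the driver to flip to it, optionally queueing a vblank event
+ * to be delivered on completion.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */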
+int
+drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_page_flip *page_flip = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_pending_vblank_event *e = NULL;
+ int hdisplay, vdisplay;
+ int ret = EINVAL;
+
+ if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ page_flip->reserved != 0)
+ return EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj)
+ goto out;
+ crtc = obj_to_crtc(obj);
+
+ if (crtc->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+ * due to a hotplug event, that userspace has not
+ * yet discovered.
+ */
+ ret = EBUSY;
+ goto out;
+ }
+
+ if (crtc->funcs->page_flip == NULL)
+ goto out;
+
+ obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj)
+ goto out;
+ fb = obj_to_fb(obj);
+
+ hdisplay = crtc->mode.hdisplay;
+ vdisplay = crtc->mode.vdisplay;
+
+ if (crtc->invert_dimensions) {
+ int tmp = hdisplay;
+ hdisplay = vdisplay;
+ vdisplay = tmp;
+ }
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc->x > fb->width - hdisplay ||
+ crtc->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ ret = ENOSPC;
+ goto out;
+ }
+
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ ret = ENOMEM;
+ mtx_enter(&dev->event_lock);
+ if (file_priv->event_space < sizeof e->event) {
+ mtx_leave(&dev->event_lock);
+ goto out;
+ }
+ file_priv->event_space -= sizeof e->event;
+ mtx_leave(&dev->event_lock);
+
+ e = malloc(sizeof *e, M_DRM, M_WAITOK|M_ZERO);
+ if (e == NULL) {
+ mtx_enter(&dev->event_lock);
+ file_priv->event_space += sizeof e->event;
+ mtx_leave(&dev->event_lock);
+ goto out;
+ }
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = page_flip->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy =
+ (void (*) (struct drm_pending_event *)) drm_free;
+ }
+
+ ret = crtc->funcs->page_flip(crtc, fb, e);
+ if (ret) {
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ mtx_enter(&dev->event_lock);
+ file_priv->event_space += sizeof e->event;
+ mtx_leave(&dev->event_lock);
+ free(e, M_DRM);
+ }
+ }
+
+out:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+void
+drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ connector->status = connector_status_unknown;
+
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+ }
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
+
+int
+drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+
+ if (!dev->driver->dumb_create)
+ return ENOSYS;
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+int
+drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int
+drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
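+
+/*
+ * The three dumb buffer ioctls above only dispatch into the driver; a
+ * driver opts in by filling the matching hooks in its struct drm_driver
+ * (hypothetical foo_* function names, for illustration):
+ *
+ *	.dumb_create = foo_dumb_create,
+ *	.dumb_map_offset = foo_dumb_map_offset,
+ *	.dumb_destroy = foo_dumb_destroy,
+ *
+ * Drivers that leave these hooks NULL get ENOSYS back from the ioctls.
+ */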
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void
+drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format\n");
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
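+
+/*
+ * Example: for a 32bpp XRGB framebuffer,
+ *
+ *	unsigned int depth;
+ *	int bpp;
+ *
+ *	drm_fb_get_bpp_depth(DRM_FORMAT_XRGB8888, &depth, &bpp);
+ *
+ * sets depth to 24 (the X byte carries no color information) and bpp
+ * to 32.
+ */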
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int
+drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int
+drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
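+
+/*
+ * Example: DRM_FORMAT_NV12 has two planes, with a cpp of 1 for the Y
+ * plane and 2 for the interleaved CbCr plane, so the minimum pitch of
+ * plane i for a given plane width is (sketch; the chroma plane width
+ * is first reduced by the subsampling helpers below):
+ *
+ *	min_pitch = width * drm_format_plane_cpp(DRM_FORMAT_NV12, i);
+ */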
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int
+drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int
+drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
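+
+/*
+ * Together the two subsampling helpers give the chroma plane geometry.
+ * For a width x height DRM_FORMAT_YUV420 buffer, for example:
+ *
+ *	int hsub = drm_format_horz_chroma_subsampling(DRM_FORMAT_YUV420);
+ *	int vsub = drm_format_vert_chroma_subsampling(DRM_FORMAT_YUV420);
+ *	int chroma_width = width / hsub;	/* width / 2 */
+ *	int chroma_height = height / vsub;	/* height / 2 */
+ */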
+
+int
+drm_mode_handle_cmp(struct drm_mode_handle *a, struct drm_mode_handle *b)
+{
+ return a->handle < b->handle ? -1 : a->handle > b->handle;
+}
+
+SPLAY_GENERATE(drm_mode_tree, drm_mode_handle, entry, drm_mode_handle_cmp);
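+
+/*
+ * drm_mode_handle_cmp() orders mode objects by handle for the SPLAY
+ * tree declared in drm_crtc.h, so an ID lookup reduces to (sketch):
+ *
+ *	struct drm_mode_handle key, *res;
+ *
+ *	key.handle = id;
+ *	res = SPLAY_FIND(drm_mode_tree, &dev->mode_config.mode_tree, &key);
+ */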
diff --git a/sys/dev/pci/drm/drm_crtc.h b/sys/dev/pci/drm/drm_crtc.h
new file mode 100644
index 00000000000..89ca9447162
--- /dev/null
+++ b/sys/dev/pci/drm/drm_crtc.h
@@ -0,0 +1,1080 @@
+/* $OpenBSD: drm_crtc.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_CRTC_H__
+#define __DRM_CRTC_H__
+
+#include <sys/types.h>
+#include <dev/i2c/i2cvar.h>
+#include <sys/workq.h>
+#include "drm_mode.h"
+
+#include "drm_fourcc.h"
+
+struct drm_device;
+struct drm_mode_set;
+struct drm_framebuffer;
+struct drm_object_properties;
+
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+
+struct drm_mode_object {
+ uint32_t id;
+ uint32_t type;
+ struct drm_object_properties *properties;
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 24
+struct drm_object_properties {
+ int count;
+ uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+ uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
+/*
+ * Note on terminology: here, for brevity and convenience, we refer to connector
+ * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
+ * DVI, etc. And 'screen' refers to the whole of the visible display, which
+ * may span multiple monitors (and therefore multiple CRTC and connector
+ * structures).
+ */
+
+enum drm_mode_status {
+ MODE_OK = 0, /* Mode OK */
+ MODE_HSYNC, /* hsync out of range */
+ MODE_VSYNC, /* vsync out of range */
+ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
+	MODE_V_ILLEGAL,		/* mode has illegal vertical timings */
+ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
+ MODE_NOMODE, /* no mode with a matching name */
+ MODE_NO_INTERLACE, /* interlaced mode not supported */
+ MODE_NO_DBLESCAN, /* doublescan mode not supported */
+ MODE_NO_VSCAN, /* multiscan mode not supported */
+ MODE_MEM, /* insufficient video memory */
+ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
+ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
+ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
+ MODE_NOCLOCK, /* no fixed clock available */
+ MODE_CLOCK_HIGH, /* clock required is too high */
+ MODE_CLOCK_LOW, /* clock required is too low */
+ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
+ MODE_BAD_HVALUE, /* horizontal timing was out of range */
+ MODE_BAD_VVALUE, /* vertical timing was out of range */
+ MODE_BAD_VSCAN, /* VScan value out of range */
+ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
+ MODE_HSYNC_WIDE, /* horizontal sync too wide */
+ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
+ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
+ MODE_VSYNC_NARROW, /* vertical sync too narrow */
+ MODE_VSYNC_WIDE, /* vertical sync too wide */
+ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
+ MODE_VBLANK_WIDE, /* vertical blanking too wide */
+ MODE_PANEL, /* exceeds panel dimensions */
+ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
+ MODE_ONE_WIDTH, /* only one width is supported */
+ MODE_ONE_HEIGHT, /* only one height is supported */
+ MODE_ONE_SIZE, /* only one resolution is supported */
+ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
+	MODE_UNVERIFIED = -3,	/* mode needs to be reverified */
+ MODE_BAD = -2, /* unspecified reason */
+ MODE_ERROR = -1 /* error condition */
+};
+
+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
+ DRM_MODE_TYPE_CRTC_C)
+
+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+ .name = nm, .status = 0, .type = (t), .clock = (c), \
+ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
+ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
+ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
+ .vscan = (vs), .flags = (f), .vrefresh = 0, \
+ .base.type = DRM_MODE_OBJECT_MODE
+
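+/*
+ * DRM_MODE() is a designated-initializer helper for static mode tables.
+ * For example, the standard 640x480@60 VESA timing would be written as:
+ *
+ *	static const struct drm_display_mode mode_640x480 =
+ *	    { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175,
+ *		640, 656, 752, 800, 0, 480, 490, 492, 525, 0,
+ *		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) };
+ */
+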
+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+
+struct drm_display_mode {
+ /* Header */
+ struct list_head head;
+ struct drm_mode_object base;
+
+ char name[DRM_DISPLAY_MODE_LEN];
+
+ enum drm_mode_status status;
+ unsigned int type;
+
+ /* Proposed mode values */
+ int clock; /* in kHz */
+ int hdisplay;
+ int hsync_start;
+ int hsync_end;
+ int htotal;
+ int hskew;
+ int vdisplay;
+ int vsync_start;
+ int vsync_end;
+ int vtotal;
+ int vscan;
+ unsigned int flags;
+
+ /* Addressable image size (may be 0 for projectors, etc.) */
+ int width_mm;
+ int height_mm;
+
+ /* Actual mode we give to hw */
+ int clock_index;
+ int synth_clock;
+ int crtc_hdisplay;
+ int crtc_hblank_start;
+ int crtc_hblank_end;
+ int crtc_hsync_start;
+ int crtc_hsync_end;
+ int crtc_htotal;
+ int crtc_hskew;
+ int crtc_vdisplay;
+ int crtc_vblank_start;
+ int crtc_vblank_end;
+ int crtc_vsync_start;
+ int crtc_vsync_end;
+ int crtc_vtotal;
+
+ /* Driver private mode info */
+ int private_size;
+ int *private;
+ int private_flags;
+
+ int vrefresh; /* in Hz */
+ int hsync; /* in kHz */
+ /* Link for sorting */
+ RB_ENTRY(drm_display_mode) sort;
+};
+
+enum drm_connector_status {
+ connector_status_connected = 1,
+ connector_status_disconnected = 2,
+ connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+ SubPixelUnknown = 0,
+ SubPixelHorizontalRGB,
+ SubPixelHorizontalBGR,
+ SubPixelVerticalRGB,
+ SubPixelVerticalBGR,
+ SubPixelNone,
+};
+
+#define DRM_COLOR_FORMAT_RGB444 (1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
+/*
+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
+ */
+struct drm_display_info {
+ char name[DRM_DISPLAY_INFO_LEN];
+
+ /* Physical size */
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ /* Clock limits FIXME: storage format */
+ unsigned int min_vfreq, max_vfreq;
+ unsigned int min_hfreq, max_hfreq;
+ unsigned int pixel_clock;
+ unsigned int bpc;
+
+ enum subpixel_order subpixel_order;
+ u32 color_formats;
+
+ u8 cea_rev;
+};
+
+struct drm_framebuffer_funcs {
+ /* note: use drm_framebuffer_remove() */
+ void (*destroy)(struct drm_framebuffer *framebuffer);
+ int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+	/**
+	 * Optional callback for the dirty fb ioctl.
+	 *
+	 * Userspace can notify the driver via this callback
+	 * that an area of the framebuffer has changed and should
+	 * be flushed to the display hardware.
+	 *
+	 * See the documentation for struct drm_mode_fb_dirty_cmd
+	 * in drm_mode.h for more information; the semantics and
+	 * arguments map one to one onto this function.
+	 */
+ int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
+};
+
+struct drm_framebuffer {
+ struct drm_device *dev;
+	/*
+	 * Note that the fb is refcounted for the benefit of driver
+	 * internals.  For example, on some hw disabling a CRTC/plane
+	 * is asynchronous and scanout does not actually complete until
+	 * the next vblank, so some cleanup (like releasing the
+	 * reference(s) on the backing GEM bo(s)) should be deferred.
+	 * In cases like this the driver wants to hold a ref to the fb
+	 * even though it has already been removed from the userspace
+	 * perspective.
+	 */
+ int refcount;
+ struct list_head head;
+ struct drm_mode_object base;
+ const struct drm_framebuffer_funcs *funcs;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
+ unsigned int width;
+ unsigned int height;
+	/* color depth in bits; may differ from bits_per_pixel (e.g. 24 vs 32) */
+ unsigned int depth;
+ int bits_per_pixel;
+ int flags;
+ uint32_t pixel_format; /* fourcc format */
+ struct list_head filp_head;
+ /* if you are using the helper */
+ void *helper_private;
+};
+
+struct drm_property_blob {
+ struct drm_mode_object base;
+ struct list_head head;
+ unsigned int length;
+ unsigned char data[];
+};
+
+struct drm_property_enum {
+ uint64_t value;
+ struct list_head head;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_property {
+ struct list_head head;
+ struct drm_mode_object base;
+ uint32_t flags;
+ char name[DRM_PROP_NAME_LEN];
+ uint32_t num_values;
+ uint64_t *values;
+
+ struct list_head enum_blob_list;
+};
+
+struct drm_crtc;
+struct drm_connector;
+struct drm_encoder;
+struct drm_pending_vblank_event;
+struct drm_plane;
+
+/**
+ * drm_crtc_funcs - control CRTCs for a given device
+ * @save: save CRTC state
+ * @restore: restore CRTC state
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
+ * @cursor_set: setup the cursor
+ * @cursor_move: move the cursor
+ * @gamma_set: specify color ramp for CRTC
+ * @destroy: deinit and free object
+ * @set_property: called when a property is changed
+ * @set_config: apply a new CRTC configuration
+ * @page_flip: initiate a page flip
+ *
+ * The drm_crtc_funcs structure is the central CRTC management structure
+ * in the DRM. Each CRTC controls one or more connectors (note that the name
+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
+ * connectors, not just CRTs).
+ *
+ * Each driver is responsible for filling out this structure at startup time,
+ * in addition to providing other modesetting features, like i2c and DDC
+ * bus accessors.
+ */
+struct drm_crtc_funcs {
+ /* Save CRTC state */
+ void (*save)(struct drm_crtc *crtc); /* suspend? */
+ /* Restore CRTC state */
+ void (*restore)(struct drm_crtc *crtc); /* resume? */
+ /* Reset CRTC state */
+ void (*reset)(struct drm_crtc *crtc);
+
+ /* cursor controls */
+ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+ int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
+
+ /* Set gamma on the CRTC */
+ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);
+ /* Object destroy routine */
+ void (*destroy)(struct drm_crtc *crtc);
+
+ int (*set_config)(struct drm_mode_set *set);
+
+ /*
+	 * Flip to the given framebuffer.  This implements the page
+	 * flip ioctl described in drm_mode.h; specifically, the
+ * implementation must return immediately and block all
+ * rendering to the current fb until the flip has completed.
+ * If userspace set the event flag in the ioctl, the event
+ * argument will point to an event to send back when the flip
+ * completes, otherwise it will be NULL.
+ */
+ int (*page_flip)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+
+ int (*set_property)(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
+};
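+
+/*
+ * A driver fills an instance of this at init time; a minimal KMS
+ * driver might use (hypothetical foo_* names, for illustration):
+ *
+ *	static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *		.gamma_set = foo_crtc_gamma_set,
+ *		.set_config = drm_crtc_helper_set_config,
+ *		.destroy = foo_crtc_destroy,
+ *		.page_flip = foo_crtc_page_flip,
+ *	};
+ *
+ * with drm_crtc_helper_set_config() being the generic implementation
+ * provided by drm_crtc_helper.c.
+ */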
+
+/**
+ * drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object for ID tracking etc.
+ * @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
+ * @invert_dimensions: for purposes of error checking crtc vs fb sizes,
+ * invert the width/height of the crtc. This is used if the driver
+ * is performing 90 or 270 degree rotated scanout
+ * @x: x position on screen
+ * @y: y position on screen
+ * @funcs: CRTC control functions
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
+ *
+ * Each CRTC may have one or more connectors associated with it. This structure
+ * allows the CRTC to be controlled.
+ */
+struct drm_crtc {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ /* framebuffer the connector is currently bound to */
+ struct drm_framebuffer *fb;
+
+ bool enabled;
+
+ /* Requested mode from modesetting. */
+ struct drm_display_mode mode;
+
+ /* Programmed mode in hw, after adjustments for encoders,
+ * crtc, panel scaling etc. Needed for timestamping etc.
+ */
+ struct drm_display_mode hwmode;
+
+ bool invert_dimensions;
+
+ int x, y;
+ const struct drm_crtc_funcs *funcs;
+
+ /* CRTC gamma size for reporting to userspace */
+ uint32_t gamma_size;
+ uint16_t *gamma_store;
+
+ /* Constants needed for precise vblank and swap timestamping. */
+ s64 framedur_ns, linedur_ns, pixeldur_ns;
+
+ /* if you are using the helper */
+ void *helper_private;
+
+ struct drm_object_properties properties;
+};
+
+
+/**
+ * drm_connector_funcs - control connectors on a given device
+ * @dpms: set power state (see drm_crtc_funcs above)
+ * @save: save connector state
+ * @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
+ * @detect: is this connector active?
+ * @fill_modes: fill mode list for this connector
+ * @set_property: property for this connector may need update
+ * @destroy: make object go away
+ * @force: notify the driver the connector is forced on
+ *
+ * Each CRTC may have one or more connectors attached to it. The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+ void (*dpms)(struct drm_connector *connector, int mode);
+ void (*save)(struct drm_connector *connector);
+ void (*restore)(struct drm_connector *connector);
+ void (*reset)(struct drm_connector *connector);
+
+ /* Check to see if anything is attached to the connector.
+ * @force is set to false whilst polling, true when checking the
+ * connector due to user request. @force can be used by the driver
+ * to avoid expensive, destructive operations during automated
+ * probing.
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
+ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+ int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val);
+ void (*destroy)(struct drm_connector *connector);
+ void (*force)(struct drm_connector *connector);
+};
+
+/**
+ * drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
+struct drm_encoder_funcs {
+ void (*reset)(struct drm_encoder *encoder);
+ void (*destroy)(struct drm_encoder *encoder);
+};
+
+#define DRM_CONNECTOR_MAX_UMODES 16
+#define DRM_CONNECTOR_LEN 32
+#define DRM_CONNECTOR_MAX_ENCODER 3
+
+/**
+ * drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+ * @crtc: currently bound CRTC
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
+ */
+struct drm_encoder {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+ int encoder_type;
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+
+ struct drm_crtc *crtc;
+ const struct drm_encoder_funcs *funcs;
+ void *helper_private;
+};
+
+enum drm_connector_force {
+ DRM_FORCE_UNSPECIFIED,
+ DRM_FORCE_OFF,
+ DRM_FORCE_ON, /* force on analog part normally */
+ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
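+
+/*
+ * e.g. a connector with working hardware hotplug detection would set
+ *
+ *	connector->polled = DRM_CONNECTOR_POLL_HPD;
+ *
+ * while one without HPD would typically use DRM_CONNECTOR_POLL_CONNECT
+ * (optionally together with DRM_CONNECTOR_POLL_DISCONNECT).
+ */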
+
+#define MAX_ELD_BYTES 128
+
+/**
+ * drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @display_info: information about attached display (e.g. from EDID)
+ * @funcs: connector control functions
+ * @user_modes: user added mode list
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @properties: property tracking for this connector
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC. Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+ struct drm_device *dev;
+ struct device kdev;
+ struct device_attribute *attr;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ int connector_type;
+ int connector_type_id;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ struct list_head modes; /* list of modes on this connector */
+
+ enum drm_connector_status status;
+
+ /* these are modes added by probing with DDC or the BIOS */
+ struct list_head probed_modes;
+
+ struct drm_display_info display_info;
+ const struct drm_connector_funcs *funcs;
+
+ struct list_head user_modes;
+ struct drm_property_blob *edid_blob_ptr;
+ struct drm_object_properties properties;
+
+ uint8_t polled; /* DRM_CONNECTOR_POLL_* */
+
+ /* requested DPMS state */
+ int dpms;
+
+ void *helper_private;
+
+ /* forced on connector */
+ enum drm_connector_force force;
+ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ struct drm_encoder *encoder; /* currently active encoder */
+
+ /* EDID bits */
+ uint8_t eld[MAX_ELD_BYTES];
+ bool dvi_dual;
+ int max_tmds_clock; /* in MHz */
+ bool latency_present[2];
+ int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ int audio_latency[2];
+	int null_edid_counter; /* needed to work around some HW bugs where we get all 0s */
+ unsigned bad_edid_counter;
+};
+
+/**
+ * drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
+ * @set_property: called when a property is changed
+ */
+struct drm_plane_funcs {
+ int (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+ int (*disable_plane)(struct drm_plane *plane);
+ void (*destroy)(struct drm_plane *plane);
+
+ int (*set_property)(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val);
+};
+
+/**
+ * drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @gamma_size: size of gamma table
+ * @gamma_store: gamma correction table
+ * @enabled: enabled flag
+ * @funcs: helper functions
+ * @helper_private: storage for driver layer
+ * @properties: property tracking for this plane
+ */
+struct drm_plane {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ uint32_t possible_crtcs;
+ uint32_t *format_types;
+ uint32_t format_count;
+
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ /* CRTC gamma size for reporting to userspace */
+ uint32_t gamma_size;
+ uint16_t *gamma_store;
+
+ bool enabled;
+
+ const struct drm_plane_funcs *funcs;
+ void *helper_private;
+
+ struct drm_object_properties properties;
+};
+
+/**
+ * drm_mode_set - new values for a CRTC config change
+ * @head: list management
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
+ *
+ * Represents a single CRTC, the connectors it drives, the mode to use
+ * and the framebuffer it scans out from.
+ *
+ * This is used to set modes.
+ */
+struct drm_mode_set {
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ struct drm_display_mode *mode;
+
+ uint32_t x;
+ uint32_t y;
+
+ struct drm_connector **connectors;
+ size_t num_connectors;
+};
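+
+/*
+ * Example: a single-connector mode set (sketch; fb, crtc, mode and
+ * connector are assumed to have been looked up already):
+ *
+ *	struct drm_connector *connectors[] = { connector };
+ *	struct drm_mode_set set = {
+ *		.fb = fb,
+ *		.crtc = crtc,
+ *		.mode = mode,
+ *		.connectors = connectors,
+ *		.num_connectors = 1,
+ *	};
+ *	int ret = crtc->funcs->set_config(&set);
+ */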
+
+/**
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
+ */
+struct drm_mode_config_funcs {
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+ void (*output_poll_changed)(struct drm_device *dev);
+};
+
+/**
+ * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state. But in the
+ * future it could allow groups of objects to be set aside into independent
+ * control groups for use by different user level processes (e.g. two X servers
+ * running simultaneously on different heads, each with their own mode
+ * configuration and freedom of mode setting).
+ */
+struct drm_mode_group {
+ uint32_t num_crtcs;
+ uint32_t num_encoders;
+ uint32_t num_connectors;
+
+ /* list of object IDs for this group */
+ uint32_t *id_list;
+};
+
+/**
+ * drm_mode_config - Mode configuration control structure
+ * @rwl: rwlock protecting KMS related lists and structures
+ * @idr_rwl: rwlock for KMS ID allocation and management
+ * @mode_tree: main KMS ID tracking object
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+ * @connector_list: list of connector objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling status for this device
+ * @output_poll_to: timeout used to kick off output polling
+ * @poll_task: work queue task doing the polling in process context
+ * @*_property: core property tracking
+ *
+ * Core mode resource tracking structure. All CRTC, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
+ */
+struct drm_mode_config {
+ struct rwlock rwl; /* protects configuration (mode lists etc.) */
+ struct rwlock idr_rwl; /* for IDR management */
+	SPLAY_HEAD(drm_mode_tree, drm_mode_handle) mode_tree; /* one tree for all IDs, fb, crtc, connector, modes - just makes life easier */
+ uint32_t mode_obj_id;
+ /* this is limited to one for now */
+ int num_fb;
+ struct list_head fb_list;
+ int num_connector;
+ struct list_head connector_list;
+ int num_encoder;
+ struct list_head encoder_list;
+ int num_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+ struct list_head crtc_list;
+
+ struct list_head property_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ const struct drm_mode_config_funcs *funcs;
+ paddr_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ bool poll_running;
+ struct timeout output_poll_to;
+ struct workq_task poll_task;
+
+ /* pointers to standard properties */
+ struct list_head property_blob_list;
+ struct drm_property *edid_property;
+ struct drm_property *dpms_property;
+
+ /* DVI-I properties */
+ struct drm_property *dvi_i_subconnector_property;
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /* TV properties */
+ struct drm_property *tv_subconnector_property;
+ struct drm_property *tv_select_subconnector_property;
+ struct drm_property *tv_mode_property;
+ struct drm_property *tv_left_margin_property;
+ struct drm_property *tv_right_margin_property;
+ struct drm_property *tv_top_margin_property;
+ struct drm_property *tv_bottom_margin_property;
+ struct drm_property *tv_brightness_property;
+ struct drm_property *tv_contrast_property;
+ struct drm_property *tv_flicker_reduction_property;
+ struct drm_property *tv_overscan_property;
+ struct drm_property *tv_saturation_property;
+ struct drm_property *tv_hue_property;
+
+ /* Optional properties */
+ struct drm_property *scaling_mode_property;
+ struct drm_property *dithering_mode_property;
+ struct drm_property *dirty_info_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+};
+
+#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
+
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+
+extern int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
+
+extern void drm_connector_cleanup(struct drm_connector *connector);
+/* helper to unplug all connectors from sysfs for device */
+extern void drm_connector_unplug_all(struct drm_device *dev);
+
+extern int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type);
+
+extern int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+
+extern void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+extern char *drm_get_connector_name(struct drm_connector *connector);
+extern char *drm_get_dpms_name(int val);
+extern char *drm_get_dvi_i_subconnector_name(int val);
+extern char *drm_get_dvi_i_select_name(int val);
+extern char *drm_get_tv_subconnector_name(int val);
+extern char *drm_get_tv_select_name(int val);
+extern void drm_fb_release(struct drm_device *dev, struct drm_file *file_priv);
+extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
+extern bool drm_probe_ddc(struct i2c_controller *adapter);
+extern struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_controller *adapter);
+extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
+extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
+extern void drm_mode_config_cleanup(struct drm_device *dev);
+extern void drm_mode_set_name(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
+
+/* for use by the fb module */
+extern int drm_mode_attachmode_crtc(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
+
+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+extern void drm_mode_list_concat(struct list_head *head,
+ struct list_head *new);
+extern void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch);
+extern void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose);
+extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_hsync(const struct drm_display_mode *mode);
+extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+ int adjust_flags);
+extern void drm_mode_connector_list_update(struct drm_connector *connector);
+extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid);
+extern int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t val);
+extern int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *value);
+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
+extern void drm_framebuffer_set_object(struct drm_device *dev,
+ unsigned long handle);
+extern int drm_framebuffer_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs);
+extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
+extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
+extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
+extern bool drm_crtc_in_use(struct drm_crtc *crtc);
+
+extern void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val);
+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values);
+extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max);
+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+extern int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name);
+extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
+ char *formats[]);
+extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+extern char *drm_get_encoder_name(struct drm_encoder *encoder);
+
+extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
+/* IOCTLs */
+extern int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getconnector(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_setplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
+extern int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_replacefb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getencoder(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
+extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern bool drm_detect_monitor_audio(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins);
+extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins);
+extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins, int GTF_M,
+ int GTF_2C, int GTF_K, int GTF_2J);
+extern int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
+extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
+
+extern int drm_edid_header_is_valid(const u8 *raw_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
+extern bool drm_edid_is_valid(struct edid *edid);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb);
+
+extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp);
+extern int drm_format_num_planes(uint32_t format);
+extern int drm_format_plane_cpp(uint32_t format, int plane);
+extern int drm_format_horz_chroma_subsampling(uint32_t format);
+extern int drm_format_vert_chroma_subsampling(uint32_t format);
+
+#endif /* __DRM_CRTC_H__ */
diff --git a/sys/dev/pci/drm/drm_crtc_helper.c b/sys/dev/pci/drm/drm_crtc_helper.c
new file mode 100644
index 00000000000..9ed6c417cb2
--- /dev/null
+++ b/sys/dev/pci/drm/drm_crtc_helper.c
@@ -0,0 +1,1137 @@
+/* $OpenBSD: drm_crtc_helper.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "drm_edid.h"
+
+void drm_mode_validate_flag(struct drm_connector *, int);
+void drm_encoder_disable(struct drm_encoder *);
+void drm_calc_timestamping_constants(struct drm_crtc *);
+bool drm_encoder_crtc_ok(struct drm_encoder *, struct drm_crtc *);
+void drm_crtc_prepare_encoders(struct drm_device *);
+int drm_helper_choose_encoder_dpms(struct drm_encoder *);
+int drm_helper_choose_crtc_dpms(struct drm_crtc *);
+int drm_crtc_helper_disable(struct drm_crtc *);
+void drm_output_poll_execute(void *, void *);
+void drm_output_poll_tick(void *);
+
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void
+drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list);
+ }
+
+ list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
+static bool drm_kms_helper_poll = true;
+
+void
+drm_mode_validate_flag(struct drm_connector *connector,
+ int flags)
+{
+ struct drm_display_mode *mode;
+
+ if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+ return;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ !(flags & DRM_MODE_FLAG_INTERLACE))
+ mode->status = MODE_NO_INTERLACE;
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+ !(flags & DRM_MODE_FLAG_DBLSCAN))
+ mode->status = MODE_NO_DBLESCAN;
+ }
+
+ return;
+}
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
+ *
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
+ *
+ * RETURNS:
+ * Number of modes found on @connector.
+ */
+int
+drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ int count = 0;
+ int mode_flags = 0;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ drm_get_connector_name(connector));
+ /* set all modes to the unverified state */
+ list_for_each_entry(mode, &connector->modes, head)
+ mode->status = MODE_UNVERIFIED;
+
+ if (connector->force) {
+ if (connector->force == DRM_FORCE_ON)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+ if (connector->funcs->force)
+ connector->funcs->force(connector);
+ } else {
+ connector->status = connector->funcs->detect(connector, true);
+ }
+
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
+ if (connector->status == connector_status_disconnected) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, drm_get_connector_name(connector));
+ drm_mode_connector_update_edid_property(connector, NULL);
+ goto prune;
+ }
+
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+ count = drm_load_edid_firmware(connector);
+ if (count == 0)
+#endif
+ count = (*connector_funcs->get_modes)(connector);
+
+ if (count == 0 && connector->status == connector_status_connected)
+ count = drm_add_modes_noedid(connector, 1024, 768);
+ if (count == 0)
+ goto prune;
+
+ drm_mode_connector_list_update(connector);
+
+ if (maxX && maxY)
+ drm_mode_validate_size(dev, &connector->modes, maxX,
+ maxY, 0);
+
+ if (connector->interlace_allowed)
+ mode_flags |= DRM_MODE_FLAG_INTERLACE;
+ if (connector->doublescan_allowed)
+ mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ drm_mode_validate_flag(connector, mode_flags);
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->status == MODE_OK)
+ mode->status = connector_funcs->mode_valid(connector,
+ mode);
+ }
+
+prune:
+ drm_mode_prune_invalid(dev, &connector->modes, true);
+
+ if (list_empty(&connector->modes))
+ return 0;
+
+ drm_mode_sort(&connector->modes);
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes:\n", connector->base.id,
+ drm_get_connector_name(connector));
+ list_for_each_entry(mode, &connector->modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
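+
+/*
+ * Drivers using the helpers typically plug this straight into their
+ * connector funcs, e.g.:
+ *
+ *	.fill_modes = drm_helper_probe_single_connector_modes,
+ */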
+
+/**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoder's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @encoder is part of the mode_config, false otherwise.
+ */
+bool
+drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(drm_helper_encoder_in_use);
+
+/**
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is part of the mode_config, false otherwise.
+ */
+bool
+drm_helper_crtc_in_use(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+ /* FIXME: Locking around list access? */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(drm_helper_crtc_in_use);
+
+void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+ if (encoder_funcs->disable)
+ (*encoder_funcs->disable)(encoder);
+ else
+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void
+drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->status == connector_status_disconnected)
+ connector->encoder = NULL;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (!drm_helper_encoder_in_use(encoder)) {
+ drm_encoder_disable(encoder);
+			/* disconnect encoder from any connector */
+ encoder->crtc = NULL;
+ }
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled) {
+ if (crtc_funcs->disable)
+ (*crtc_funcs->disable)(crtc);
+ else
+ (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+ crtc->fb = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_helper_disable_unused_functions);
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+bool
+drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+	if (crtc == NULL) {
+		printf("%s checking null crtc?\n", __func__);
+		return false;
+	}
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
+
+/*
+ * Check the CRTC we're going to map each output to vs. its current
+ * CRTC. If they don't match, we have to disable the output and the CRTC
+ * since the driver will have to re-route things.
+ */
+void
+drm_crtc_prepare_encoders(struct drm_device *dev)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder_funcs = encoder->helper_private;
+ /* Disable unused encoders */
+ if (encoder->crtc == NULL)
+ drm_encoder_disable(encoder);
+ /* Disable encoders whose CRTC is about to change */
+ if (encoder_funcs->get_crtc &&
+ encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
+ drm_encoder_disable(encoder);
+ }
+}
+
+/**
+ * drm_crtc_helper_set_mode - internal helper to set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could, for example, use to update properties that
+ * require the entire output pipe to be disabled and re-enabled in a new
+ * configuration, such as changing whether audio is enabled on an HDMI link
+ * or changing panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool
+drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ int saved_x, saved_y;
+ struct drm_encoder *encoder;
+ bool ret = true;
+
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled)
+ return true;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return false;
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+ saved_x = crtc->x;
+ saved_y = crtc->y;
+
+ /* Update crtc values up front so the driver can rely on them for mode
+ * setting.
+ */
+ crtc->mode = *mode;
+ crtc->x = x;
+ crtc->y = y;
+
+	/* Pass our mode to the encoders and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto done;
+ }
+ }
+
+ if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto done;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ /* Prepare the encoders and CRTCs before setting the mode. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ /* Disable the encoders as the first thing we do. */
+ encoder_funcs->prepare(encoder);
+ }
+
+ drm_crtc_prepare_encoders(dev);
+
+ crtc_funcs->prepare(crtc);
+
+	/* Set up the DPLL and any encoder state that needs to adjust or depend
+ * on the DPLL.
+ */
+ ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+ if (!ret)
+ goto done;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ crtc_funcs->commit(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->commit(encoder);
+
+ }
+
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ crtc->x = saved_x;
+ crtc->y = saved_y;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_mode);
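+
+/*
+ * Usage sketch: re-applying a CRTC's current mode, much as the resume
+ * path further down does; a false return means the saved state was
+ * put back.
+ */
+#if 0
+	if (!drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+	    crtc->fb))
+		DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+#endif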
+
+int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
+/**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Set up a new configuration, provided by the upper layers (either an ioctl
+ * call from userspace or internally, e.g. from the fbdev support code) in @set,
+ * and enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
+ *
+ * RETURNS:
+ * Returns 0 on success, ERRNO on failure.
+ */
+int
+drm_crtc_helper_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+ struct drm_encoder *save_encoders, *new_encoder, *encoder;
+ struct drm_framebuffer *old_fb = NULL;
+ bool mode_changed = false; /* if true do a full mode set */
+ bool fb_changed = false; /* if true and !mode_changed just do a flip */
+ struct drm_connector *save_connectors, *connector;
+ int count = 0, ro, fail = 0;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_mode_set save_set;
+ int ret;
+ int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!set)
+ return EINVAL;
+
+ if (!set->crtc)
+ return EINVAL;
+
+ if (!set->crtc->helper_private)
+ return EINVAL;
+
+ crtc_funcs = set->crtc->helper_private;
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ return drm_crtc_helper_disable(set->crtc);
+ }
+
+ dev = set->crtc->dev;
+
+ /* Allocate space for the backup of all (non-pointer) crtc, encoder and
+ * connector data. */
+ save_crtcs = malloc(dev->mode_config.num_crtc *
+ sizeof(struct drm_crtc), M_DRM, M_WAITOK|M_ZERO);
+ if (!save_crtcs)
+ return ENOMEM;
+
+ save_encoders = malloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_encoder), M_DRM, M_WAITOK|M_ZERO);
+ if (!save_encoders) {
+ free(save_crtcs, M_DRM);
+ return ENOMEM;
+ }
+
+ save_connectors = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_connector), M_DRM, M_WAITOK|M_ZERO);
+ if (!save_connectors) {
+ free(save_crtcs, M_DRM);
+ free(save_encoders, M_DRM);
+ return ENOMEM;
+ }
+
+ /* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen, only the expected state is
+	 * restored, not the driver's personal bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ save_crtcs[count++] = *crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ save_encoders[count++] = *encoder;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ save_connectors[count++] = *connector;
+ }
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ mode_changed = true;
+ } else if (set->fb == NULL) {
+ mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ mode_changed = true;
+ } else
+ fb_changed = true;
+ }
+
+ if (set->x != set->crtc->x || set->y != set->crtc->y)
+ fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ mode_changed = true;
+ }
+
+ /* a) traverse passed in connector list and get encoders for them */
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ new_encoder = connector->encoder;
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector) {
+ new_encoder = connector_funcs->best_encoder(connector);
+				/* if we can't get an encoder for a connector
+				   we are setting up now, then fail */
+ if (new_encoder == NULL)
+					/* don't break so the fail path works correctly */
+ fail = 1;
+ break;
+ }
+ }
+
+ if (new_encoder != connector->encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ mode_changed = true;
+ /* If the encoder is reused for another connector, then
+ * the appropriate crtc will be set later.
+ */
+ if (connector->encoder)
+ connector->encoder->crtc = NULL;
+ connector->encoder = new_encoder;
+ }
+ }
+
+ if (fail) {
+ ret = EINVAL;
+ goto fail;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+
+ if (connector->encoder->crtc == set->crtc)
+ new_crtc = NULL;
+ else
+ new_crtc = connector->encoder->crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (new_crtc &&
+ !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
+ ret = EINVAL;
+ goto fail;
+ }
+ if (new_crtc != connector->encoder->crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ mode_changed = true;
+ connector->encoder->crtc = new_crtc;
+ }
+ if (new_crtc) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ new_crtc->base.id);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, drm_get_connector_name(connector));
+ }
+ }
+
+ /* mode_set_base is not a required function */
+ if (fb_changed && !crtc_funcs->mode_set_base)
+ mode_changed = true;
+
+ if (mode_changed) {
+ set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+ if (set->crtc->enabled) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
+ if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+ set->x, set->y,
+ old_fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ set->crtc->fb = old_fb;
+ ret = EINVAL;
+ goto fail;
+ }
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+ }
+ }
+ drm_helper_disable_unused_functions(dev);
+ } else if (fb_changed) {
+ set->crtc->x = set->x;
+ set->crtc->y = set->y;
+
+ old_fb = set->crtc->fb;
+ if (set->crtc->fb != set->fb)
+ set->crtc->fb = set->fb;
+ ret = crtc_funcs->mode_set_base(set->crtc,
+ set->x, set->y, old_fb);
+ if (ret != 0) {
+ set->crtc->fb = old_fb;
+ goto fail;
+ }
+ }
+
+ free(save_connectors, M_DRM);
+ free(save_encoders, M_DRM);
+ free(save_crtcs, M_DRM);
+ return 0;
+
+fail:
+ /* Restore all previous data. */
+ count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ *crtc = save_crtcs[count++];
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ *encoder = save_encoders[count++];
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ *connector = save_connectors[count++];
+ }
+
+ /* Try to restore the config */
+ if (mode_changed &&
+ !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+ save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+ free(save_connectors, M_DRM);
+ free(save_encoders, M_DRM);
+ free(save_crtcs, M_DRM);
+ return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_config);
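+
+/*
+ * Usage sketch, with hypothetical local variables: callers describe the
+ * desired state in a struct drm_mode_set and hand it to the helper,
+ * which decides between a full mode set and a simple flip.
+ */
+#if 0
+	struct drm_mode_set set = {
+		.crtc = crtc,
+		.mode = mode,
+		.x = 0,
+		.y = 0,
+		.fb = fb,
+		.connectors = &connector,	/* array of connector ptrs */
+		.num_connectors = 1,
+	};
+
+	ret = drm_crtc_helper_set_config(&set);	/* 0 on success */
+#endif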
+
+int
+drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+int
+drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+/**
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
+ */
+void
+drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ int old_dpms;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* from off to on, do crtc then encoder */
+ if (mode < old_dpms) {
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ }
+
+ /* from on to off, do encoder then crtc */
+ if (mode > old_dpms) {
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+
+ return;
+}
+EXPORT_SYMBOL(drm_helper_connector_dpms);
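+
+/*
+ * Usage sketch: forcing a connector off through the helper; it walks
+ * the rest of the output mesh, so a shared crtc stays on if another
+ * connector still needs it.
+ */
+#if 0
+	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+#endif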
+
+int
+drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int i;
+
+ fb->width = mode_cmd->width;
+ fb->height = mode_cmd->height;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
+
+int
+drm_helper_resume_force_mode(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ int ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+ if (!crtc->enabled)
+ continue;
+
+ ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+
+ if (ret == false)
+ DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+ /* Turn off outputs that were already powered off */
+ if (drm_helper_choose_crtc_dpms(crtc)) {
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+			if (encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+ /* disable the unused connectors while restoring the modesetting */
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+void
+drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ /* send a uevent + call fbdev */
+#ifdef notyet
+	drm_sysfs_hotplug_event(dev);
+#endif
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+
+void
+drm_output_poll_tick(void *arg)
+{
+ struct drm_device *dev = arg;
+
+ workq_queue_task(NULL, &dev->mode_config.poll_task, 0,
+ drm_output_poll_execute, dev, NULL);
+}
+
+#define DRM_OUTPUT_POLL_SECONDS 10
+void
+drm_output_poll_execute(void *arg1, void *arg2)
+{
+ struct drm_device *dev = (struct drm_device *)arg1;
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool repoll = false, changed = false;
+
+ if (!drm_kms_helper_poll)
+ return;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Ignore forced connectors. */
+ if (connector->force)
+ continue;
+
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
+
+ old_status = connector->status;
+		/* if we are connected and don't want to poll for
+		   disconnect, skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ rw_exit_write(&dev->mode_config.rwl);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+
+ if (repoll)
+ timeout_add_sec(&dev->mode_config.output_poll_to,
+ DRM_OUTPUT_POLL_SECONDS);
+}
+
+void
+drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ timeout_del(&dev->mode_config.output_poll_to);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void
+drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
+ poll = true;
+ }
+
+ if (poll)
+ timeout_add_sec(&dev->mode_config.output_poll_to,
+ DRM_OUTPUT_POLL_SECONDS);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void
+drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ timeout_set(&dev->mode_config.output_poll_to, drm_output_poll_tick,
+ dev);
+ dev->mode_config.poll_enabled = true;
+
+ drm_kms_helper_poll_enable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
+
+void
+drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
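+
+/*
+ * Usage sketch (hypothetical mydrv_attach/mydrv_detach): the poll
+ * timeout is armed once at attach time and torn down at detach, while
+ * HPD interrupts instead feed drm_helper_hpd_irq_event() below.
+ */
+#if 0
+	/* in mydrv_attach() */
+	drm_kms_helper_poll_init(dev);
+
+	/* in mydrv_detach() */
+	drm_kms_helper_poll_fini(dev);
+#endif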
+
+void
+drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool changed = false;
+
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ rw_exit_write(&dev->mode_config.rwl);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
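+
+/*
+ * Usage sketch, hypothetical interrupt path: a driver's hotplug
+ * interrupt handler calls this helper, which re-detects only the
+ * HPD-capable connectors and fires the hotplug event on a change.
+ */
+#if 0
+	if (status & MYDRV_IRQ_HOTPLUG)		/* MYDRV_* is made up */
+		drm_helper_hpd_irq_event(dev);
+#endif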
diff --git a/sys/dev/pci/drm/drm_crtc_helper.h b/sys/dev/pci/drm/drm_crtc_helper.h
new file mode 100644
index 00000000000..e199a32f987
--- /dev/null
+++ b/sys/dev/pci/drm/drm_crtc_helper.h
@@ -0,0 +1,167 @@
+/* $OpenBSD: drm_crtc_helper.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The DRM mode setting helper functions are common code for drivers to use if
+ * they wish. Drivers are not forced to use this code in their
+ * implementations, but it would be useful if the code they do use at least
+ * provides a consistent interface and operation to userspace.
+ */
+
+#ifndef __DRM_CRTC_HELPER_H__
+#define __DRM_CRTC_HELPER_H__
+
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
+/**
+ * drm_crtc_helper_funcs - helper operations for CRTCs
+ * @mode_fixup: try to fixup proposed mode for this CRTC
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_crtc_helper_funcs {
+ /*
+ * Control power levels on the CRTC. If the mode passed in is
+ * unsupported, the provider must use the next lowest power level.
+ */
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ void (*prepare)(struct drm_crtc *crtc);
+ void (*commit)(struct drm_crtc *crtc);
+
+ /* Provider can fixup or change mode timings before modeset occurs */
+ bool (*mode_fixup)(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /* Actually set the mode */
+ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+ /* Move the crtc on the current fb to the given position *optional* */
+ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+ int (*mode_set_base_atomic)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic);
+
+ /* reload the current crtc LUT */
+ void (*load_lut)(struct drm_crtc *crtc);
+
+ /* disable crtc when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_crtc *crtc);
+};
+
+/**
+ * drm_encoder_helper_funcs - helper operations for encoders
+ * @mode_fixup: try to fixup proposed mode for this encoder
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_encoder_helper_funcs {
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+ void (*save)(struct drm_encoder *encoder);
+ void (*restore)(struct drm_encoder *encoder);
+
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*prepare)(struct drm_encoder *encoder);
+ void (*commit)(struct drm_encoder *encoder);
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
+ /* detect for DAC style encoders */
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /* disable encoder when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_encoder *encoder);
+};
+
+/**
+ * drm_connector_helper_funcs - helper operations for connectors
+ * @get_modes: get mode list for this connector
+ * @mode_valid: is this mode valid on the given connector?
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_connector_helper_funcs {
+ int (*get_modes)(struct drm_connector *connector);
+ int (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+ struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+};
+
+extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
+extern void drm_helper_disable_unused_functions(struct drm_device *dev);
+extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
+extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
+
+extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+ const struct drm_crtc_helper_funcs *funcs)
+{
+ crtc->helper_private = (void *)funcs;
+}
+
+static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
+ const struct drm_encoder_helper_funcs *funcs)
+{
+ encoder->helper_private = (void *)funcs;
+}
+
+static inline void drm_connector_helper_add(struct drm_connector *connector,
+ const struct drm_connector_helper_funcs *funcs)
+{
+ connector->helper_private = (void *)funcs;
+}
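+
+/*
+ * Usage sketch (hypothetical "mydrv" names): a driver defines helper
+ * funcs for its objects and attaches them with the inlines above when
+ * it creates its crtcs, encoders and connectors.
+ */
+#if 0
+static const struct drm_connector_helper_funcs mydrv_connector_helper_funcs = {
+	.get_modes = mydrv_get_modes,		/* hypothetical */
+	.mode_valid = mydrv_mode_valid,		/* hypothetical */
+	.best_encoder = mydrv_best_encoder,	/* hypothetical */
+};
+
+/* at connector init time: */
+/* drm_connector_helper_add(connector, &mydrv_connector_helper_funcs); */
+#endif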
+
+extern int drm_helper_resume_force_mode(struct drm_device *dev);
+extern void drm_kms_helper_poll_init(struct drm_device *dev);
+extern void drm_kms_helper_poll_fini(struct drm_device *dev);
+extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
+
+extern void drm_kms_helper_poll_disable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+
+#endif
diff --git a/sys/dev/pci/drm/drm_dp_helper.c b/sys/dev/pci/drm/drm_dp_helper.c
new file mode 100644
index 00000000000..feaaaf3e74d
--- /dev/null
+++ b/sys/dev/pci/drm/drm_dp_helper.c
@@ -0,0 +1,373 @@
+/* $OpenBSD: drm_dp_helper.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_dp_helper.h"
+
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
+int i2c_algo_dp_aux_transaction(struct i2c_controller *, int, uint8_t,
+ uint8_t *);
+int i2c_algo_dp_aux_address(struct i2c_controller *, u16, bool);
+void i2c_algo_dp_aux_stop(struct i2c_controller *, bool);
+int i2c_algo_dp_aux_put_byte(struct i2c_controller *, u8);
+int i2c_algo_dp_aux_get_byte(struct i2c_controller *, u8 *);
+void i2c_dp_aux_reset_bus(struct i2c_controller *);
+int i2c_dp_aux_prepare_bus(struct i2c_controller *);
+int i2c_dp_aux_add_bus(struct i2c_controller *);
+u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int);
+u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], int);
+int i2c_algo_dp_aux_exec(void *, i2c_op_t, i2c_addr_t,
+ const void *, size_t, void *, size_t, int);
+void i2c_dp_aux_release_bus(void *, int);
+int i2c_dp_aux_acquire_bus(void *, int);
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+int
+i2c_algo_dp_aux_transaction(struct i2c_controller *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->ic_cookie;
+ int ret;
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for doing
+ * a write followed by a read (as needed for DDC).
+ */
+int
+i2c_algo_dp_aux_address(struct i2c_controller *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->ic_cookie;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+void
+i2c_algo_dp_aux_stop(struct i2c_controller *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->ic_cookie;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+int
+i2c_algo_dp_aux_put_byte(struct i2c_controller *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->ic_cookie;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+int
+i2c_algo_dp_aux_get_byte(struct i2c_controller *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->ic_cookie;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
+int
+i2c_algo_dp_aux_exec(void *cookie, i2c_op_t op, i2c_addr_t addr,
+ const void *cmdbuf, size_t cmdlen, void *buffer, size_t len, int flags)
+{
+ struct i2c_algo_dp_aux_data *algo_data = cookie;
+ struct i2c_controller *adapter = algo_data->adapter;
+ int ret = 0;
+ bool reading = false;
+ int b;
+ uint8_t *buf = buffer;
+
+ reading = I2C_OP_READ_P(op);
+ ret = i2c_algo_dp_aux_address(adapter, addr, reading);
+ if (ret < 0)
+ goto out;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else if (I2C_OP_WRITE_P(op)) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+
+out:
+ i2c_algo_dp_aux_stop(adapter, reading);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+void
+i2c_dp_aux_reset_bus(struct i2c_controller *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+int
+i2c_dp_aux_acquire_bus(void *cookie, int flags)
+{
+ return (0);
+}
+
+void
+i2c_dp_aux_release_bus(void *cookie, int flags)
+{
+ struct i2c_algo_dp_aux_data *algo_data = cookie;
+
+ i2c_dp_aux_reset_bus(algo_data->adapter);
+}
+
+int
+i2c_dp_aux_prepare_bus(struct i2c_controller *adapter)
+{
+#ifdef notyet
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+#endif
+ /* cookie set in driver */
+ adapter->ic_acquire_bus = i2c_dp_aux_acquire_bus;
+ adapter->ic_release_bus = i2c_dp_aux_release_bus;
+ adapter->ic_exec = i2c_algo_dp_aux_exec;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the ic_cookie member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
+int
+i2c_dp_aux_add_bus(struct i2c_controller *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+#ifdef notyet
+ error = i2c_add_adapter(adapter);
+#endif
+ return error;
+}
+EXPORT_SYMBOL(i2c_dp_aux_add_bus);
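+
+/*
+ * Usage sketch (hypothetical "mydrv" fields): the driver fills out an
+ * i2c_algo_dp_aux_data, points the adapter's ic_cookie at it (see
+ * i2c_dp_aux_prepare_bus() above) and then registers the bus.
+ */
+#if 0
+	struct i2c_algo_dp_aux_data *algo = &mydrv->dp_algo;
+
+	algo->adapter = &mydrv->dp_adapter;
+	algo->aux_ch = mydrv_dp_aux_ch;	/* single-byte transfer callback */
+	mydrv->dp_adapter.ic_cookie = algo;
+	error = i2c_dp_aux_add_bus(&mydrv->dp_adapter);
+#endif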
+
+/* Helpers for DP link training */
+u8
+dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+u8
+dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool
+drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool
+drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
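+
+/*
+ * Usage sketch: a link-training loop reads the six status bytes at
+ * DPCD 0x202-0x207 into link_status[] and then asks these helpers
+ * whether the current training phase has converged.
+ */
+#if 0
+	u8 link_status[DP_LINK_STATUS_SIZE];
+
+	/* after reading DP_LANE0_1_STATUS onwards into link_status ... */
+	if (!drm_dp_clock_recovery_ok(link_status, lane_count)) {
+		/* raise swing/pre-emphasis per the adjust helpers, retry */
+	} else if (!drm_dp_channel_eq_ok(link_status, lane_count)) {
+		/* move to or retry the channel eq pattern */
+	}
+#endif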
+
+u8
+drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8
+drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void
+drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ DELAY(100);
+ else
+ DELAY(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4 * 1000);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void
+drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ DELAY(400);
+ else
+ DELAY(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4 * 1000);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8
+drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int
+drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
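+
+/*
+ * Usage sketch: the raw DPCD byte holds the bandwidth code, so a
+ * sink's maximum link rate (162000 for 1.62 Gbps and so on) falls
+ * out of a single conversion.
+ */
+#if 0
+	int max_rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+#endif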
diff --git a/sys/dev/pci/drm/drm_dp_helper.h b/sys/dev/pci/drm/drm_dp_helper.h
new file mode 100644
index 00000000000..c6f50495602
--- /dev/null
+++ b/sys/dev/pci/drm/drm_dp_helper.h
@@ -0,0 +1,362 @@
+/* $OpenBSD: drm_dp_helper.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+/*
+ * Unless otherwise noted, all values are from the DP 1.1a spec. Note that
+ * DP and DPCD versions are independent. Differences from 1.0 are not noted;
+ * 1.0 devices basically don't exist in the wild.
+ *
+ * Abbreviations, in chronological order:
+ *
+ * eDP: Embedded DisplayPort version 1
+ * DPI: DisplayPort Interoperability Guideline v1.1a
+ * 1.2: DisplayPort 1.2
+ *
+ * 1.2 formally includes both eDP and DPI definitions.
+ */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
+
+/* AUX CH addresses */
+/* DPCD */
+#define DP_DPCD_REV 0x000
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+/* 00b = DisplayPort */
+/* 01b = Analog */
+/* 10b = TMDS or HDMI */
+/* 11b = Other */
+# define DP_FORMAT_CONVERSION (1 << 3)
+# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+
+#define DP_DOWN_STREAM_PORT_COUNT 0x007
+# define DP_PORT_COUNT_MASK 0x0f
+# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
+# define DP_OUI_SUPPORT (1 << 7)
+
+#define DP_I2C_SPEED_CAP 0x00c /* DPI */
+# define DP_I2C_SPEED_1K 0x01
+# define DP_I2C_SPEED_5K 0x02
+# define DP_I2C_SPEED_10K 0x04
+# define DP_I2C_SPEED_100K 0x08
+# define DP_I2C_SPEED_400K 0x10
+# define DP_I2C_SPEED_1M 0x20
+
+#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
+#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+
+/* Multiple stream transport */
+#define DP_MSTM_CAP 0x021 /* 1.2 */
+# define DP_MST_CAP (1 << 0)
+
+#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
+# define DP_PSR_IS_SUPPORTED 1
+#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
+# define DP_PSR_NO_TRAIN_ON_EXIT 1
+# define DP_PSR_SETUP_TIME_330 (0 << 1)
+# define DP_PSR_SETUP_TIME_275 (1 << 1)
+# define DP_PSR_SETUP_TIME_220 (2 << 1)
+# define DP_PSR_SETUP_TIME_165 (3 << 1)
+# define DP_PSR_SETUP_TIME_110 (4 << 1)
+# define DP_PSR_SETUP_TIME_55 (5 << 1)
+# define DP_PSR_SETUP_TIME_0 (6 << 1)
+# define DP_PSR_SETUP_TIME_MASK (7 << 1)
+# define DP_PSR_SETUP_TIME_SHIFT 1
+
+/*
+ * 0x80-0x8f describe downstream port capabilities, but there are two layouts
+ * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
+ * each port's descriptor is one byte wide. If it was set, each port's
+ * descriptor is four bytes wide, starting with the one byte from the base
+ * info. As of
+ * DP interop v1.1a only VGA defines additional detail.
+ */
+
+/* offset 0 */
+#define DP_DOWNSTREAM_PORT_0 0x80
+# define DP_DS_PORT_TYPE_MASK (7 << 0)
+# define DP_DS_PORT_TYPE_DP 0
+# define DP_DS_PORT_TYPE_VGA 1
+# define DP_DS_PORT_TYPE_DVI 2
+# define DP_DS_PORT_TYPE_HDMI 3
+# define DP_DS_PORT_TYPE_NON_EDID 4
+# define DP_DS_PORT_HPD (1 << 3)
+/* offset 1 for VGA is maximum megapixels per second / 8 */
+/* offset 2 */
+# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
+# define DP_DS_VGA_8BPC 0
+# define DP_DS_VGA_10BPC 1
+# define DP_DS_VGA_12BPC 2
+# define DP_DS_VGA_16BPC 3
+
+/* link configuration */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+# define DP_LINK_BW_5_4 0x14 /* 1.2 */
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
+/* bitmask as for DP_I2C_SPEED_CAP */
+
+#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
+
+#define DP_MSTM_CTRL 0x111 /* 1.2 */
+# define DP_MST_EN (1 << 0)
+# define DP_UP_REQ_EN (1 << 1)
+# define DP_UPSTREAM_IS_SRC (1 << 2)
+
+#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
+# define DP_PSR_ENABLE (1 << 0)
+# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
+# define DP_PSR_CRC_VERIFICATION (1 << 2)
+# define DP_PSR_FRAME_CAPTURE (1 << 3)
+
+#define DP_SINK_COUNT 0x200
+/* prior to 1.2 bit 7 was reserved mbz */
+# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
+# define DP_SINK_CP_READY (1 << 6)
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
+# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
+# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
+# define DP_CP_IRQ (1 << 2)
+# define DP_SINK_SPECIFIC_IRQ (1 << 6)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+ DP_LANE_CHANNEL_EQ_DONE | \
+ DP_LANE_SYMBOL_LOCKED)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+#define DP_TEST_REQUEST 0x218
+# define DP_TEST_LINK_TRAINING (1 << 0)
+# define DP_TEST_LINK_PATTERN (1 << 1)
+# define DP_TEST_LINK_EDID_READ (1 << 2)
+# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
+
+#define DP_TEST_LINK_RATE 0x219
+# define DP_LINK_RATE_162 (0x6)
+# define DP_LINK_RATE_27 (0xa)
+
+#define DP_TEST_LANE_COUNT 0x220
+
+#define DP_TEST_PATTERN 0x221
+
+#define DP_TEST_RESPONSE 0x260
+# define DP_TEST_ACK (1 << 0)
+# define DP_TEST_NAK (1 << 1)
+# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+
+#define DP_SOURCE_OUI 0x300
+#define DP_SINK_OUI 0x400
+#define DP_BRANCH_OUI 0x500
+
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+
+#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
+# define DP_PSR_LINK_CRC_ERROR (1 << 0)
+# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+
+#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
+# define DP_PSR_CAPS_CHANGE (1 << 0)
+
+#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */
+# define DP_PSR_SINK_INACTIVE 0
+# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
+# define DP_PSR_SINK_ACTIVE_RFB 2
+# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
+# define DP_PSR_SINK_ACTIVE_RESYNC 4
+# define DP_PSR_SINK_INTERNAL_ERROR 7
+# define DP_PSR_SINK_STATE_MASK 0x07
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ * aux algorithm
+ * @running: set by the algo indicating whether an i2c transfer is ongoing or
+ * whether the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
+struct i2c_algo_dp_aux_data {
+ struct i2c_controller *adapter;
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_controller *adapter,
+ int mode, uint8_t write_byte,
+ uint8_t *read_byte);
+};
+
+int
+i2c_dp_aux_add_bus(struct i2c_controller *adapter);
+
+
+#define DP_LINK_STATUS_SIZE 6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+
+#define DP_RECEIVER_CAP_SIZE 0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+static inline int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/sys/dev/pci/drm/drm_drv.c b/sys/dev/pci/drm/drm_drv.c
index 16b0f32e2bd..a62b198eb43 100644
--- a/sys/dev/pci/drm/drm_drv.c
+++ b/sys/dev/pci/drm/drm_drv.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_drv.c,v 1.99 2012/12/06 15:05:21 mpi Exp $ */
+/* $OpenBSD: drm_drv.c,v 1.100 2013/03/18 12:36:51 jsg Exp $ */
/*-
* Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
* Copyright © 2008 Intel Corporation
@@ -86,6 +86,8 @@ boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
SPLAY_PROTOTYPE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
SPLAY_PROTOTYPE(drm_name_tree, drm_obj, entry, drm_name_cmp);
+int drm_getcap(struct drm_device *, void *, struct drm_file *);
+
/*
* attach drm to a pci-based driver.
*
@@ -93,10 +95,11 @@ SPLAY_PROTOTYPE(drm_name_tree, drm_obj, entry, drm_name_cmp);
* drm_attach_args.
*/
struct device *
-drm_attach_pci(const struct drm_driver_info *driver, struct pci_attach_args *pa,
+drm_attach_pci(struct drm_driver_info *driver, struct pci_attach_args *pa,
int is_agp, struct device *dev)
{
struct drm_attach_args arg;
+ pcireg_t subsys;
arg.driver = driver;
arg.dmat = pa->pa_dmat;
@@ -104,6 +107,13 @@ drm_attach_pci(const struct drm_driver_info *driver, struct pci_attach_args *pa,
arg.irq = pa->pa_intrline;
arg.is_agp = is_agp;
+ arg.pci_vendor = PCI_VENDOR(pa->pa_id);
+ arg.pci_device = PCI_PRODUCT(pa->pa_id);
+
+ subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
+ arg.pci_subvendor = PCI_VENDOR(subsys);
+ arg.pci_subdevice = PCI_PRODUCT(subsys);
+
arg.busid_len = 20;
arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
if (arg.busid == NULL) {
@@ -159,6 +169,10 @@ drm_attach(struct device *parent, struct device *self, void *aux)
dev->irq = da->irq;
dev->unique = da->busid;
dev->unique_len = da->busid_len;
+ dev->pci_vendor = da->pci_vendor;
+ dev->pci_device = da->pci_device;
+ dev->pci_subvendor = da->pci_subvendor;
+ dev->pci_subdevice = da->pci_subdevice;
rw_init(&dev->dev_lock, "drmdevlk");
mtx_init(&dev->lock.spinlock, IPL_NONE);
@@ -166,12 +180,7 @@ drm_attach(struct device *parent, struct device *self, void *aux)
TAILQ_INIT(&dev->maplist);
SPLAY_INIT(&dev->files);
-
- if (dev->driver->vblank_pipes != 0 && drm_vblank_init(dev,
- dev->driver->vblank_pipes)) {
- printf(": failed to allocate vblank data\n");
- goto error;
- }
+ TAILQ_INIT(&dev->vbl_events);
/*
* the dma buffers api is just weird. offset 1Gb to ensure we don't
@@ -318,14 +327,16 @@ drm_firstopen(struct drm_device *dev)
if (dev->driver->firstopen)
dev->driver->firstopen(dev);
- if (dev->driver->flags & DRIVER_DMA) {
+ if (drm_core_check_feature(dev, DRIVER_DMA) &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
if ((i = drm_dma_setup(dev)) != 0)
return (i);
}
dev->magicid = 1;
- dev->irq_enabled = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev->irq_enabled = 0;
dev->if_version = 0;
dev->buf_pgid = 0;
@@ -345,16 +356,19 @@ drm_lastclose(struct drm_device *dev)
if (dev->driver->lastclose != NULL)
dev->driver->lastclose(dev);
- if (dev->irq_enabled)
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
drm_irq_uninstall(dev);
#if __OS_HAS_AGP
- drm_agp_takedown(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_agp_takedown(dev);
#endif
- drm_dma_takedown(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_dma_takedown(dev);
DRM_LOCK();
- if (dev->sg != NULL) {
+ if (dev->sg != NULL &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_sg_mem *sg = dev->sg;
dev->sg = NULL;
@@ -416,6 +430,7 @@ drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
file_priv->kdev = kdev;
file_priv->flags = flags;
file_priv->minor = minor(kdev);
+ INIT_LIST_HEAD(&file_priv->fbs);
TAILQ_INIT(&file_priv->evlist);
file_priv->event_space = 4096; /* 4k for event buffer */
DRM_DEBUG("minor = %d\n", file_priv->minor);
@@ -465,7 +480,8 @@ drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
struct drm_device *dev = drm_get_device_from_kdev(kdev);
struct drm_file *file_priv;
struct drm_pending_event *ev, *evtmp;
- int i, retcode = 0;
+ struct drm_pending_vblank_event *vev;
+ int retcode = 0;
if (dev == NULL)
return (ENXIO);
@@ -500,16 +516,15 @@ drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
drm_reclaim_buffers(dev, file_priv);
mtx_enter(&dev->event_lock);
- for (i = 0; i < dev->vblank->vb_num; i++) {
- struct drmevlist *list = &dev->vblank->vb_crtcs[i].vbl_events;
- for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list);
- ev = evtmp) {
- evtmp = TAILQ_NEXT(ev, link);
- if (ev->file_priv == file_priv) {
- TAILQ_REMOVE(list, ev, link);
- drm_vblank_put(dev, i);
- ev->destroy(ev);
- }
+ struct drmevlist *list = &dev->vbl_events;
+ for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list);
+ ev = evtmp) {
+ evtmp = TAILQ_NEXT(ev, link);
+ vev = (struct drm_pending_vblank_event *)ev;
+ if (ev->file_priv == file_priv) {
+ TAILQ_REMOVE(list, ev, link);
+ drm_vblank_put(dev, vev->pipe);
+ ev->destroy(ev);
}
}
while ((ev = TAILQ_FIRST(&file_priv->evlist)) != NULL) {
@@ -518,6 +533,9 @@ drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
}
mtx_leave(&dev->event_lock);
+ if (dev->driver->flags & DRIVER_MODESET)
+ drm_fb_release(dev, file_priv);
+
DRM_LOCK();
if (dev->driver->flags & DRIVER_GEM) {
struct drm_handle *han;
@@ -647,6 +665,8 @@ drmioctl(dev_t kdev, u_long cmd, caddr_t data, int flags,
return (drm_gem_flink_ioctl(dev, data, file_priv));
case DRM_IOCTL_GEM_OPEN:
return (drm_gem_open_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_GET_CAP:
+ return (drm_getcap(dev, data, file_priv));
}
}
@@ -706,6 +726,60 @@ drmioctl(dev_t kdev, u_long cmd, caddr_t data, int flags,
* requested version 1.1 or greater.
*/
return (EBUSY);
+ case DRM_IOCTL_MODE_GETRESOURCES:
+ return drm_mode_getresources(dev, data, file_priv);
+ case DRM_IOCTL_MODE_GETPLANERESOURCES:
+ return drm_mode_getplane_res(dev, data, file_priv);
+ case DRM_IOCTL_MODE_GETCRTC:
+ return drm_mode_getcrtc(dev, data, file_priv);
+ case DRM_IOCTL_MODE_SETCRTC:
+ return drm_mode_setcrtc(dev, data, file_priv);
+ case DRM_IOCTL_MODE_GETPLANE:
+ return drm_mode_getplane(dev, data, file_priv);
+ case DRM_IOCTL_MODE_SETPLANE:
+ return drm_mode_setplane(dev, data, file_priv);
+ case DRM_IOCTL_MODE_CURSOR:
+ return drm_mode_cursor_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_GETGAMMA:
+ return drm_mode_gamma_get_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_SETGAMMA:
+ return drm_mode_gamma_set_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_GETENCODER:
+ return drm_mode_getencoder(dev, data, file_priv);
+ case DRM_IOCTL_MODE_GETCONNECTOR:
+ return drm_mode_getconnector(dev, data, file_priv);
+ case DRM_IOCTL_MODE_ATTACHMODE:
+ return drm_mode_attachmode_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_DETACHMODE:
+ return drm_mode_detachmode_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_GETPROPERTY:
+ return drm_mode_getproperty_ioctl(dev, data,
+ file_priv);
+ case DRM_IOCTL_MODE_SETPROPERTY:
+ return drm_mode_connector_property_set_ioctl(dev,
+ data, file_priv);
+ case DRM_IOCTL_MODE_GETPROPBLOB:
+ return drm_mode_getblob_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_GETFB:
+ return drm_mode_getfb(dev, data, file_priv);
+ case DRM_IOCTL_MODE_ADDFB:
+ return drm_mode_addfb(dev, data, file_priv);
+ case DRM_IOCTL_MODE_ADDFB2:
+ return drm_mode_addfb2(dev, data, file_priv);
+ case DRM_IOCTL_MODE_RMFB:
+ return drm_mode_rmfb(dev, data, file_priv);
+ case DRM_IOCTL_MODE_PAGE_FLIP:
+ return drm_mode_page_flip_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_DIRTYFB:
+ return drm_mode_dirtyfb_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_CREATE_DUMB:
+ return drm_mode_create_dumb_ioctl(dev, data,
+ file_priv);
+ case DRM_IOCTL_MODE_MAP_DUMB:
+ return drm_mode_mmap_dumb_ioctl(dev, data, file_priv);
+ case DRM_IOCTL_MODE_DESTROY_DUMB:
+ return drm_mode_destroy_dumb_ioctl(dev, data,
+ file_priv);
}
}
if (dev->driver->ioctl != NULL)
@@ -778,7 +852,7 @@ int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
size_t resid, struct drm_pending_event **out)
{
- struct drm_pending_event *ev;
+ struct drm_pending_event *ev = NULL;
int gotone = 0;
MUTEX_ASSERT_LOCKED(&dev->event_lock);
@@ -949,6 +1023,32 @@ drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
return 0;
}
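+
+/*
+ * Report device capabilities to userland; dispatched from drmioctl()
+ * via DRM_IOCTL_GET_CAP above.
+ */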
+int
+drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_get_cap *req = data;
+
+ req->value = 0;
+ switch (req->capability) {
+ case DRM_CAP_DUMB_BUFFER:
+ if (dev->driver->dumb_create)
+ req->value = 1;
+ break;
+ case DRM_CAP_VBLANK_HIGH_CRTC:
+ req->value = 1;
+ break;
+ case DRM_CAP_DUMB_PREFERRED_DEPTH:
+ req->value = dev->mode_config.preferred_depth;
+ break;
+ case DRM_CAP_DUMB_PREFER_SHADOW:
+ req->value = dev->mode_config.prefer_shadow;
+ break;
+ default:
+ return EINVAL;
+ }
+ return 0;
+}
+
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 2
diff --git a/sys/dev/pci/drm/drm_edid.c b/sys/dev/pci/drm/drm_edid.c
new file mode 100644
index 00000000000..dc9f4b93134
--- /dev/null
+++ b/sys/dev/pci/drm/drm_edid.c
@@ -0,0 +1,2183 @@
+/* $OpenBSD: drm_edid.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_edid_modes.h"
+
+#include <dev/i2c/i2cvar.h>
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+u8 *drm_do_get_edid(struct drm_connector *, struct i2c_controller *);
+int drm_do_probe_ddc_edid(struct i2c_controller *, unsigned char *, int,
+ int);
+bool drm_edid_is_zero(u8 *, int);
+bool edid_vendor(struct edid *, char *);
+u32 edid_get_quirks(struct edid *);
+void edid_fixup_preferred(struct drm_connector *, u32);
+void cea_for_each_detailed_block(u8 *, detailed_cb *, void *);
+void vtb_for_each_detailed_block(u8 *, detailed_cb *, void *);
+void drm_for_each_detailed_block(u8 *, detailed_cb *, void *);
+void is_rb(struct detailed_timing *, void *);
+bool drm_monitor_supports_rb(struct edid *);
+void find_gtf2(struct detailed_timing *, void *);
+int drm_gtf2_hbreak(struct edid *);
+int drm_gtf2_2c(struct edid *);
+int drm_gtf2_m(struct edid *);
+int drm_gtf2_k(struct edid *);
+int drm_gtf2_2j(struct edid *);
+int standard_timing_level(struct edid *);
+int bad_std_timing(u8, u8);
+struct drm_display_mode *drm_mode_std(struct drm_connector *, struct edid *,
+ struct std_timing *, int);
+void drm_mode_do_interlace_quirk(struct drm_display_mode *,
+ struct detailed_pixel_timing *);
+struct drm_display_mode *
+ drm_mode_detailed(struct drm_device *, struct edid *,
+ struct detailed_timing *, u32);
+bool mode_is_rb(const struct drm_display_mode *);
+bool mode_in_hsync_range(const struct drm_display_mode *, struct edid *,
+ u8 *);
+bool mode_in_vsync_range(const struct drm_display_mode *,
+ struct edid *, u8 *);
+u32 range_pixel_clock(struct edid *, u8 *);
+bool mode_in_range(const struct drm_display_mode *, struct edid *,
+ struct detailed_timing *);
+int drm_gtf_modes_for_range(struct drm_connector *, struct edid *,
+ struct detailed_timing *);
+void do_inferred_modes(struct detailed_timing *, void *);
+int add_inferred_modes(struct drm_connector *, struct edid *);
+int drm_est3_modes(struct drm_connector *, struct detailed_timing *);
+void do_established_modes(struct detailed_timing *, void *);
+int add_established_modes(struct drm_connector *, struct edid *);
+void do_standard_modes(struct detailed_timing *, void *);
+int add_standard_modes(struct drm_connector *, struct edid *);
+int drm_cvt_modes(struct drm_connector *, struct detailed_timing *);
+void do_cvt_mode(struct detailed_timing *, void *);
+int add_cvt_modes(struct drm_connector *, struct edid *);
+void do_detailed_mode(struct detailed_timing *, void *);
+int add_detailed_modes(struct drm_connector *, struct edid *, u32);
+void drm_add_display_info(struct edid *, struct drm_display_info *);
+void parse_hdmi_vsdb(struct drm_connector *, const u8 *);
+void monitor_name(struct detailed_timing *, void *);
+bool valid_inferred_mode(const struct drm_connector *,
+ const struct drm_display_mode *);
+int drm_dmt_modes_for_range(struct drm_connector *, struct edid *,
+ struct detailed_timing *);
+void fixup_mode_1366x768(struct drm_display_mode *);
+int drm_cvt_modes_for_range(struct drm_connector *, struct edid *,
+ struct detailed_timing *);
+int do_cea_modes(struct drm_connector *, u8 *, u8);
+int cea_db_payload_len(const u8 *);
+int cea_db_tag(const u8 *);
+int cea_revision(const u8 *);
+int cea_db_offsets(const u8 *, int *, int *);
+int add_cea_modes(struct drm_connector *, struct edid *);
+bool cea_db_is_hdmi_vsdb(const u8 *);
+
+#define version_greater(edid, maj, min) \
+ (((edid)->version > (maj)) || \
+ ((edid)->version == (maj) && (edid)->revision > (min)))
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+
+struct detailed_mode_closure {
+ struct drm_connector *connector;
+ struct edid *edid;
+ bool preferred;
+ u32 quirks;
+ int modes;
+};
+
+#define LEVEL_DMT 0
+#define LEVEL_GTF 1
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
+
+static struct edid_quirk {
+ char vendor[4];
+ int product_id;
+ u32 quirks;
+} edid_quirk_list[] = {
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+ { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Unknown Acer */
+ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Belinea 10 15 55 */
+ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+ { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Envision Peripherals, Inc. EN-7100e */
+ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ /* Envision EN2028 */
+ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Funai Electronics PM36B */
+ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM },
+
+ /* LG Philips LCD LP154W01-A5 */
+ { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+ { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+ /* Philips 107p5 CRT */
+ { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Proview AY765C */
+ { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Samsung SyncMaster 205BW. Note: irony */
+ { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+ /* Samsung SyncMaster 22[5-6]BW */
+ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+};
+
+/*** DDC fetch and block validation ***/
+
+static const u8 edid_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/*
+ * Sanity check the header of the base EDID block. Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int
+drm_edid_header_is_valid(const u8 *raw_edid)
+{
+ int i, score = 0;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ return score;
+}
+EXPORT_SYMBOL(drm_edid_header_is_valid);
+
+/* Minimum number of valid EDID header bytes (0-8, default 6) */
+static int edid_fixup = 6;
+
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
+ */
+bool
+drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
+{
+ int i;
+ u8 csum = 0;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid_fixup > 8 || edid_fixup < 0)
+ edid_fixup = 6;
+
+ if (block == 0) {
+ int score = drm_edid_header_is_valid(raw_edid);
+ if (score == 8) {
+ /* perfect header, nothing to fix */
+ } else if (score >= edid_fixup) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
+
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+ if (csum) {
+ if (print_bad_edid) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ }
+
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
+ }
+
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
+
+ return 1;
+
+bad:
+ if (raw_edid && print_bad_edid) {
+ printf("Raw EDID:\n");
+ for (i = 0; i < EDID_LENGTH; i++) {
+ if (i % 16 == 0)
+ printf("\n");
+ else if (i % 8 == 0)
+ printf(" ");
+ printf("%02x ", raw_edid[i]);
+ }
+ printf("\n");
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_edid_block_valid);
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool
+drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(drm_edid_is_valid);
+
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c bus adapter
+ * \param buf : EDID data buffer to be filled
+ * \param block : EDID block number to fetch
+ * \param len : EDID data buffer length
+ * \return 0 on success or an i2c error code on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
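+/*
+ * E-DDC addressing: EDID block N lives in 128-byte segment N / 2, at
+ * byte offset (N & 1) * 128 within it; the unsigned char arithmetic
+ * on 'start' below computes that offset by wrapping modulo 256.
+ */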
+int
+drm_do_probe_ddc_edid(struct i2c_controller *adapter, unsigned char *buf,
+ int block, int len)
+{
+ uint8_t cmd = 0;
+ unsigned char start = block * EDID_LENGTH;
+ unsigned char segment = block >> 1;
+ int ret;
+
+ iic_acquire_bus(adapter, 0);
+ if (segment) {
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP,
+ DDC_SEGMENT_ADDR, &cmd, 1, &segment, 1, 0);
+ if (ret)
+ goto out;
+ }
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, DDC_ADDR, &cmd, 1,
+ &start, 1, 0);
+ if (ret)
+ goto out;
+ ret = iic_exec(adapter, I2C_OP_READ_WITH_STOP, DDC_ADDR, &cmd, 1,
+ buf, len, 0);
+out:
+ /* always release the bus, even on an i2c error */
+ iic_release_bus(adapter, 0);
+
+ return (ret);
+}
+
+bool
+drm_edid_is_zero(u8 *in_edid, int length)
+{
+ int i;
+ u32 *raw_edid = (u32 *)in_edid;
+
+ for (i = 0; i < length / 4; i++)
+ if (*(raw_edid + i) != 0)
+ return false;
+ return true;
+}
+
+u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_controller *adapter)
+{
+ int i, j = 0, valid_extensions = 0;
+ u8 *block, *new;
+ bool print_bad_edid = !connector->bad_edid_counter;
+
+ if ((block = malloc(EDID_LENGTH, M_DRM, M_WAITOK)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block, 0, print_bad_edid))
+ break;
+ if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ connector->null_edid_counter++;
+ goto carp;
+ }
+ }
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = malloc((block[0x7e] + 1) * EDID_LENGTH, M_DRM, M_WAITOK);
+ if (!new)
+ goto out;
+ bcopy(block, new, EDID_LENGTH);
+ free(block, M_DRM);
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter,
+ block + (valid_extensions + 1) * EDID_LENGTH,
+ j, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
+ valid_extensions++;
+ break;
+ }
+ }
+
+ if (i == 4 && print_bad_edid) {
+ printf("%s: Ignoring invalid EDID block %d.\n",
+ drm_get_connector_name(connector), j);
+
+ connector->bad_edid_counter++;
+ }
+ }
+
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = malloc((valid_extensions + 1) * EDID_LENGTH,
+ M_DRM, M_WAITOK);
+ if (!new)
+ goto out;
+ bcopy(block, new, (valid_extensions + 1) * EDID_LENGTH);
+ free(block, M_DRM);
+ block = new;
+ }
+
+ return block;
+
+carp:
+ if (print_bad_edid) {
+ printf("%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+ }
+ connector->bad_edid_counter++;
+
+out:
+ free(block, M_DRM);
+ return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c bus adapter
+ * \return true if a device acknowledges the DDC probe
+ */
+bool
+drm_probe_ddc(struct i2c_controller *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+EXPORT_SYMBOL(drm_probe_ddc);
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *
+drm_get_edid(struct drm_connector *connector,
+ struct i2c_controller *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+ return edid;
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
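+/*
+ * Illustrative example: the three letters are packed five bits each,
+ * 'A' == 1.  mfg_id = { 0x4c, 0x2d } decodes as (0x4c & 0x7c) >> 2 = 19
+ * -> 'S', ((0x4c & 0x3) << 3) | ((0x2d & 0xe0) >> 5) = 1 -> 'A', and
+ * 0x2d & 0x1f = 13 -> 'M': "SAM" (Samsung).
+ */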
+bool
+edid_vendor(struct edid *edid, char *vendor)
+{
+ char edid_vendor[3];
+
+ edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+ edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+ ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+ edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+ return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+u32
+edid_get_quirks(struct edid *edid)
+{
+ struct edid_quirk *quirk;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+
+ if (edid_vendor(edid, quirk->vendor) &&
+ (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ return quirk->quirks;
+ }
+
+ return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+void
+edid_fixup_preferred(struct drm_connector *connector,
+ u32 quirks)
+{
+ struct drm_display_mode *t, *cur_mode, *preferred_mode;
+ int target_refresh = 0;
+
+ if (list_empty(&connector->probed_modes))
+ return;
+
+ if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+ target_refresh = 60;
+ if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+ target_refresh = 75;
+
+ preferred_mode = list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+ cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+ if (cur_mode == preferred_mode)
+ continue;
+
+ /* Largest mode is preferred */
+ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+ preferred_mode = cur_mode;
+
+ /* At a given size, try to get closest to target refresh */
+ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+ MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+ MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+ preferred_mode = cur_mode;
+ }
+ }
+
+ preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
+bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
+struct drm_display_mode *
+drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb)
+{
+ int i;
+
+ for (i = 0; i < drm_num_dmt_modes; i++) {
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hsize != ptr->hdisplay)
+ continue;
+ if (vsize != ptr->vdisplay)
+ continue;
+ if (fresh != drm_mode_vrefresh(ptr))
+ continue;
+ if (rb != mode_is_rb(ptr))
+ continue;
+
+ return drm_mode_duplicate(dev, ptr);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ int i, n = 0;
+ u8 d = ext[0x02];
+ u8 *det_base = ext + d;
+
+ n = (127 - d) / 18;
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ unsigned int i, n = min((int)ext[0x02], 6);
+ u8 *det_base = ext + 5;
+
+ if (ext[0x01] != 1)
+ return; /* unknown version */
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ for (i = 1; i <= raw_edid[0x7e]; i++) {
+ u8 *ext = raw_edid + (i * EDID_LENGTH);
+ switch (*ext) {
+ case CEA_EXT:
+ cea_for_each_detailed_block(ext, cb, closure);
+ break;
+ case VTB_EXT:
+ vtb_for_each_detailed_block(ext, cb, closure);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+int
+standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
+
+/*
+ * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
+ * monitors fill with ascii space (0x20) instead.
+ */
+int
+bad_std_timing(u8 a, u8 b)
+{
+ return (a == 0x00 && b == 0x00) ||
+ (a == 0x01 && b == 0x01) ||
+ (a == 0x20 && b == 0x20);
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector the resulting mode will be probed on
+ * @edid: EDID block the timing comes from
+ * @t: standard timing params
+ * @revision: EDID revision of the timing's source block
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ */
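+/*
+ * Worked example (illustrative, not from a real monitor): the standard
+ * timing byte pair { 0x81, 0x80 } decodes below as hsize = 0x81 * 8 +
+ * 248 = 1280, aspect code 2 so vsize = (1280 * 4) / 5 = 1024, and
+ * vfreq = 0 + 60, i.e. 1280x1024@60.
+ */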
+struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
+ int hsize, vsize;
+ int vrefresh_rate;
+ unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+ >> EDID_TIMING_ASPECT_SHIFT;
+ unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+ >> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
+
+ if (bad_std_timing(t->hsize, t->vfreq_aspect))
+ return NULL;
+
+ /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+ hsize = t->hsize * 8 + 248;
+ /* vrefresh_rate = vfreq + 60 */
+ vrefresh_rate = vfreq + 60;
+ /* the vdisplay is calculated based on the aspect ratio */
+ if (aspect_ratio == 0) {
+ if (revision < 3)
+ vsize = hsize;
+ else
+ vsize = (hsize * 10) / 16;
+ } else if (aspect_ratio == 1)
+ vsize = (hsize * 3) / 4;
+ else if (aspect_ratio == 2)
+ vsize = (hsize * 4) / 5;
+ else
+ vsize = (hsize * 9) / 16;
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
+ false);
+ mode->hdisplay = 1366;
+ mode->hsync_start = mode->hsync_start - 1;
+ mode->hsync_end = mode->hsync_end - 1;
+ return mode;
+ }
+
+ /* check whether it can be found in default mode table */
+ if (drm_monitor_supports_rb(edid)) {
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+ true);
+ if (mode)
+ return mode;
+ }
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
+ if (mode)
+ return mode;
+
+ /* okay, generate it */
+ switch (timing_level) {
+ case LEVEL_DMT:
+ break;
+ case LEVEL_GTF:
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (!mode)
+ return NULL;
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ drm_mode_destroy(dev, mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
+ case LEVEL_CVT:
+ mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+ false);
+ break;
+ }
+ return mode;
+}
+
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+ struct detailed_pixel_timing *pt)
+{
+ int i;
+ static const struct {
+ int w, h;
+ } cea_interlaced[] = {
+ { 1920, 1080 },
+ { 720, 480 },
+ { 1440, 480 },
+ { 2880, 480 },
+ { 720, 576 },
+ { 1440, 576 },
+ { 2880, 576 },
+ };
+
+ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
+ if ((mode->hdisplay == cea_interlaced[i].w) &&
+ (mode->vdisplay == cea_interlaced[i].h / 2)) {
+ mode->vdisplay *= 2;
+ mode->vsync_start *= 2;
+ mode->vsync_end *= 2;
+ mode->vtotal *= 2;
+ mode->vtotal |= 1;
+ }
+ }
+
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.
+ */
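+/*
+ * Note the DTD pixel clock is stored little-endian in 10 kHz units;
+ * e.g. a stored 15000 (bytes 0x98 0x3a) yields mode->clock = 150000
+ * kHz (150 MHz) below.
+ */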
+struct drm_display_mode *
+drm_mode_detailed(struct drm_device *dev,
+ struct edid *edid,
+ struct detailed_timing *timing,
+ u32 quirks)
+{
+ struct drm_display_mode *mode;
+ struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+ unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+ unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+ unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+ unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+ /* ignore tiny modes */
+ if (hactive < 64 || vactive < 64)
+ return NULL;
+
+ if (pt->misc & DRM_EDID_PT_STEREO) {
+ printf("stereo mode not supported\n");
+ return NULL;
+ }
+ if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+ printf("composite sync not supported\n");
+ }
+
+ /* a detailed timing with zero hsync/vsync pulse width is invalid */
+ if (!hsync_pulse_width || !vsync_pulse_width) {
+ DRM_DEBUG_KMS("Incorrect Detailed timing. "
+ "Wrong Hsync/Vsync pulse width\n");
+ return NULL;
+ }
+
+ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
+ goto set_size;
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ timing->pixel_clock = htole16(1088);
+
+ mode->clock = letoh16(timing->pixel_clock) * 10;
+
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hsync_offset;
+ mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+ mode->htotal = mode->hdisplay + hblank;
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vsync_offset;
+ mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+ /* Some EDIDs have bogus h/vtotal values */
+ if (mode->hsync_end > mode->htotal)
+ mode->htotal = mode->hsync_end + 1;
+ if (mode->vsync_end > mode->vtotal)
+ mode->vtotal = mode->vsync_end + 1;
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+ }
+
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+set_size:
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+ if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ mode->width_mm *= 10;
+ mode->height_mm *= 10;
+ }
+
+ if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ mode->width_mm = edid->width_cm * 10;
+ mode->height_mm = edid->height_cm * 10;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
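+/*
+ * In the range checks below, u8 *t is the raw 18-byte range-limits
+ * descriptor: t[5]/t[6] hold the min/max vertical rate, t[7]/t[8] the
+ * min/max horizontal rate in kHz, t[9] the max pixel clock in 10 MHz
+ * units, and t[4] the EDID 1.4 offset flags.
+ */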
+bool
+mode_in_hsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
+ hsync = drm_mode_hsync(mode);
+
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+bool
+mode_in_vsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
+u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
+bool
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
+ return false;
+
+ if (!mode_in_vsync_range(mode, edid, t))
+ return false;
+
+ if ((max_clock = range_pixel_clock(edid, t)))
+ if (mode->clock > max_clock)
+ return false;
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
+
+ return true;
+}
+
+bool
+valid_inferred_mode(const struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+ bool ok = false;
+
+ list_for_each_entry(m, &connector->probed_modes, head) {
+ if (mode->hdisplay == m->hdisplay &&
+ mode->vdisplay == m->vdisplay &&
+ drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+ return false; /* duplicated */
+ if (mode->hdisplay <= m->hdisplay &&
+ mode->vdisplay <= m->vdisplay)
+ ok = true;
+ }
+ return ok;
+}
+
+int
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < drm_num_dmt_modes; i++) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ valid_inferred_mode(connector, drm_dmt_modes + i)) {
+ newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+/* fix up 1366x768 mode from 1368x768;
+ * GTF/CVT can't express a 1366 width, which isn't divisible by 8
+ */
+void
+fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
+}
+
+int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < num_extra_modes; i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ bool rb = drm_monitor_supports_rb(edid);
+
+ for (i = 0; i < num_extra_modes; i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+void
+do_inferred_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ return;
+
+ closure->modes += drm_dmt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+
+ if (!version_greater(closure->edid, 1, 1))
+ return; /* GTF not defined yet */
+
+ switch (range->flags) {
+ case 0x02: /* secondary gtf, XXX could do more */
+ case 0x00: /* default gtf */
+ closure->modes += drm_gtf_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x04: /* cvt, only in 1.4+ */
+ if (!version_greater(closure->edid, 1, 3))
+ break;
+
+ closure->modes += drm_cvt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x01: /* just the ranges, no formula */
+ default:
+ break;
+ }
+}
+
+int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+ &closure);
+
+ return closure.modes;
+}
+
+int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j >= 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= ARRAY_SIZE(est3_modes))
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r,
+ est3_modes[m].rb);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+void
+do_established_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_EST_TIMINGS)
+ closure->modes += drm_est3_modes(closure->connector, timing);
+}
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above). Tease them out and add them to the global modes list.
+ */
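+/*
+ * The three bitmap bytes cover 17 timings: t1 supplies bits 0-7, t2
+ * bits 8-15, and bit 7 of mfg_rsvd becomes bit 16, hence the
+ * "<= EDID_EST_TIMINGS" loop bound below.
+ */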
+int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ unsigned long est_bits = edid->established_timings.t1 |
+ (edid->established_timings.t2 << 8) |
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+ if (est_bits & (1<<i)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid,
+ do_established_modes, &closure);
+
+ return modes + closure.modes;
+}
+
+void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct drm_connector *connector = closure->connector;
+ struct edid *edid = closure->edid;
+
+ if (data->type == EDID_DETAIL_STD_MODES) {
+ int i;
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
+ }
+ }
+ }
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
+ */
+int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+ &closure);
+
+ /* XXX should also look for standard codes in VTB blocks */
+
+ return modes + closure.modes;
+}
+
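+/*
+ * Worked example (illustrative): the CVT 3-byte code { 0x1b, 0x24, 0x08 }
+ * decodes below as height = (0x1b + (0x20 << 4) + 1) * 2 = 1080, aspect
+ * bits 0x04 give width = 1080 * 16 / 9 = 1920, and bit 3 of the third
+ * byte selects rates[3] = 60, i.e. 1920x1080@60.  (Bit 0, 60Hz reduced
+ * blanking, is never reached since the rate loop starts at j = 1.)
+ */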
+int
+drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
+
+ for (i = 0; i < 4; i++) {
+ int width, height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
+
+ if (!memcmp(cvt->code, empty, 3))
+ continue;
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
+ case 0x00:
+ width = height * 4 / 3;
+ break;
+ case 0x04:
+ width = height * 16 / 9;
+ break;
+ case 0x08:
+ width = height * 16 / 10;
+ break;
+ case 0x0c:
+ width = height * 15 / 9;
+ break;
+ }
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+void
+do_cvt_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_CVT_3BYTE)
+ closure->modes += drm_cvt_modes(closure->connector, timing);
+}
+
+int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 2))
+ drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
+
+ /* XXX should also look for CVT codes in VTB blocks */
+
+ return closure.modes;
+}
+
+void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_mode *newmode;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
+
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = 0;
+ }
+}
+
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ */
+int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ u32 quirks)
+{
+ struct detailed_mode_closure closure = {
+ connector,
+ edid,
+ 1,
+ quirks,
+ 0
+ };
+
+ if (closure.preferred && !version_greater(edid, 1, 3))
+ closure.preferred =
+ (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+ drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+ return closure.modes;
+}
+
+#define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK 0x01
+#define VIDEO_BLOCK 0x02
+#define VENDOR_BLOCK 0x03
+#define SPEAKER_BLOCK 0x04
+#define EDID_BASIC_AUDIO (1 << 6)
+#define EDID_CEA_YCRCB444 (1 << 5)
+#define EDID_CEA_YCRCB422 (1 << 4)
+
+/**
+ * Search EDID for CEA extension block.
+ */
+u8 *
+drm_find_cea_extension(struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+EXPORT_SYMBOL(drm_find_cea_extension);
+
+/*
+ * Looks for a CEA mode matching given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8
+drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+ struct drm_display_mode *cea_mode;
+ u8 mode;
+
+ for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+
+ if (drm_mode_equal(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
+
+int
+do_cea_modes(struct drm_connector *connector, u8 *db, u8 len)
+{
+ struct drm_device *dev = connector->dev;
+ u8 *mode, cea_mode;
+ int modes = 0;
+
+ for (mode = db; mode < db + len; mode++) {
+ cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+ if (cea_mode < drm_num_cea_modes) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev,
+ &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+int
+cea_db_payload_len(const u8 *db)
+{
+ return db[0] & 0x1f;
+}
+
+int
+cea_db_tag(const u8 *db)
+{
+ return db[0] >> 5;
+}
+
+int
+cea_revision(const u8 *cea)
+{
+ return cea[1];
+}
+
+int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+ /* Data block offset in CEA extension block */
+ *start = 4;
+ *end = cea[2];
+ if (*end == 0)
+ *end = 127;
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ return 0;
+}
+
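+/*
+ * Each CEA data block starts with a header byte holding the tag in
+ * bits 7-5 and the payload length in bits 4-0 (see cea_db_tag() and
+ * cea_db_payload_len() above), so this walks block headers by
+ * advancing payload length + 1 bytes at a time.
+ */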
+#define for_each_cea_db(cea, i, start, end) \
+ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
+int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ u8 *cea = drm_find_cea_extension(edid);
+ u8 *db, dbl;
+ int modes = 0;
+
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
+ return 0;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ if (cea_db_tag(db) == VIDEO_BLOCK)
+ modes += do_cea_modes(connector, db + 1, dbl);
+ }
+ }
+
+ return modes;
+}
+
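+/*
+ * Parse the HDMI Vendor Specific Data Block.  Per the HDMI VSDB layout,
+ * db[7] stores the maximum TMDS clock in 5 MHz units, so e.g. a raw
+ * value of 30 yields max_tmds_clock = 150 (MHz) below.
+ */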
+void
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
+{
+ u8 len = cea_db_payload_len(db);
+
+ if (len >= 6) {
+ connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+ connector->dvi_dual = db[6] & 1;
+ }
+ if (len >= 7)
+ connector->max_tmds_clock = db[7] * 5;
+ if (len >= 8) {
+ connector->latency_present[0] = db[8] >> 7;
+ connector->latency_present[1] = (db[8] >> 6) & 1;
+ }
+ if (len >= 9)
+ connector->video_latency[0] = db[9];
+ if (len >= 10)
+ connector->audio_latency[0] = db[10];
+ if (len >= 11)
+ connector->video_latency[1] = db[11];
+ if (len >= 12)
+ connector->audio_latency[1] = db[12];
+
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
+ "max TMDS clock %d, "
+ "latency present %d %d, "
+ "video latency %d %d, "
+ "audio latency %d %d\n",
+ connector->dvi_dual,
+ connector->max_tmds_clock,
+ (int) connector->latency_present[0],
+ (int) connector->latency_present[1],
+ connector->video_latency[0],
+ connector->video_latency[1],
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
+}
+
+void
+monitor_name(struct detailed_timing *t, void *data)
+{
+ if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+ *(u8 **)data = t->data.other_data.data.str.str;
+}
+
+bool
+cea_db_is_hdmi_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 5)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IDENTIFIER;
+}
+
+/**
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+ * Some ELD fields are left to the graphics driver caller:
+ * - Conn_Type
+ * - HDCP
+ * - Port_ID
+ */
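+/*
+ * Byte offsets written below follow the ELD layout this code fills in:
+ * version at byte 0, baseline size at 2, CEA version and monitor name
+ * length at 4, SAD count at 5, speaker allocation at 7, vendor and
+ * product IDs at 16-19, monitor name from byte 20, SADs after it.
+ */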
+void
+drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+ uint8_t *eld = connector->eld;
+ u8 *cea;
+ u8 *name;
+ u8 *db;
+ int sad_count = 0;
+ int mnl;
+ int dbl;
+
+ memset(eld, 0, sizeof(connector->eld));
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+ return;
+ }
+
+ name = NULL;
+ drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+ for (mnl = 0; name && mnl < 13; mnl++) {
+ if (name[mnl] == 0x0a)
+ break;
+ eld[20 + mnl] = name[mnl];
+ }
+ eld[4] = (cea[1] << 5) | mnl;
+ DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+ eld[0] = 2 << 3; /* ELD version: 2 */
+
+ eld[16] = edid->mfg_id[0];
+ eld[17] = edid->mfg_id[1];
+ eld[18] = edid->prod_code[0];
+ eld[19] = edid->prod_code[1];
+
+ if (cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ start = 0;
+ end = 0;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ switch (cea_db_tag(db)) {
+ case AUDIO_BLOCK:
+ /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ if (dbl >= 1)
+ memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK:
+ /* Speaker Allocation Data Block */
+ if (dbl >= 1)
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (cea_db_is_hdmi_vsdb(db))
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ eld[5] |= sad_count << 4;
+ eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+EXPORT_SYMBOL(drm_edid_to_eld);
+
+/**
+ * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ */
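+/*
+ * Illustrative example: raw video latency 20 and audio latency 10
+ * decode below to 2 * (20 - 1) = 38 ms and 2 * (10 - 1) = 18 ms, so
+ * the function returns max(38 - 18, 0) = 20 ms of delay to apply to
+ * audio.
+ */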
+int
+drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ int a, v;
+
+ if (!connector->latency_present[0])
+ return 0;
+ if (!connector->latency_present[1])
+ i = 0;
+
+ a = connector->audio_latency[i];
+ v = connector->video_latency[i];
+
+ /*
+ * HDMI/DP sink doesn't support audio or video?
+ */
+ if (a == 255 || v == 255)
+ return 0;
+
+ /*
+ * Convert raw EDID values to milliseconds.
+ * Treat unknown latency as 0ms.
+ */
+ if (a)
+ a = min(2 * (a - 1), 500);
+ if (v)
+ v = min(2 * (v - 1), 500);
+
+ return max(v - a, 0);
+}
+EXPORT_SYMBOL(drm_av_sync_delay);
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder whose display mode just changed
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ */
+struct drm_connector *
+drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder && connector->eld[0])
+ return connector;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_select_eld);
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is HDMI.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool
+drm_detect_hdmi_monitor(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
+
+ /*
+ * The HDMI identifier lives in the Vendor Specific Block, so
+ * search all data blocks of the CEA extension for it.
+ */
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ * @edid: monitor EDID information
+ *
+ * The monitor should have a CEA extension block. If the monitor has
+ * 'basic audio' but no CEA audio blocks, it's 'basic audio' only. If
+ * there is any audio extension block with a supported audio format,
+ * assume at least 'basic audio' support, even if 'basic audio' is not
+ * defined in the EDID.
+ */
+bool
+drm_detect_monitor_audio(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, j;
+ bool has_audio = false;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ goto end;
+
+ has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+ if (has_audio) {
+ DRM_DEBUG_KMS("Monitor has basic audio support\n");
+ goto end;
+ }
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ goto end;
+
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
+ has_audio = true;
+ for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
+ DRM_DEBUG_KMS("CEA audio format %d\n",
+ (edid_ext[i + j] >> 3) & 0xf);
+ goto end;
+ }
+ }
+end:
+ return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
+ * drm_add_display_info - pull display info out if present
+ * @edid: EDID data
+ * @info: display info (attached to connector)
+ *
+ * Grab any available display info and stuff it into the drm_display_info
+ * structure that's part of the connector. Useful for tracking bpp and
+ * color spaces.
+ */
+void
+drm_add_display_info(struct edid *edid,
+ struct drm_display_info *info)
+{
+ u8 *edid_ext;
+
+ info->width_mm = edid->width_cm * 10;
+ info->height_mm = edid->height_cm * 10;
+
+ /* driver figures it out in this case */
+ info->bpc = 0;
+ info->color_formats = 0;
+
+ if (edid->revision < 3)
+ return;
+
+ if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return;
+
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (edid_ext) {
+ info->cea_rev = edid_ext[1];
+
+ /* The existence of a CEA block should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ }
+
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+
+ switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+ case DRM_EDID_DIGITAL_DEPTH_6:
+ info->bpc = 6;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_8:
+ info->bpc = 8;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_10:
+ info->bpc = 10;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_12:
+ info->bpc = 12;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_14:
+ info->bpc = 14;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_16:
+ info->bpc = 16;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+ default:
+ info->bpc = 0;
+ break;
+ }
+
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+}
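+
+/*
+ * Illustrative use (sketch only): once the display info has been filled
+ * in, a driver can consult it, e.g. falling back to 8 bpc when the EDID
+ * left the depth undefined:
+ *
+ *	bpc = connector->display_info.bpc ? connector->display_info.bpc : 8;
+ */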
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int
+drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int num_modes = 0;
+ u32 quirks;
+
+ if (edid == NULL) {
+ return 0;
+ }
+ if (!drm_edid_is_valid(edid)) {
+ printf("%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ return 0;
+ }
+
+ quirks = edid_get_quirks(edid);
+
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We get this pretty much right.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
+ num_modes += add_detailed_modes(connector, edid, quirks);
+ num_modes += add_cvt_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+
+ drm_add_display_info(edid, &connector->display_info);
+
+ return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
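+
+/*
+ * Illustrative use (sketch only; my_connector_get_modes() and
+ * my_fetch_edid() are hypothetical names): a connector's get_modes()
+ * hook usually wraps this function:
+ *
+ *	static int
+ *	my_connector_get_modes(struct drm_connector *connector)
+ *	{
+ *		struct edid *edid = my_fetch_edid(connector);
+ *		int count = 0;
+ *
+ *		if (edid != NULL) {
+ *			count = drm_add_edid_modes(connector, edid);
+ *			free(edid, M_DRM);
+ *		}
+ *		return count;
+ *	}
+ */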
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. A mode is only
+ * added when its hdisplay/vdisplay do not exceed the given limits.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int
+drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay)
+{
+ int i, count, num_modes = 0;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+ if (hdisplay < 0)
+ hdisplay = 0;
+ if (vdisplay < 0)
+ vdisplay = 0;
+
+ for (i = 0; i < count; i++) {
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hdisplay && vdisplay) {
+ /*
+ * Only when both limits are valid are they used to
+ * decide whether the mode should be added to the
+ * connector's mode list.
+ */
+ if (ptr->hdisplay > hdisplay ||
+ ptr->vdisplay > vdisplay)
+ continue;
+ }
+ if (drm_mode_vrefresh(ptr) > 61)
+ continue;
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
+EXPORT_SYMBOL(drm_add_modes_noedid);
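+
+/*
+ * Illustrative use (sketch only): when no EDID can be read, a driver can
+ * still offer conservative defaults up to a chosen limit:
+ *
+ *	if (edid == NULL)
+ *		count = drm_add_modes_noedid(connector, 1024, 768);
+ */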
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, or 0 if it is not a CEA-861 mode.
+ */
+uint8_t
+drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+ uint8_t i;
+
+ for (i = 0; i < drm_num_cea_modes; i++)
+ if (drm_mode_equal(mode, &edid_cea_modes[i]))
+ return i + 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
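+
+/*
+ * Illustrative use (sketch only; "avi.video_code" is a hypothetical
+ * infoframe field): an HDMI driver would place the VIC in the AVI
+ * infoframe it sends to the sink:
+ *
+ *	avi.video_code = drm_mode_cea_vic(adjusted_mode);
+ */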
diff --git a/sys/dev/pci/drm/drm_edid.h b/sys/dev/pci/drm/drm_edid.h
new file mode 100644
index 00000000000..3559c3e6c4a
--- /dev/null
+++ b/sys/dev/pci/drm/drm_edid.h
@@ -0,0 +1,258 @@
+/* $OpenBSD: drm_edid.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_EDID_H__
+#define __DRM_EDID_H__
+
+#include <sys/types.h>
+
+#define EDID_LENGTH 128
+#define DDC_ADDR 0x50
+
+#define CEA_EXT 0x02
+#define VTB_EXT 0x10
+#define DI_EXT 0x40
+#define LS_EXT 0x50
+#define MI_EXT 0x60
+
+struct est_timings {
+ u8 t1;
+ u8 t2;
+ u8 mfg_rsvd;
+} __attribute__((packed));
+
+/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+#define EDID_TIMING_ASPECT_SHIFT 6
+#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
+
+/* need to add 60 */
+#define EDID_TIMING_VFREQ_SHIFT 0
+#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
+
+struct std_timing {
+ u8 hsize; /* need to multiply by 8 then add 248 */
+ u8 vfreq_aspect;
+} __attribute__((packed));
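+
+/*
+ * Decoding sketch for struct std_timing (following the comments above):
+ *
+ *	hsize = (t->hsize * 8) + 248;
+ *	vfreq = ((t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) >>
+ *	    EDID_TIMING_VFREQ_SHIFT) + 60;
+ */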
+
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
+#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
+#define DRM_EDID_PT_STEREO (1 << 5)
+#define DRM_EDID_PT_INTERLACED (1 << 7)
+
+/* If detailed data is pixel timing */
+struct detailed_pixel_timing {
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hactive_hblank_hi;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vactive_vblank_hi;
+ u8 hsync_offset_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_offset_pulse_width_lo;
+ u8 hsync_vsync_offset_pulse_width_hi;
+ u8 width_mm_lo;
+ u8 height_mm_lo;
+ u8 width_height_mm_hi;
+ u8 hborder;
+ u8 vborder;
+ u8 misc;
+} __attribute__((packed));
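+
+/*
+ * Decoding sketch: the *_lo bytes hold the low 8 bits and the *_hi bytes
+ * pack the high nibbles, e.g. for the horizontal timings:
+ *
+ *	hactive = pt->hactive_lo | ((pt->hactive_hblank_hi & 0xf0) << 4);
+ *	hblank = pt->hblank_lo | ((pt->hactive_hblank_hi & 0x0f) << 8);
+ */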
+
+/* If it's not pixel timing, it'll be one of the below */
+struct detailed_data_string {
+ u8 str[13];
+} __attribute__((packed));
+
+struct detailed_data_monitor_range {
+ u8 min_vfreq;
+ u8 max_vfreq;
+ u8 min_hfreq_khz;
+ u8 max_hfreq_khz;
+ u8 pixel_clock_mhz; /* need to multiply by 10 */
+ u8 flags;
+ union {
+ struct {
+ u8 reserved;
+ u8 hfreq_start_khz; /* need to multiply by 2 */
+ u8 c; /* need to divide by 2 */
+ __le16 m;
+ u8 k;
+ u8 j; /* need to divide by 2 */
+ } __attribute__((packed)) gtf2;
+ struct {
+ u8 version;
+ u8 data1; /* high 6 bits: extra clock resolution */
+ u8 data2; /* plus low 2 of above: max hactive */
+ u8 supported_aspects;
+ u8 flags; /* preferred aspect and blanking support */
+ u8 supported_scalings;
+ u8 preferred_refresh;
+ } __attribute__((packed)) cvt;
+ } formula;
+} __attribute__((packed));
+
+struct detailed_data_wpindex {
+ u8 white_yx_lo; /* Lower 2 bits each */
+ u8 white_x_hi;
+ u8 white_y_hi;
+ u8 gamma; /* need to divide by 100 then add 1 */
+} __attribute__((packed));
+
+struct detailed_data_color_point {
+ u8 windex1;
+ u8 wpindex1[3];
+ u8 windex2;
+ u8 wpindex2[3];
+} __attribute__((packed));
+
+struct cvt_timing {
+ u8 code[3];
+} __attribute__((packed));
+
+struct detailed_non_pixel {
+ u8 pad1;
+ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
+ fb=color point data, fa=standard timing data,
+ f9=undefined, f8=mfg. reserved */
+ u8 pad2;
+ union {
+ struct detailed_data_string str;
+ struct detailed_data_monitor_range range;
+ struct detailed_data_wpindex color;
+ struct std_timing timings[6];
+ struct cvt_timing cvt[4];
+ } data;
+} __attribute__((packed));
+
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
+#define EDID_DETAIL_STD_MODES 0xfa
+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
+#define EDID_DETAIL_MONITOR_NAME 0xfc
+#define EDID_DETAIL_MONITOR_RANGE 0xfd
+#define EDID_DETAIL_MONITOR_STRING 0xfe
+#define EDID_DETAIL_MONITOR_SERIAL 0xff
+
+struct detailed_timing {
+ __le16 pixel_clock; /* stored in units of 10 kHz */
+ union {
+ struct detailed_pixel_timing pixel_data;
+ struct detailed_non_pixel other_data;
+ } data;
+} __attribute__((packed));
+
+#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
+#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
+#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
+#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
+#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
+#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
+#define DRM_EDID_INPUT_DIGITAL (1 << 7)
+#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
+#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
+#define DRM_EDID_DIGITAL_TYPE_DVI (1)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
+#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
+#define DRM_EDID_DIGITAL_TYPE_DP (5)
+
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
+#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
+/* If analog */
+#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
+/* If digital */
+#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
+#define DRM_EDID_FEATURE_RGB (0 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
+
+#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
+#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
+#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
+
+struct edid {
+ u8 header[8];
+ /* Vendor & product info */
+ u8 mfg_id[2];
+ u8 prod_code[2];
+ u32 serial; /* FIXME: byte order */
+ u8 mfg_week;
+ u8 mfg_year;
+ /* EDID version */
+ u8 version;
+ u8 revision;
+ /* Display info: */
+ u8 input;
+ u8 width_cm;
+ u8 height_cm;
+ u8 gamma;
+ u8 features;
+ /* Color characteristics */
+ u8 red_green_lo;
+ u8 black_white_lo;
+ u8 red_x;
+ u8 red_y;
+ u8 green_x;
+ u8 green_y;
+ u8 blue_x;
+ u8 blue_y;
+ u8 white_x;
+ u8 white_y;
+ /* Est. timings and mfg rsvd timings */
+ struct est_timings established_timings;
+ /* Standard timings 1-8 */
+ struct std_timing standard_timings[8];
+ /* Detailed timings 1-4 */
+ struct detailed_timing detailed_timings[4];
+ /* Number of 128 byte ext. blocks */
+ u8 extensions;
+ /* Checksum */
+ u8 checksum;
+} __attribute__((packed));
+
+#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
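+
+/*
+ * Decoding sketch: mfg_id packs three 5-bit letters ('A' == 1), most
+ * significant bits first, so the PNP vendor string can be recovered as
+ *
+ *	vendor[0] = '@' + ((e->mfg_id[0] & 0x7c) >> 2);
+ *	vendor[1] = '@' + (((e->mfg_id[0] & 0x03) << 3) |
+ *	    ((e->mfg_id[1] & 0xe0) >> 5));
+ *	vendor[2] = '@' + (e->mfg_id[1] & 0x1f);
+ */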
+
+struct drm_encoder;
+struct drm_connector;
+struct drm_display_mode;
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+int drm_load_edid_firmware(struct drm_connector *connector);
+
+#endif /* __DRM_EDID_H__ */
diff --git a/sys/dev/pci/drm/drm_edid_modes.h b/sys/dev/pci/drm/drm_edid_modes.h
new file mode 100644
index 00000000000..c1ca3e4c1e7
--- /dev/null
+++ b/sys/dev/pci/drm/drm_edid_modes.h
@@ -0,0 +1,774 @@
+/* $OpenBSD: drm_edid_modes.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@75Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@85Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+
+};
+static const int drm_num_dmt_modes =
+ sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+
+static const struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+struct minimode {
+ short w; /* horizontal resolution */
+ short h; /* vertical resolution */
+ short r; /* refresh rate */
+ short rb; /* reduced blanking */
+};
+
+static const struct minimode est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = ARRAY_SIZE(est3_modes);
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+static const int num_extra_modes = ARRAY_SIZE(extra_modes);
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 1 - 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 3 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 4 - 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 5 - 1920x1080i@60Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 6 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 7 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 8 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 9 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 10 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 11 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 12 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 13 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 14 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 15 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 16 - 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 17 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 18 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 19 - 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 20 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 21 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 22 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 23 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 24 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 25 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 26 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 27 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 28 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 29 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 30 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 31 - 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 32 - 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 33 - 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 34 - 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 35 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 36 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 37 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 38 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 39 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 40 - 1920x1080i@100Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 41 - 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 42 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 43 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 44 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 45 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 46 - 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 47 - 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 48 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 49 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 50 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 51 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 52 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 53 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 54 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 55 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 56 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 57 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 58 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 59 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 60 - 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 61 - 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 62 - 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 63 - 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 64 - 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
diff --git a/sys/dev/pci/drm/drm_fb_helper.c b/sys/dev/pci/drm/drm_fb_helper.c
new file mode 100644
index 00000000000..2d01ff85adf
--- /dev/null
+++ b/sys/dev/pci/drm/drm_fb_helper.c
@@ -0,0 +1,1533 @@
+/* $OpenBSD: drm_fb_helper.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+bool drm_fb_helper_connector_parse_command_line(
+ struct drm_fb_helper_connector *, const char *);
+int fb_get_options(const char *, char **);
+int drm_fb_helper_parse_command_line(struct drm_fb_helper *);
+void drm_fb_helper_crtc_free(struct drm_fb_helper *);
+struct drm_display_mode *drm_has_preferred_mode(
+ struct drm_fb_helper_connector *, int, int);
+int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *,
+ uint32_t, uint32_t);
+bool drm_has_cmdline_mode(struct drm_fb_helper_connector *);
+struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *,
+ int, int);
+bool drm_connector_enabled(struct drm_connector *, bool);
+void drm_enable_connectors(struct drm_fb_helper *, bool *);
+bool drm_target_cloned(struct drm_fb_helper *, struct drm_display_mode **,
+ bool *, int, int);
+bool drm_target_preferred(struct drm_fb_helper *,
+ struct drm_display_mode **, bool *, int, int);
+int drm_pick_crtcs(struct drm_fb_helper *, struct drm_fb_helper_crtc **,
+ struct drm_display_mode **, int, int, int);
+void drm_setup_crtcs(struct drm_fb_helper *);
+void drm_fb_helper_save_lut_atomic(struct drm_crtc *,
+ struct drm_fb_helper *);
+void drm_fb_helper_restore_lut_atomic(struct drm_crtc *);
+struct drm_framebuffer *
+ drm_mode_config_fb(struct drm_crtc *);
+bool drm_fb_helper_force_kernel_mode(void);
+
+static DRM_LIST_HEAD(kernel_fb_helper_list);
+
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
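+
+/*
+ * Typical setup (sketch only; error handling omitted): a driver
+ * allocates its drm_fb_helper, then initializes it and attaches the
+ * connectors:
+ *
+ *	drm_fb_helper_init(dev, fb_helper, num_crtc, max_conn_count);
+ *	drm_fb_helper_single_add_all_connectors(fb_helper);
+ */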
+
+/* simple single crtc case helper function */
+int
+drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = malloc(sizeof(struct drm_fb_helper_connector), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!fb_helper_connector)
+ goto fail;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
+ return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ free(fb_helper->connector_info[i], M_DRM);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
+
+int
+drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_cmdline_mode *mode;
+ struct drm_connector *connector;
+ /* char *option = NULL; */
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ connector = fb_helper_conn->connector;
+ mode = &fb_helper_conn->cmdline_mode;
+
+#ifdef notyet
+ /* do something on return - turn off connector maybe */
+ if (fb_get_options(drm_get_connector_name(connector), &option))
+ continue;
+
+ if (drm_mode_parse_command_line_for_connector(option,
+ connector,
+ mode)) {
+ if (mode->force) {
+ const char *s;
+ switch (mode->force) {
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
+ default:
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
+ }
+
+ DRM_INFO("forcing %s connector %s\n",
+ drm_get_connector_name(connector), s);
+ connector->force = mode->force;
+ }
+
+ DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+ drm_get_connector_name(connector),
+ mode->xres, mode->yres,
+ mode->refresh_specified ? mode->refresh : 60,
+ mode->rb ? " reduced blanking" : "",
+ mode->margins ? " with margins" : "",
+ mode->interlace ? " interlaced" : "");
+ }
+#endif
+
+ }
+ return 0;
+}
+
+void
+drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++)
+ helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+void
+drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+ uint16_t *r_base, *g_base, *b_base;
+
+ if (crtc->funcs->gamma_set == NULL)
+ return;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
+#if 0
+int
+drm_fb_helper_debug_enter(struct fb_info *info)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc_helper_funcs *funcs;
+ int i;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set =
+ &helper->crtc_info[i].mode_set;
+
+ if (!mode_set->crtc->enabled)
+ continue;
+
+ funcs = mode_set->crtc->helper_private;
+ drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
+ funcs->mode_set_base_atomic(mode_set->crtc,
+ mode_set->fb,
+ mode_set->x,
+ mode_set->y,
+ ENTER_ATOMIC_MODE_SET);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_enter);
+#endif
+
+/* Find the real fb for a given fb helper CRTC */
+struct drm_framebuffer *
+drm_mode_config_fb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *c;
+
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+ return c->fb;
+ }
+
+ return NULL;
+}
+
+#if 0
+int
+drm_fb_helper_debug_leave(struct fb_info *info)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_framebuffer *fb;
+ int i;
+
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ funcs = crtc->helper_private;
+ fb = drm_mode_config_fb(crtc);
+
+ if (!crtc->enabled)
+ continue;
+
+ if (!fb) {
+ DRM_ERROR("no fb to restore??\n");
+ continue;
+ }
+
+ drm_fb_helper_restore_lut_atomic(mode_set->crtc);
+ funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
+ crtc->y, LEAVE_ATOMIC_MODE_SET);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+#endif
+
+bool
+drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ bool error = false;
+ int i, ret;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ ret = mode_set->crtc->funcs->set_config(mode_set);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
+
+bool
+drm_fb_helper_force_kernel_mode(void)
+{
+ bool ret, error = false;
+ struct drm_fb_helper *helper;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+#ifdef notyet
+ if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+#endif
+
+ ret = drm_fb_helper_restore_fbdev_mode(helper);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+
+#if 0
+int
+drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
+ void *panic_str)
+{
+ /*
+ * It's a waste of time and effort to switch back to text console
+ * if the kernel should reboot before panic messages can be seen.
+ */
+ if (panic_timeout < 0)
+ return 0;
+
+ pr_err("panic occurred, switching back to text console\n");
+ return drm_fb_helper_force_kernel_mode();
+}
+EXPORT_SYMBOL(drm_fb_helper_panic);
+
+static struct notifier_block paniced = {
+ .notifier_call = drm_fb_helper_panic,
+};
+#endif
+
+/**
+ * drm_fb_helper_restore - restore the framebuffer console (kernel) config
+ *
+ * Restores the kernel's fbcon mode; used for the lastclose and panic paths.
+ */
+void
+drm_fb_helper_restore(void)
+{
+ bool ret;
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
+}
+EXPORT_SYMBOL(drm_fb_helper_restore);
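+
+/*
+ * Illustrative use (sketch only; my_lastclose() is a hypothetical name):
+ * a driver's lastclose hook typically calls this so the console is
+ * usable again after the last client exits:
+ *
+ *	void
+ *	my_lastclose(struct drm_device *dev)
+ *	{
+ *		drm_fb_helper_restore();
+ *	}
+ */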
+
+#if 0
+#ifdef CONFIG_MAGIC_SYSRQ
+void
+drm_fb_helper_restore_work_fn(struct work_struct *ignored)
+{
+ drm_fb_helper_restore();
+}
+static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
+
+void
+drm_fb_helper_sysrq(int dummy1)
+{
+ schedule_work(&drm_fb_helper_restore_work);
+}
+
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+ .handler = drm_fb_helper_sysrq,
+ .help_msg = "force-fb(V)",
+ .action_msg = "Restore framebuffer console",
+};
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+#endif
+#endif
+
+#if 0
+void
+drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ int i, j;
+
+ /*
+ * For each CRTC in this fb, turn the connectors on/off.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ if (!crtc->enabled)
+ continue;
+
+ /* Walk the connectors & encoders on this fb turning them on/off */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->funcs->dpms(connector, dpms_mode);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.dpms_property, dpms_mode);
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+}
+#endif
+
+#if 0
+int
+drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+ switch (blank) {
+ /* Display: On; HSync: On, VSync: On */
+ case FB_BLANK_UNBLANK:
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
+ break;
+ /* Display: Off; HSync: On, VSync: On */
+ case FB_BLANK_NORMAL:
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
+ break;
+ /* Display: Off; HSync: Off, VSync: On */
+ case FB_BLANK_HSYNC_SUSPEND:
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
+ break;
+ /* Display: Off; HSync: On, VSync: Off */
+ case FB_BLANK_VSYNC_SUSPEND:
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
+ break;
+ /* Display: Off; HSync: Off, VSync: Off */
+ case FB_BLANK_POWERDOWN:
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_blank);
+#endif
+
+void
+drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+{
+ int i;
+
+ for (i = 0; i < helper->connector_count; i++)
+ free(helper->connector_info[i], M_DRM);
+ free(helper->connector_info, M_DRM);
+ for (i = 0; i < helper->crtc_count; i++) {
+ free(helper->crtc_info[i].mode_set.connectors, M_DRM);
+ if (helper->crtc_info[i].mode_set.mode)
+ drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ }
+ free(helper->crtc_info, M_DRM);
+}
+
+int
+drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
+{
+ struct drm_crtc *crtc;
+ int i;
+
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = malloc(crtc_count *
+ sizeof(struct drm_fb_helper_crtc), M_DRM, M_WAITOK | M_ZERO);
+ if (!fb_helper->crtc_info)
+ return -ENOMEM;
+
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_connector *), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!fb_helper->connector_info) {
+ free(fb_helper->crtc_info, M_DRM);
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
+
+ for (i = 0; i < crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.connectors =
+ malloc(max_conn_count * sizeof(struct drm_connector *),
+ M_DRM, M_WAITOK | M_ZERO);
+
+ if (!fb_helper->crtc_info[i].mode_set.connectors)
+ goto out_free;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
+ }
+
+ i = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
+ i++;
+ }
+
+ return 0;
+out_free:
+ drm_fb_helper_crtc_free(fb_helper);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_fb_helper_init);
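+/*
+ * Illustrative bring-up sequence, a sketch rather than code from this
+ * change: a driver's fbdev setup would typically chain the helpers
+ * declared in drm_fb_helper.h roughly as follows, where
+ * "exampledrv_fb_funcs" and the crtc/connector counts are assumed,
+ * driver-specific names:
+ *
+ *	fb_helper->funcs = &exampledrv_fb_funcs;
+ *	ret = drm_fb_helper_init(dev, fb_helper, num_crtc, max_conn);
+ *	if (ret)
+ *		return ret;
+ *	drm_fb_helper_single_add_all_connectors(fb_helper);
+ *	drm_fb_helper_initial_config(fb_helper, 32);
+ */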
+
+void
+drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+#if 0
+ pr_info("drm: unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &paniced);
+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+#endif
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
+
+#if 0
+int
+setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, u16 regno, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ int pindex;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 *palette;
+ u32 value;
+ /* place color in pseudopalette */
+ if (regno > 16)
+ return -EINVAL;
+ palette = (u32 *)info->pseudo_palette;
+ red >>= (16 - info->var.red.length);
+ green >>= (16 - info->var.green.length);
+ blue >>= (16 - info->var.blue.length);
+ value = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
+ palette[regno] = value;
+ return 0;
+ }
+
+ pindex = regno;
+
+ if (fb->bits_per_pixel == 16) {
+ pindex = regno << 3;
+
+ if (fb->depth == 16 && regno > 63)
+ return -EINVAL;
+ if (fb->depth == 15 && regno > 31)
+ return -EINVAL;
+
+ if (fb->depth == 16) {
+ u16 r, g, b;
+ int i;
+ if (regno < 32) {
+ for (i = 0; i < 8; i++)
+ fb_helper->funcs->gamma_set(crtc, red,
+ green, blue, pindex + i);
+ }
+
+ fb_helper->funcs->gamma_get(crtc, &r,
+ &g, &b,
+ pindex >> 1);
+
+ for (i = 0; i < 4; i++)
+ fb_helper->funcs->gamma_set(crtc, r,
+ green, b,
+ (pindex >> 1) + i);
+ }
+ }
+
+ if (fb->depth != 16)
+ fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
+ return 0;
+}
+#endif
+
+#if 0
+int
+drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ u16 *red, *green, *blue, *transp;
+ struct drm_crtc *crtc;
+ int i, j, rc = 0;
+ int start;
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ for (j = 0; j < cmap->len; j++) {
+ u16 hred, hgreen, hblue, htransp = 0xffff;
+
+ hred = *red++;
+ hgreen = *green++;
+ hblue = *blue++;
+
+ if (transp)
+ htransp = *transp++;
+
+ rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
+ if (rc)
+ return rc;
+ }
+ crtc_funcs->load_lut(crtc);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(drm_fb_helper_setcmap);
+#endif
+
+#if 0
+int
+drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ int depth;
+
+ if (var->pixclock != 0 || in_dbg_master())
+ return -EINVAL;
+
+ /* Need to resize the fb object !!! */
+ if (var->bits_per_pixel > fb->bits_per_pixel ||
+ var->xres > fb->width || var->yres > fb->height ||
+ var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+ var->xres, var->yres, var->bits_per_pixel,
+ var->xres_virtual, var->yres_virtual,
+ fb->width, fb->height, fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ depth = (var->green.length == 6) ? 16 : 15;
+ break;
+ case 32:
+ depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ depth = var->bits_per_pixel;
+ break;
+ }
+
+ switch (depth) {
+ case 8:
+ var->red.offset = 0;
+ var->green.offset = 0;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 15:
+ var->red.offset = 10;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 5;
+ var->blue.length = 5;
+ var->transp.length = 1;
+ var->transp.offset = 15;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 8;
+ var->transp.offset = 24;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_check_var);
+#endif
+
+#if 0
+/* this will let fbcon do the mode init */
+int
+drm_fb_helper_set_par(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct fb_var_screeninfo *var = &info->var;
+ struct drm_crtc *crtc;
+ int ret;
+ int i;
+
+ if (var->pixclock != 0) {
+ DRM_ERROR("PIXEL CLOCK SET\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ if (ret) {
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (fb_helper->delayed_hotplug) {
+ fb_helper->delayed_hotplug = false;
+ drm_fb_helper_hotplug_event(fb_helper);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_set_par);
+#endif
+
+#if 0
+int
+drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_set *modeset;
+ struct drm_crtc *crtc;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ modeset = &fb_helper->crtc_info[i].mode_set;
+
+ modeset->x = var->xoffset;
+ modeset->y = var->yoffset;
+
+ if (modeset->num_connectors) {
+ ret = crtc->funcs->set_config(modeset);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ }
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_pan_display);
+#endif
+
+int
+drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
+{
+ int new_fb = 0;
+ int crtc_count = 0;
+ int i;
+#if 0
+ struct fb_info *info;
+#endif
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
+
+ /* if driver picks 8 or 16 by default use that
+ for both depth/bpp */
+ if (preferred_bpp != sizes.surface_bpp)
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+
+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+ struct drm_cmdline_mode *cmdline_mode;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+
+ if (cmdline_mode->bpp_specified) {
+ switch (cmdline_mode->bpp) {
+ case 8:
+ sizes.surface_depth = sizes.surface_bpp = 8;
+ break;
+ case 15:
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
+ break;
+ case 16:
+ sizes.surface_depth = sizes.surface_bpp = 16;
+ break;
+ case 24:
+ sizes.surface_depth = sizes.surface_bpp = 24;
+ break;
+ case 32:
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ break;
+ }
+ break;
+ }
+ }
+
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
+ crtc_count++;
+ }
+ }
+
+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
+ /* hmm everyone went away - assume VGA cable just fell out
+ and will come back later. */
+ printf("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
+ }
+
+ /* push down into drivers */
+ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (new_fb < 0)
+ return new_fb;
+
+#if 0
+ info = fb_helper->fbdev;
+#endif
+
+ /* set the fb pointer */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+
+#if 0
+ if (new_fb) {
+ info->var.pixclock = 0;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
+
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
+
+ } else {
+ drm_fb_helper_set_par(info);
+ }
+
+ /* Switch back to kernel console on panic */
+ /* multi card linked list maybe */
+ if (list_empty(&kernel_fb_helper_list)) {
+ dev_info(fb_helper->dev->dev, "registered panic notifier\n");
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &paniced);
+ register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+ }
+#endif
+ if (new_fb)
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
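+/*
+ * Worked example of the sizing rules above, for illustration: with two
+ * enabled heads whose desired modes are 1920x1080 and 1024x768, the
+ * common fbcon area (fb_width/fb_height, the minimum) ends up 1024x768
+ * while the backing surface (surface_width/surface_height, the maximum)
+ * ends up 1920x1080, so each crtc can scan out of the one framebuffer.
+ */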
+
+#if 0
+void
+drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
+{
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+ FB_VISUAL_TRUECOLOR;
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+
+ info->fix.line_length = pitch;
+ return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
+void
+drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ info->pseudo_palette = fb_helper->pseudo_palette;
+ info->var.xres_virtual = fb->width;
+ info->var.yres_virtual = fb->height;
+ info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.accel_flags = FB_ACCELF_TEXT;
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->var.height = -1;
+ info->var.width = -1;
+
+ switch (fb->depth) {
+ case 8:
+ info->var.red.offset = 0;
+ info->var.green.offset = 0;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8; /* 8bit DAC */
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+ break;
+ case 15:
+ info->var.red.offset = 10;
+ info->var.green.offset = 5;
+ info->var.blue.offset = 0;
+ info->var.red.length = 5;
+ info->var.green.length = 5;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 15;
+ info->var.transp.length = 1;
+ break;
+ case 16:
+ info->var.red.offset = 11;
+ info->var.green.offset = 5;
+ info->var.blue.offset = 0;
+ info->var.red.length = 5;
+ info->var.green.length = 6;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 0;
+ break;
+ case 24:
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+ break;
+ case 32:
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 24;
+ info->var.transp.length = 8;
+ break;
+ default:
+ break;
+ }
+
+ info->var.xres = fb_width;
+ info->var.yres = fb_height;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_var);
+#endif
+
+int
+drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+struct drm_display_mode *
+drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+bool
+drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+struct drm_display_mode *
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ printf("%s stub\n", __func__);
+ return (NULL);
+#ifdef notyet
+ struct drm_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes
+ * we have gotten so far, if not add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+ cmdline_mode);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+#endif
+}
+
+bool
+drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict)
+ enable = connector->status == connector_status_connected;
+ else
+ enable = connector->status != connector_status_disconnected;
+
+ return enable;
+}
+
+void
+drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+bool
+drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line or if nothing common pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ printf("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
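+/*
+ * Example of the cloning rules above, for illustration: with a single
+ * crtc and two connected outputs, cloning succeeds only when both report
+ * an identical mode, either the same command line mode on each or the
+ * 1024x768@60 DMT fallback present in both mode lists; a panel without
+ * 1024x768 in its list therefore defeats the fallback path.
+ */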
+
+bool
+drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+ /* go for the command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
+int
+drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *best_crtc;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_crtc = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
+ continue;
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ free(crtcs, M_DRM);
+ return best_score;
+}
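+/*
+ * Scoring sketch for the recursion above, for illustration: each
+ * assigned connector contributes 1 point, plus 1 if it is connected,
+ * 1 if it has a command line mode and 1 if it has a preferred mode, so
+ * a connected panel with an EDID-preferred mode scores 3 while a merely
+ * present output scores 1; the exhaustive search keeps the crtc
+ * assignment with the highest total score.
+ */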
+
+void
+drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ crtcs = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), M_DRM,
+ M_WAITOK | M_ZERO);
+ modes = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_display_mode *), M_DRM,
+ M_WAITOK | M_ZERO);
+ enabled = malloc(dev->mode_config.num_connector *
+ sizeof(bool), M_DRM, M_WAITOK | M_ZERO);
+ if (!crtcs || !modes || !enabled) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto out;
+ }
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+ if (!ret) {
+ ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+ if (!ret)
+ DRM_ERROR("Unable to find initial modes\n");
+ }
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+
+ if (mode && fb_crtc) {
+ modeset = &fb_crtc->mode_set;
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ }
+ }
+
+out:
+ free(crtcs, M_DRM);
+ free(modes, M_DRM);
+ free(enabled, M_DRM);
+}
+
+/**
+ * drm_fb_helper_initial_config - set up a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * LOCKING:
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
+ *
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool
+drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(fb_helper->dev);
+
+ drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0)
+ printf("No connectors reported connected with modes\n");
+
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+/**
+ * drm_fb_helper_hotplug_event - respond to a hotplug notification by
+ * probing all the outputs attached to the fb
+ * @fb_helper: the drm_fb_helper
+ *
+ * LOCKING:
+ * Called at runtime, must take mode config lock.
+ *
+ * Scan the connectors attached to the fb_helper and try to put together a
+ * setup after notification of a change in output configuration.
+ *
+ * RETURNS:
+ * 0 on success and a non-zero error code otherwise.
+ */
+int
+drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+ u32 max_width, max_height, bpp_sel;
+ int bound = 0, crtcs_bound = 0;
+ struct drm_crtc *crtc;
+
+ if (!fb_helper->fb)
+ return 0;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound++;
+ if (crtc->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound) {
+ fb_helper->delayed_hotplug = true;
+ rw_exit_write(&dev->mode_config.rwl);
+ return 0;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ drm_setup_crtcs(fb_helper);
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
+#ifdef __linux__
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
+int
+__init drm_fb_helper_modinit(void)
+{
+ const char *name = "fbcon";
+ struct module *fbcon;
+
+ mtx_enter(&module_mutex);
+ fbcon = find_module(name);
+ mtx_leave(&module_mutex);
+
+ if (!fbcon)
+ request_module_nowait(name);
+ return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
+#endif /* __linux__ */
diff --git a/sys/dev/pci/drm/drm_fb_helper.h b/sys/dev/pci/drm/drm_fb_helper.h
new file mode 100644
index 00000000000..58b7a89968d
--- /dev/null
+++ b/sys/dev/pci/drm/drm_fb_helper.h
@@ -0,0 +1,122 @@
+/* $OpenBSD: drm_fb_helper.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#ifndef DRM_FB_HELPER_H
+#define DRM_FB_HELPER_H
+
+struct drm_fb_helper;
+
+struct drm_fb_helper_crtc {
+ struct drm_mode_set mode_set;
+ struct drm_display_mode *desired_mode;
+};
+
+struct drm_fb_helper_surface_size {
+ u32 fb_width;
+ u32 fb_height;
+ u32 surface_width;
+ u32 surface_height;
+ u32 surface_bpp;
+ u32 surface_depth;
+};
+
+struct drm_fb_helper_funcs {
+ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ int (*fb_probe)(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+};
+
+struct drm_fb_helper_connector {
+ struct drm_connector *connector;
+ struct drm_cmdline_mode cmdline_mode;
+};
+
+struct drm_fb_helper {
+ struct drm_framebuffer *fb;
+ struct drm_framebuffer *saved_fb;
+ struct drm_device *dev;
+ struct drm_display_mode *mode;
+ int crtc_count;
+ struct drm_fb_helper_crtc *crtc_info;
+ int connector_count;
+ struct drm_fb_helper_connector **connector_info;
+ struct drm_fb_helper_funcs *funcs;
+ struct fb_info *fbdev;
+ u32 pseudo_palette[17];
+ struct list_head kernel_fb_list;
+
+ /* we got a hotplug but fbdev wasn't running the console;
+ delay until next set_par */
+ bool delayed_hotplug;
+};
+
+struct fb_var_screeninfo;
+struct fb_cmap;
+
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
+ int preferred_bpp);
+
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn);
+void drm_fb_helper_fini(struct drm_fb_helper *helper);
+int drm_fb_helper_blank(int blank, struct fb_info *info);
+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+int drm_fb_helper_set_par(struct fb_info *info);
+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+int drm_fb_helper_setcolreg(unsigned regno,
+ unsigned red,
+ unsigned green,
+ unsigned blue,
+ unsigned transp,
+ struct fb_info *info);
+
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_restore(void);
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth);
+
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
+
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_debug_enter(struct fb_info *info);
+int drm_fb_helper_debug_leave(struct fb_info *info);
+
+#endif
diff --git a/sys/dev/pci/drm/drm_fourcc.h b/sys/dev/pci/drm/drm_fourcc.h
new file mode 100644
index 00000000000..5fff4e1e8dc
--- /dev/null
+++ b/sys/dev/pci/drm/drm_fourcc.h
@@ -0,0 +1,136 @@
+/* $OpenBSD: drm_fourcc.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include <sys/types.h>
+
+#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
+ ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
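+/*
+ * Worked example: fourcc_code('X', 'R', '2', '4') packs the ASCII bytes
+ * 0x58, 0x52, 0x32 and 0x34 from lowest to highest byte, giving
+ * 0x34325258, the value of DRM_FORMAT_XRGB8888 below.
+ */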
+
+#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
+
+/* special NV12 tiled format */
+#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+#endif /* DRM_FOURCC_H */
diff --git a/sys/dev/pci/drm/drm_irq.c b/sys/dev/pci/drm/drm_irq.c
index d94f4f89835..2896e921852 100644
--- a/sys/dev/pci/drm/drm_irq.c
+++ b/sys/dev/pci/drm/drm_irq.c
@@ -1,6 +1,17 @@
-/* $OpenBSD: drm_irq.c,v 1.43 2011/06/02 18:22:00 weerd Exp $ */
-/*-
- * Copyright 2003 Eric Anholt
+/* $OpenBSD: drm_irq.c,v 1.44 2013/03/18 12:36:51 jsg Exp $ */
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -17,18 +28,10 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
- * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <anholt@FreeBSD.org>
- *
- */
-
-/** @file drm_irq.c
- * Support code for handling setup/teardown of interrupt handlers and
- * handing interrupt handlers off to the drivers.
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/workq.h>
@@ -36,11 +39,38 @@
#include "drmP.h"
#include "drm.h"
-void drm_update_vblank_count(struct drm_device *, int);
-void vblank_disable(void *);
-int drm_queue_vblank_event(struct drm_device *, int,
- union drm_wait_vblank *, struct drm_file *);
-void drm_handle_vblank_events(struct drm_device *, int);
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+ (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+ ((count) % DRM_VBLANKTIME_RBSIZE)])
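+/*
+ * For illustration: the timestamp for count 1025 on crtc 1 lands in slot
+ * 1 * DRM_VBLANKTIME_RBSIZE + (1025 % DRM_VBLANKTIME_RBSIZE); older
+ * entries for the same crtc are simply overwritten as the count wraps.
+ */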
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
+void clear_vblank_timestamps(struct drm_device *, int);
+void vblank_disable_and_save(struct drm_device *, int);
+u32 drm_get_last_vbltimestamp(struct drm_device *, int, struct timeval *,
+ unsigned);
+void vblank_disable_fn(void *);
+int64_t timeval_to_ns(const struct timeval *);
+struct timeval ns_to_timeval(const int64_t);
+void drm_irq_vgaarb_nokms(void *, bool);
+struct timeval get_drm_timestamp(void);
+u32 drm_vblank_count_and_time(struct drm_device *, int, struct timeval *);
+void send_vblank_event(struct drm_device *,
+ struct drm_pending_vblank_event *, unsigned long,
+ struct timeval *);
+void drm_update_vblank_count(struct drm_device *, int);
+int drm_queue_vblank_event(struct drm_device *, int,
+ union drm_wait_vblank *, struct drm_file *);
+void drm_handle_vblank_events(struct drm_device *, int);
#ifdef DRM_VBLANK_DEBUG
#define DPRINTF(x...) do { printf(x); } while(/* CONSTCOND */ 0)
@@ -48,6 +78,27 @@ void drm_handle_vblank_events(struct drm_device *, int);
#define DPRINTF(x...) do { } while(/* CONSTCOND */ 0)
#endif
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device this DRM instance is attached to.
+ */
int
drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
@@ -66,6 +117,281 @@ drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv)
return 0;
}
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+void
+clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+ memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+ DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_SEC 1000000000L
+
+int64_t
+timeval_to_ns(const struct timeval *tv)
+{
+ return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
+ tv->tv_usec * NSEC_PER_USEC;
+}
+
+struct timeval
+ns_to_timeval(const int64_t nsec)
+{
+ struct timeval tv;
+ int32_t rem; /* must be signed: nsec % NSEC_PER_SEC is negative for negative nsec */
+
+ if (nsec == 0) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ return (tv);
+ }
+
+ tv.tv_sec = nsec / NSEC_PER_SEC;
+ rem = nsec % NSEC_PER_SEC;
+ if (rem < 0) {
+ tv.tv_sec--;
+ rem += NSEC_PER_SEC;
+ }
+ tv.tv_usec = rem / 1000;
+ return (tv);
+}
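+/*
+ * Round-trip example for the two conversions above: a timeval of
+ * { .tv_sec = 1, .tv_usec = 500000 } maps to 1500000000 ns, and
+ * ns_to_timeval(1500000000) splits that back into 1 s and 500000 us;
+ * sub-microsecond remainders are truncated by the rem / 1000 step.
+ */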
+
+static inline int64_t
+abs64(int64_t x)
+{
+ return (x < 0 ? -x : x);
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+void
+vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+ u32 vblcount;
+ s64 diff_ns;
+ int vblrc;
+ struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+ */
+ mtx_enter(&dev->vblank_time_lock);
+
+ dev->driver->disable_vblank(dev, crtc);
+ dev->vblank_enabled[crtc] = 0;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+ * vblank timestamp, repeat until they are consistent.
+ *
+ * FIXME: There is still a race condition here and in
+ * drm_update_vblank_count() which can cause off-by-one
+ * reinitialization of software vblank counter. If gpu
+ * vblank counter doesn't increment exactly at the leading
+ * edge of a vblank interval, then we can lose 1 count if
+ * we happen to execute between start of vblank and the
+ * delayed gpu counter increment.
+ */
+ do {
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
+
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* If there is at least 1 msec difference between the last stored
+ * timestamp and tvblank, then we are currently executing our
+ * disable inside a new vblank interval, the tvblank timestamp
+ * corresponds to this new vblank interval and the irq handler
+ * for this vblank didn't run yet and won't run due to our disable.
+ * Therefore we need to do the job of drm_handle_vblank() and
+ * increment the vblank counter by one to account for this vblank.
+ *
+ * Skip this step if there isn't any high precision timestamp
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
+ atomic_inc(&dev->_vblank_count[crtc]);
+// smp_mb__after_atomic_inc();
+ DRM_WRITEMEMORYBARRIER();
+ }
+
+ /* Invalidate all timestamps while vblank irq's are off. */
+ clear_vblank_timestamps(dev, crtc);
+
+ mtx_leave(&dev->vblank_time_lock);
+}
+
+void
+vblank_disable_fn(void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ int i;
+
+ if (!dev->vblank_disable_allowed)
+ return;
+
+ for (i = 0; i < dev->num_crtcs; i++) {
+ mtx_enter(&dev->vbl_lock);
+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+ dev->vblank_enabled[i]) {
+ DPRINTF("disabling vblank on crtc %d\n", i);
+ vblank_disable_and_save(dev, i);
+ }
+ mtx_leave(&dev->vbl_lock);
+ }
+}
+
+void
+drm_vblank_cleanup(struct drm_device *dev)
+{
+ /* Bail if the driver didn't call drm_vblank_init() */
+ if (dev->num_crtcs == 0)
+ return;
+
+ timeout_del(&dev->vblank_disable_timer);
+
+ vblank_disable_fn(dev);
+
+ free(dev->vbl_queue, M_DRM);
+ free(dev->_vblank_count, M_DRM);
+ free(dev->vblank_refcount, M_DRM);
+ free(dev->vblank_enabled, M_DRM);
+ free(dev->last_vblank, M_DRM);
+ free(dev->last_vblank_wait, M_DRM);
+ free(dev->vblank_inmodeset, M_DRM);
+ free(dev->_vblank_time, M_DRM);
+
+ dev->num_crtcs = 0;
+}
+
+int
+drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+ int i, ret = -ENOMEM;
+
+ timeout_set(&dev->vblank_disable_timer, vblank_disable_fn,
+ dev);
+ mtx_init(&dev->vbl_lock, IPL_TTY);
+ mtx_init(&dev->vblank_time_lock, IPL_NONE);
+
+ dev->num_crtcs = num_crtcs;
+
+ dev->vbl_queue = malloc(sizeof(int) * num_crtcs,
+ M_DRM, M_WAITOK);
+ if (!dev->vbl_queue)
+ goto err;
+
+ dev->_vblank_count = malloc(sizeof(atomic_t) * num_crtcs,
+ M_DRM, M_WAITOK);
+ if (!dev->_vblank_count)
+ goto err;
+
+ dev->vblank_refcount = malloc(sizeof(atomic_t) * num_crtcs,
+ M_DRM, M_WAITOK);
+ if (!dev->vblank_refcount)
+ goto err;
+
+ dev->vblank_enabled = malloc(num_crtcs * sizeof(int),
+ M_DRM, M_ZERO | M_WAITOK);
+ if (!dev->vblank_enabled)
+ goto err;
+
+ dev->last_vblank = malloc(num_crtcs * sizeof(u32),
+ M_DRM, M_ZERO | M_WAITOK);
+ if (!dev->last_vblank)
+ goto err;
+
+ dev->last_vblank_wait = malloc(num_crtcs * sizeof(u32),
+ M_DRM, M_ZERO | M_WAITOK);
+ if (!dev->last_vblank_wait)
+ goto err;
+
+ dev->vblank_inmodeset = malloc(num_crtcs * sizeof(int),
+ M_DRM, M_ZERO | M_WAITOK);
+ if (!dev->vblank_inmodeset)
+ goto err;
+
+ dev->_vblank_time = malloc(num_crtcs * DRM_VBLANKTIME_RBSIZE *
+ sizeof(struct timeval), M_DRM, M_ZERO | M_WAITOK);
+ if (!dev->_vblank_time)
+ goto err;
+
+ DRM_DEBUG("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+ /* Driver specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_DEBUG("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_DEBUG("No driver support for vblank timestamp query.\n");
+
+ /* Zero per-crtc vblank stuff */
+ for (i = 0; i < num_crtcs; i++) {
+ atomic_set(&dev->_vblank_count[i], 0);
+ atomic_set(&dev->vblank_refcount[i], 0);
+ }
+
+ dev->vblank_disable_allowed = 0;
+ return 0;
+
+err:
+ drm_vblank_cleanup(dev);
+ return ret;
+}
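+/*
+ * Usage sketch, not code from this change: a KMS driver calls
+ * drm_vblank_init() once at attach time with one slot per pipe, e.g.
+ *
+ *	ret = drm_vblank_init(dev, num_pipes);
+ *	if (ret)
+ *		goto fail;
+ *
+ * where "num_pipes" is an assumed driver-specific count;
+ * drm_vblank_cleanup() releases the allocations again on detach and is
+ * also run internally on the error path above.
+ */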
+
+void
+drm_irq_vgaarb_nokms(void *cookie, bool state)
+{
+ struct drm_device *dev = cookie;
+
+#ifdef notyet
+ if (dev->driver->vgaarb_irq) {
+ dev->driver->vgaarb_irq(dev, state);
+ return;
+ }
+#endif
+
+ if (!dev->irq_enabled)
+ return;
+
+ if (state) {
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+ } else {
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
+ }
+}
+
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c irq_preinstall() and \c irq_postinstall() functions
+ * before and after the installation.
+ */
int
drm_irq_install(struct drm_device *dev)
{
@@ -84,8 +410,15 @@ drm_irq_install(struct drm_device *dev)
dev->irq_enabled = 1;
DRM_UNLOCK();
- if ((ret = dev->driver->irq_install(dev)) != 0)
- goto err;
+ if (dev->driver->irq_install) {
+ if ((ret = dev->driver->irq_install(dev)) != 0)
+ goto err;
+ } else {
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
+ }
return (0);
err:
@@ -95,6 +428,13 @@ err:
return (ret);
}
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c irq_uninstall() function, and stops the irq.
+ */
int
drm_irq_uninstall(struct drm_device *dev)
{
@@ -114,15 +454,15 @@ drm_irq_uninstall(struct drm_device *dev)
* on them gets woken up. Also make sure we update state correctly
* so that we can continue refcounting correctly.
*/
- if (dev->vblank != NULL) {
- mtx_enter(&dev->vblank->vb_lock);
- for (i = 0; i < dev->vblank->vb_num; i++) {
- wakeup(&dev->vblank->vb_crtcs[i]);
- dev->vblank->vb_crtcs[i].vbl_enabled = 0;
- dev->vblank->vb_crtcs[i].vbl_last =
+ if (dev->num_crtcs) {
+ mtx_enter(&dev->vbl_lock);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ wakeup(&dev->vbl_queue[i]);
+ dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] =
dev->driver->get_vblank_counter(dev, i);
}
- mtx_leave(&dev->vblank->vb_lock);
+ mtx_leave(&dev->vbl_lock);
}
DRM_DEBUG("irq=%d\n", dev->irq);
@@ -132,6 +472,17 @@ drm_irq_uninstall(struct drm_device *dev)
return (0);
}
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
int
drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
@@ -143,345 +494,968 @@ drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
switch (ctl->func) {
case DRM_INST_HANDLER:
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != dev->irq)
return (EINVAL);
return (drm_irq_install(dev));
case DRM_UNINST_HANDLER:
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
return (drm_irq_uninstall(dev));
default:
return (EINVAL);
}
}
+/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from the crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ */
void
-vblank_disable(void *arg)
+drm_calc_timestamping_constants(struct drm_crtc *crtc)
{
- struct drm_device *dev = (struct drm_device*)arg;
- struct drm_vblank_info *vbl = dev->vblank;
- struct drm_vblank *crtc;
- int i;
+ s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ u64 dotclock;
- mtx_enter(&vbl->vb_lock);
- for (i = 0; i < vbl->vb_num; i++) {
- crtc = &vbl->vb_crtcs[i];
+ /* Dot clock in Hz: */
+ dotclock = (u64) crtc->hwmode.clock * 1000;
- if (crtc->vbl_refs == 0 && crtc->vbl_enabled) {
- DPRINTF("%s: disabling crtc %d\n", __func__, i);
- crtc->vbl_last =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- crtc->vbl_enabled = 0;
+	/* Fields of interlaced scanout modes last only half a frame.
+	 * Double the dotclock to get half the frame-/line-/pixel durations.
+	 */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ /* Convert scanline length in pixels and video dot clock to
+ * line duration, frame duration and pixel duration in
+ * nanoseconds:
+ */
+ pixeldur_ns = (s64) 1000000000 / dotclock;
+ linedur_ns = (s64) ((u64) crtc->hwmode.crtc_htotal *
+ 1000000000) / dotclock;
+ framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+ } else
+ DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+
+ crtc->pixeldur_ns = pixeldur_ns;
+ crtc->linedur_ns = linedur_ns;
+ crtc->framedur_ns = framedur_ns;
+
+ DPRINTF("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, crtc->hwmode.crtc_htotal,
+ crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ DPRINTF("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+ (int) linedur_ns, (int) pixeldur_ns);
+}
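+
+/* Usage sketch (illustrative only): a kms driver is expected to refresh
+ * these constants whenever the true scanout timing changes, e.g. at the
+ * end of its crtc mode_set hook, so that later timestamp queries use up
+ * to date durations:
+ *
+ *	crtc->hwmode = *adjusted_mode;
+ *	drm_calc_timestamping_constants(crtc);
+ *
+ * where adjusted_mode is assumed to hold the timings actually
+ * programmed into the hardware.
+ */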
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as a no-op if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ * On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL - Invalid crtc.
+ * -EAGAIN - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int
+drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc)
+{
+ struct timeval stime, etime;
+#ifdef notyet
+ struct timeval mono_time_offset;
+#endif
+ struct drm_display_mode *mode;
+ int vbl_status, vtotal, vdisplay;
+ int vpos, hpos, i;
+ s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ bool invbl;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return -EIO;
+ }
+
+ mode = &refcrtc->hwmode;
+ vtotal = mode->crtc_vtotal;
+ vdisplay = mode->crtc_vdisplay;
+
+ /* Durations of frames, lines, pixels in nanoseconds. */
+ framedur_ns = refcrtc->framedur_ns;
+ linedur_ns = refcrtc->linedur_ns;
+ pixeldur_ns = refcrtc->pixeldur_ns;
+
+ /* If mode timing undefined, just return as no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ return -EAGAIN;
+ }
+
+ /* Get current scanout position with system timestamp.
+ * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+ * if single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /* Disable preemption to make it very likely to
+ * succeed in the first iteration even on PREEMPT_RT kernel.
+ */
+#ifdef notyet
+ preempt_disable();
+#endif
+
+ /* Get system timestamp before query. */
+ getmicrouptime(&stime);
+
+ /* Get vertical and horizontal scanout pos. vpos, hpos. */
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+ /* Get system timestamp after query. */
+ getmicrouptime(&etime);
+#ifdef notyet
+ if (!drm_timestamp_monotonic)
+ mono_time_offset = ktime_get_monotonic_offset();
+
+ preempt_enable();
+#endif
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+ crtc, vbl_status);
+ return -EIO;
}
+
+ duration_ns = timeval_to_ns(&etime) - timeval_to_ns(&stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= (s64) *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+ crtc, (int) duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = (int) duration_ns;
+
+ /* Check if in vblank area:
+ * vpos is >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+ invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+	/* Convert scanout position into time elapsed, at the moment of
+	 * the query, since start of scanout at the first display
+	 * scanline. delta_ns can be negative if start of scanout hasn't
+	 * happened yet.
+ */
+ delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+ ((vdisplay - vpos) < vtotal / 100)) {
+ delta_ns = delta_ns - framedur_ns;
+
+ /* Signal this correction as "applied". */
+ vbl_status |= 0x8;
}
- mtx_leave(&vbl->vb_lock);
+
+#ifdef notyet
+ if (!drm_timestamp_monotonic)
+ etime = ktime_sub(etime, mono_time_offset);
+#endif
+
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ *vblank_time = ns_to_timeval(timeval_to_ns(&etime) - delta_ns);
+
+ DPRINTF("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ crtc, (int)vbl_status, hpos, vpos,
+ (long)etime.tv_sec, (long)etime.tv_usec,
+ (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
+ (int)duration_ns/1000, i);
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+ vbl_status |= DRM_VBLANKTIME_INVBL;
+
+ return vbl_status;
}
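+
+/* Usage sketch (illustrative only): a kms driver's get_vblank_timestamp()
+ * hook can be a thin wrapper around this helper. Assuming a hypothetical
+ * example_crtc_for_pipe() that maps a pipe index to its drm_crtc:
+ *
+ *	int
+ *	example_get_vblank_timestamp(struct drm_device *dev, int pipe,
+ *	    int *max_error, struct timeval *vblank_time, unsigned flags)
+ *	{
+ *		struct drm_crtc *crtc = example_crtc_for_pipe(dev, pipe);
+ *
+ *		if (crtc == NULL)
+ *			return -EINVAL;
+ *		return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe,
+ *		    max_error, vblank_time, flags, crtc);
+ *	}
+ */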
-void
-drm_vblank_cleanup(struct drm_device *dev)
+struct timeval
+get_drm_timestamp(void)
{
- if (dev->vblank == NULL)
- return; /* not initialised */
+ struct timeval now;
- timeout_del(&dev->vblank->vb_disable_timer);
-
- vblank_disable(dev);
+ getmicrouptime(&now);
+#ifdef notyet
+ if (!drm_timestamp_monotonic)
+ now = ktime_sub(now, ktime_get_monotonic_offset());
+#endif
- drm_free(dev->vblank);
- dev->vblank = NULL;
+ return (now);
}
-int
-drm_vblank_init(struct drm_device *dev, int num_crtcs)
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if the timestamp originates from an uncorrected
+ * getmicrouptime() call, i.e., it isn't very precisely locked to the true
+ * vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32
+drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
{
- int i;
+ int ret;
- dev->vblank = malloc(sizeof(*dev->vblank) + (num_crtcs *
- sizeof(struct drm_vblank)), M_DRM, M_WAITOK | M_CANFAIL | M_ZERO);
- if (dev->vblank == NULL)
- return (ENOMEM);
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
- dev->vblank->vb_num = num_crtcs;
- mtx_init(&dev->vblank->vb_lock, IPL_TTY);
- timeout_set(&dev->vblank->vb_disable_timer, vblank_disable, dev);
- for (i = 0; i < num_crtcs; i++)
- TAILQ_INIT(&dev->vblank->vb_crtcs[i].vbl_events);
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+ return (u32) ret;
+ }
- return (0);
+ /* GPU high precision timestamp query unsupported or failed.
+ * Return current monotonic/gettimeofday timestamp as best estimate.
+ */
+ *tvblank = get_drm_timestamp();
+
+ return 0;
}
-u_int32_t
+/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32
drm_vblank_count(struct drm_device *dev, int crtc)
{
- return (dev->vblank->vb_crtcs[crtc].vbl_count);
+ return atomic_read(&dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Also returns the system timestamp of the vblank
+ * interval that corresponds to the current vblank counter value.
+ */
+u32
+drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+{
+ u32 cur_vblank;
+
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+ cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ DRM_READMEMORYBARRIER();
+ } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+ return cur_vblank;
}
void
+send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ struct drm_file *file_priv = e->base.file_priv;
+ MUTEX_ASSERT_LOCKED(&dev->event_lock);
+ e->event.sequence = seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ TAILQ_INSERT_TAIL(&file_priv->evlist, &e->base, link);
+ wakeup(&file_priv->evlist);
+ selwakeup(&file_priv->rsel);
+#if 0
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
+#endif
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void
+drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+ unsigned int seq;
+ if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ now = get_drm_timestamp();
+ }
+ send_vblank_event(dev, e, seq, &now);
+}
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc). Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+void
drm_update_vblank_count(struct drm_device *dev, int crtc)
{
- u_int32_t cur_vblank, diff;
+ u32 cur_vblank, diff, tslot, rc;
+ struct timeval t_vblank;
/*
- * Interrupt was disabled prior to this call, so deal with counter wrap
- * note that we may have lost a full vb_max events if
- * the register is small or the interrupts were off for a long time.
+ * Interrupts were disabled prior to this call, so deal with counter
+ * wrap if needed.
+	 * NOTE! It's possible we lost a full dev->max_vblank_count worth
+	 * of events here if the register is small or we had vblank
+	 * interrupts off for a long time.
+	 *
+	 * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This prevents races between the gpu
+	 * updating its hardware counter and us retrieving the
+	 * corresponding vblank timestamp.
*/
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
- diff = cur_vblank - dev->vblank->vb_crtcs[crtc].vbl_last;
- if (cur_vblank < dev->vblank->vb_crtcs[crtc].vbl_last)
- diff += dev->vblank->vb_max;
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
+ diff = cur_vblank - dev->last_vblank[crtc];
+ if (cur_vblank < dev->last_vblank[crtc]) {
+ diff += dev->max_vblank_count;
+
+ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+ crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ }
- dev->vblank->vb_crtcs[crtc].vbl_count += diff;
+ DPRINTF("enabling vblank interrupts on crtc %d, missed %d\n",
+ crtc, diff);
+
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * reinitialize delayed at next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
+ }
+
+// smp_mb__before_atomic_inc();
+ atomic_add(diff, &dev->_vblank_count[crtc]);
+// smp_mb__after_atomic_inc();
}
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
int
drm_vblank_get(struct drm_device *dev, int crtc)
{
- struct drm_vblank_info *vbl = dev->vblank;
- int ret = 0;
-
- if (dev->irq_enabled == 0)
- return (EINVAL);
-
- mtx_enter(&vbl->vb_lock);
- DPRINTF("%s: %d refs = %d\n", __func__, crtc,
- vbl->vb_crtcs[crtc].vbl_refs);
- vbl->vb_crtcs[crtc].vbl_refs++;
- if (vbl->vb_crtcs[crtc].vbl_refs == 1 &&
- vbl->vb_crtcs[crtc].vbl_enabled == 0) {
- if ((ret = dev->driver->enable_vblank(dev, crtc)) == 0) {
- vbl->vb_crtcs[crtc].vbl_enabled = 1;
- drm_update_vblank_count(dev, crtc);
- } else {
- vbl->vb_crtcs[crtc].vbl_refs--;
+ int ret = 0;
+
+ mtx_enter(&dev->vbl_lock);
+ /* Going from 0->1 means we have to enable interrupts again */
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ mtx_enter(&dev->vblank_time_lock);
+ if (!dev->vblank_enabled[crtc]) {
+ /* Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+			 * timestamps. Filter code in drm_handle_vblank() will
+			 * prevent double-accounting of the same vblank interval.
+ */
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DPRINTF("enabling vblank on crtc %d, ret: %d\n",
+ crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else {
+ dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+ mtx_leave(&dev->vblank_time_lock);
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ ret = -EINVAL;
}
-
}
- mtx_leave(&vbl->vb_lock);
+ mtx_leave(&dev->vbl_lock);
- return (ret);
+ return ret;
}
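+
+/* Usage sketch (illustrative only): code that depends on vblank
+ * interrupts brackets itself with a get/put pair, e.g.:
+ *
+ *	if (drm_vblank_get(dev, crtc) == 0) {
+ *		... wait for drm_vblank_count(dev, crtc) to advance ...
+ *		drm_vblank_put(dev, crtc);
+ *	}
+ *
+ * The reference keeps the interrupt enabled; dropping the last
+ * reference schedules the deferred disable (see drm_vblank_put below).
+ */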
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ */
void
drm_vblank_put(struct drm_device *dev, int crtc)
{
- mtx_enter(&dev->vblank->vb_lock);
- /* Last user schedules disable */
- DPRINTF("%s: %d refs = %d\n", __func__, crtc,
- dev->vblank->vb_crtcs[crtc].vbl_refs);
- KASSERT(dev->vblank->vb_crtcs[crtc].vbl_refs > 0);
- if (--dev->vblank->vb_crtcs[crtc].vbl_refs == 0)
- timeout_add_sec(&dev->vblank->vb_disable_timer, 5);
- mtx_leave(&dev->vblank->vb_lock);
+ BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+
+ /* Last user schedules interrupt disable */
+ if ((--dev->vblank_refcount[crtc] == 0) &&
+ (drm_vblank_offdelay > 0))
+ timeout_add_msec(&dev->vblank_disable_timer, drm_vblank_offdelay);
}
-int
-drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
+void
+drm_vblank_off(struct drm_device *dev, int crtc)
{
- struct drm_modeset_ctl *modeset = data;
- struct drm_vblank *vbl;
- int crtc, ret = 0;
+ struct drmevlist *list;
+ struct drm_pending_event *ev, *tmp;
+ struct drm_pending_vblank_event *vev;
+ struct timeval now;
+ unsigned int seq;
- /* not initialised yet, just noop */
- if (dev->vblank == NULL)
- return (0);
+ mtx_enter(&dev->vbl_lock);
+ vblank_disable_and_save(dev, crtc);
+ wakeup(&dev->vbl_queue[crtc]);
- crtc = modeset->crtc;
- if (crtc >= dev->vblank->vb_num || crtc < 0)
- return (EINVAL);
+ list = &dev->vbl_events;
+ /* Send any queued vblank events, lest the natives grow disquiet */
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ mtx_enter(&dev->event_lock);
+ for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list); ev = tmp) {
+ tmp = TAILQ_NEXT(ev, link);
+
+ vev = (struct drm_pending_vblank_event *)ev;
+
+ if (vev->pipe != crtc)
+ continue;
+		DRM_DEBUG("Sending premature vblank event on disable: "
+		    "wanted %d, current %d\n",
+		    vev->event.sequence, seq);
+ TAILQ_REMOVE(list, ev, link);
+ drm_vblank_put(dev, vev->pipe);
+ send_vblank_event(dev, vev, seq, &now);
+ }
+ mtx_leave(&dev->event_lock);
- vbl = &dev->vblank->vb_crtcs[crtc];
+ mtx_leave(&dev->vbl_lock);
+}
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void
+drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+	/* vblank is not initialized (IRQ not installed?) */
+ if (!dev->num_crtcs)
+ return;
/*
- * If interrupts are enabled/disabled between calls to this ioctl then
- * it can get nasty. So just grab a reference so that the interrupts
- * keep going through the modeset
+ * To avoid all the problems that might happen if interrupts
+ * were enabled/disabled around or between these calls, we just
+ * have the kernel take a reference on the CRTC (just once though
+	 * to avoid corrupting the count if multiple, mismatched calls occur),
+ * so that interrupts remain enabled in the interim.
*/
+ if (!dev->vblank_inmodeset[crtc]) {
+ dev->vblank_inmodeset[crtc] = 0x1;
+ if (drm_vblank_get(dev, crtc) == 0)
+ dev->vblank_inmodeset[crtc] |= 0x2;
+ }
+}
+
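+/**
+ * drm_vblank_post_modeset - undo drm_vblank_pre_modeset accounting
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Drops the reference taken in drm_vblank_pre_modeset(), if any, and
+ * allows vblank interrupts to be disabled again.
+ */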
+void
+drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+ if (dev->vblank_inmodeset[crtc]) {
+ mtx_enter(&dev->vbl_lock);
+ dev->vblank_disable_allowed = 1;
+ mtx_leave(&dev->vbl_lock);
+
+ if (dev->vblank_inmodeset[crtc] & 0x2)
+ drm_vblank_put(dev, crtc);
+
+ dev->vblank_inmodeset[crtc] = 0;
+ }
+}
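+
+/* Usage sketch (illustrative only): non-kms drivers bracket a mode
+ * switch with these two calls so vblank accounting survives the likely
+ * hardware frame counter reset:
+ *
+ *	drm_vblank_pre_modeset(dev, crtc);
+ *	... program the new mode; the counter may reset here ...
+ *	drm_vblank_post_modeset(dev, crtc);
+ */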
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets. If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+int
+drm_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_modeset_ctl *modeset = data;
+ unsigned int crtc;
+
+ /* If drm_vblank_init() hasn't been called yet, just no-op */
+ if (!dev->num_crtcs)
+ return 0;
+
+ /* KMS drivers handle this internally */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+ crtc = modeset->crtc;
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
+
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- DPRINTF("%s: pre modeset on %d\n", __func__, crtc);
- if (vbl->vbl_inmodeset == 0) {
- mtx_enter(&dev->vblank->vb_lock);
- vbl->vbl_inmodeset = 0x1;
- mtx_leave(&dev->vblank->vb_lock);
- if (drm_vblank_get(dev, crtc) == 0)
- vbl->vbl_inmodeset |= 0x2;
- }
+ drm_vblank_pre_modeset(dev, crtc);
break;
case _DRM_POST_MODESET:
- DPRINTF("%s: post modeset on %d\n", __func__, crtc);
- if (vbl->vbl_inmodeset) {
- if (vbl->vbl_inmodeset & 0x2)
- drm_vblank_put(dev, crtc);
- vbl->vbl_inmodeset = 0;
- }
+ drm_vblank_post_modeset(dev, crtc);
break;
default:
- ret = EINVAL;
- break;
+ return -EINVAL;
}
- return (ret);
+ return 0;
}
int
-drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+drm_queue_vblank_event(struct drm_device *dev, int pipe,
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
{
- struct timeval now;
- union drm_wait_vblank *vblwait = data;
- int ret, flags, crtc, seq;
-
- if (!dev->irq_enabled || dev->vblank == NULL ||
- vblwait->request.type & _DRM_VBLANK_SIGNAL)
- return (EINVAL);
-
- flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned int seq;
+ int ret;
+
+ e = malloc(sizeof *e, M_DRM, M_ZERO | M_WAITOK);
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
- if (crtc >= dev->vblank->vb_num)
- return (EINVAL);
+ e->pipe = pipe;
+ e->base.pid = DRM_CURRENTPID;
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = vblwait->request.signal;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) drm_free;
- if ((ret = drm_vblank_get(dev, crtc)) != 0)
- return (ret);
- seq = drm_vblank_count(dev, crtc);
+ mtx_enter(&dev->event_lock);
- if (vblwait->request.type & _DRM_VBLANK_RELATIVE) {
- vblwait->request.sequence += seq;
- vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ if (file_priv->event_space < sizeof e->event) {
+ ret = -EBUSY;
+ goto err_unlock;
}
- flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1<<23)) {
+ file_priv->event_space -= sizeof e->event;
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
+ if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
vblwait->request.sequence = seq + 1;
+ vblwait->reply.sequence = vblwait->request.sequence;
}
- if (flags & _DRM_VBLANK_EVENT)
- return (drm_queue_vblank_event(dev, crtc, vblwait, file_priv));
+ DPRINTF("event on vblank count %d, current %d, crtc %d\n",
+ vblwait->request.sequence, seq, pipe);
- DPRINTF("%s: %d waiting on %d, current %d\n", __func__, crtc,
- vblwait->request.sequence, drm_vblank_count(dev, crtc));
- DRM_WAIT_ON(ret, &dev->vblank->vb_crtcs[crtc], &dev->vblank->vb_lock,
- 3 * hz, "drmvblq", ((drm_vblank_count(dev, crtc) -
- vblwait->request.sequence) <= (1 << 23)) || dev->irq_enabled == 0);
+#if 0
+ trace_drm_vblank_event_queued(current->pid, pipe,
+ vblwait->request.sequence);
+#endif
- microtime(&now);
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
- DPRINTF("%s: %d done waiting, seq = %d\n", __func__, crtc,
- vblwait->reply.sequence);
+ e->event.sequence = vblwait->request.sequence;
+ if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ drm_vblank_put(dev, pipe);
+ send_vblank_event(dev, e, seq, &now);
+ vblwait->reply.sequence = seq;
+ } else {
+ /* drm_handle_vblank_events will call drm_vblank_put */
+ TAILQ_INSERT_TAIL(&dev->vbl_events, &e->base, link);
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
- drm_vblank_put(dev, crtc);
- return (ret);
+ mtx_leave(&dev->event_lock);
+
+ return 0;
+
+err_unlock:
+ mtx_leave(&dev->event_lock);
+ free(e, M_DRM);
+err_put:
+ drm_vblank_put(dev, pipe);
+ return ret;
}
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
int
-drm_queue_vblank_event(struct drm_device *dev, int crtc,
- union drm_wait_vblank *vblwait, struct drm_file *file_priv)
+drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct drm_pending_vblank_event *vev;
- struct timeval now;
- u_int seq;
-
-
- vev = drm_calloc(1, sizeof(*vev));
- if (vev == NULL)
- return (ENOMEM);
+ union drm_wait_vblank *vblwait = data;
+ int ret;
+ unsigned int flags, seq, crtc, high_crtc;
+
+ if (!dev->irq_enabled)
+ return -EINVAL;
+
+ if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+ return -EINVAL;
+
+ if (vblwait->request.type &
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK)) {
+ DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+ vblwait->request.type,
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
+ return -EINVAL;
+ }
- vev->event.base.type = DRM_EVENT_VBLANK;
- vev->event.base.length = sizeof(vev->event);
- vev->event.user_data = vblwait->request.signal;
- vev->base.event = &vev->event.base;
- vev->base.file_priv = file_priv;
- vev->base.destroy = (void (*) (struct drm_pending_event *))drm_free;
+ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+ high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_crtc)
+ crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ else
+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
+
+ ret = drm_vblank_get(dev, crtc);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+ return ret;
+ }
+ seq = drm_vblank_count(dev, crtc);
- microtime(&now);
+ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+ case _DRM_VBLANK_RELATIVE:
+ vblwait->request.sequence += seq;
+ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
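+		/* FALLTHROUGH */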
+ case _DRM_VBLANK_ABSOLUTE:
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
- mtx_enter(&dev->event_lock);
- if (file_priv->event_space < sizeof(vev->event)) {
- mtx_leave(&dev->event_lock);
- drm_free(vev);
- return (ENOMEM);
+ if (flags & _DRM_VBLANK_EVENT) {
+		/* Must hold on to the vblank ref until the event fires;
+		 * drm_vblank_put() will be called asynchronously.
+		 */
+ return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
}
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ }
- seq = drm_vblank_count(dev, crtc);
- file_priv->event_space -= sizeof(vev->event);
+ DPRINTF("waiting on vblank count %d, crtc %d\n",
+ vblwait->request.sequence, crtc);
+ dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+ DRM_WAIT_ON(ret, &dev->vbl_queue[crtc], &dev->vbl_lock, 3 * hz,
+ "drmvblq", (((drm_vblank_count(dev, crtc) -
+ vblwait->request.sequence) <= (1 << 23)) ||
+ !dev->irq_enabled));
- DPRINTF("%s: queueing event %d on crtc %d\n", __func__, seq, crtc);
+ if (ret != -EINTR) {
+ struct timeval now;
- if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1 << 23)) {
- vblwait->request.sequence = seq + 1;
- vblwait->reply.sequence = vblwait->request.sequence;
- }
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+ vblwait->reply.tval_sec = now.tv_sec;
+ vblwait->reply.tval_usec = now.tv_usec;
- vev->event.sequence = vblwait->request.sequence;
- if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- vev->event.tv_sec = now.tv_sec;
- vev->event.tv_usec = now.tv_usec;
- DPRINTF("%s: already passed, dequeuing: crtc %d, value %d\n",
- __func__, crtc, seq);
- drm_vblank_put(dev, crtc);
- TAILQ_INSERT_TAIL(&file_priv->evlist, &vev->base, link);
- wakeup(&file_priv->evlist);
- selwakeup(&file_priv->rsel);
+ DPRINTF("returning %d to client\n",
+ vblwait->reply.sequence);
} else {
- TAILQ_INSERT_TAIL(&dev->vblank->vb_crtcs[crtc].vbl_events,
- &vev->base, link);
+ DPRINTF("vblank wait interrupted by signal\n");
}
- mtx_leave(&dev->event_lock);
- return (0);
+done:
+ drm_vblank_put(dev, crtc);
+ return ret;
}
void
drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
- struct drmevlist *list;
- struct drm_pending_event *ev, *tmp;
- struct drm_pending_vblank_event *vev;
- struct timeval now;
- u_int seq;
+ struct drmevlist *list;
+ struct drm_pending_event *ev, *tmp;
+ struct drm_pending_vblank_event *vev;
+ struct timeval now;
+ unsigned int seq;
- list = &dev->vblank->vb_crtcs[crtc].vbl_events;
- microtime(&now);
- seq = drm_vblank_count(dev, crtc);
+ list = &dev->vbl_events;
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
mtx_enter(&dev->event_lock);
+
for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list); ev = tmp) {
tmp = TAILQ_NEXT(ev, link);
vev = (struct drm_pending_vblank_event *)ev;
- if ((seq - vev->event.sequence) > (1 << 23))
+ if (vev->pipe != crtc)
+ continue;
+		if ((seq - vev->event.sequence) > (1 << 23))
continue;
- DPRINTF("%s: got vblank event on crtc %d, value %d\n",
- __func__, crtc, seq);
-
- vev->event.sequence = seq;
- vev->event.tv_sec = now.tv_sec;
- vev->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, crtc);
+
+ DPRINTF("vblank event on %d, current %d\n",
+ vev->event.sequence, seq);
+
TAILQ_REMOVE(list, ev, link);
- TAILQ_INSERT_TAIL(&ev->file_priv->evlist, ev, link);
- wakeup(&ev->file_priv->evlist);
- selwakeup(&ev->file_priv->rsel);
+ drm_vblank_put(dev, vev->pipe);
+ send_vblank_event(dev, vev, seq, &now);
}
+
mtx_leave(&dev->event_lock);
+
+// trace_drm_vblank_event(crtc, seq);
}
-void
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+bool
drm_handle_vblank(struct drm_device *dev, int crtc)
{
- /*
- * XXX if we had proper atomic operations this mutex wouldn't
- * XXX need to be held.
+ u32 vblcount;
+ s64 diff_ns;
+ struct timeval tvblank;
+
+ if (!dev->num_crtcs)
+ return false;
+
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ mtx_enter(&dev->vblank_time_lock);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!dev->vblank_enabled[crtc]) {
+ mtx_leave(&dev->vblank_time_lock);
+ return false;
+ }
+
+ /* Fetch corresponding timestamp for this vblank interval from
+ * driver and store it in proper slot of timestamp ringbuffer.
*/
- mtx_enter(&dev->vblank->vb_lock);
- dev->vblank->vb_crtcs[crtc].vbl_count++;
- wakeup(&dev->vblank->vb_crtcs[crtc]);
- mtx_leave(&dev->vblank->vb_lock);
+
+ /* Get current timestamp and count. */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* Update vblank timestamp and count if at least
+ * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+ * difference between last stored timestamp and current
+ * timestamp. A smaller difference means basically
+ * identical timestamps. Happens if this vblank has
+ * been already processed and this is a redundant call,
+ * e.g., due to spurious vblank interrupts. We need to
+ * ignore those for accounting.
+ */
+ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+// smp_mb__before_atomic_inc();
+ atomic_inc(&dev->_vblank_count[crtc]);
+// smp_mb__after_atomic_inc();
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
+
+ wakeup(&dev->vbl_queue[crtc]);
drm_handle_vblank_events(dev, crtc);
+
+ mtx_leave(&dev->vblank_time_lock);
+ return true;
}
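+
+/* Usage sketch (illustrative only): a driver's interrupt handler calls
+ * drm_handle_vblank() once per vblank interrupt it decodes, e.g.:
+ *
+ *	if (status & EXAMPLE_PIPE_A_VBLANK_BIT)
+ *		drm_handle_vblank(dev, 0);
+ *	if (status & EXAMPLE_PIPE_B_VBLANK_BIT)
+ *		drm_handle_vblank(dev, 1);
+ *
+ * The EXAMPLE_PIPE_*_VBLANK_BIT names are stand-ins for the hardware's
+ * actual interrupt status bits.
+ */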
diff --git a/sys/dev/pci/drm/drm_linux_list.h b/sys/dev/pci/drm/drm_linux_list.h
new file mode 100644
index 00000000000..a0bc521b329
--- /dev/null
+++ b/sys/dev/pci/drm/drm_linux_list.h
@@ -0,0 +1,175 @@
+/* $OpenBSD: drm_linux_list.h,v 1.4 2013/03/18 12:36:51 jsg Exp $ */
+/* drm_linux_list.h -- linux list functions for the BSDs.
+ * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
+ */
+/*-
+ * Copyright 2003 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+#ifndef _DRM_LINUX_LIST_H_
+#define _DRM_LINUX_LIST_H_
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define list_entry(ptr, type, member) container_of(ptr,type,member)
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+static inline void
+INIT_LIST_HEAD(struct list_head *head) {
+ (head)->next = head;
+ (head)->prev = head;
+}
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define DRM_LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline int
+list_empty(const struct list_head *head) {
+ return (head)->next == head;
+}
+
+static inline void
+list_add(struct list_head *new, struct list_head *head) {
+ (head)->next->prev = new;
+ (new)->next = (head)->next;
+ (new)->prev = head;
+ (head)->next = new;
+}
+
+static inline void
+list_add_tail(struct list_head *entry, struct list_head *head) {
+ (entry)->prev = (head)->prev;
+ (entry)->next = head;
+ (head)->prev->next = entry;
+ (head)->prev = entry;
+}
+
+static inline void
+list_del(struct list_head *entry) {
+ (entry)->next->prev = (entry)->prev;
+ (entry)->prev->next = (entry)->next;
+}
+
+static inline void list_replace(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ list_del(list);
+ list_add(list, head);
+}
+
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ list_del(list);
+ list_add_tail(list, head);
+}
+
+static inline void
+list_del_init(struct list_head *entry) {
+ (entry)->next->prev = (entry)->prev;
+ (entry)->prev->next = (entry)->next;
+ INIT_LIST_HEAD(entry);
+}
+
+#define list_for_each(entry, head) \
+ for (entry = (head)->next; entry != head; entry = (entry)->next)
+
+#define list_for_each_prev(entry, head) \
+ for (entry = (head)->prev; entry != (head); \
+ entry = entry->prev)
+
+#define list_for_each_safe(entry, temp, head) \
+ for (entry = (head)->next, temp = (entry)->next; \
+ entry != head; \
+ entry = temp, temp = entry->next)
+
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, __typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, __typeof(*pos), member))
+
+#define list_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = list_entry(pos->member.prev, __typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.prev, __typeof(*pos), member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, __typeof(*pos), member), \
+ n = list_entry(pos->member.next, __typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, __typeof(*n), member))
+
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+
+static inline void
+__list_splice(const struct list_head *list, struct list_head *prev,
+ struct list_head *next)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+static inline void
+list_splice(const struct list_head *list, struct list_head *head)
+{
+ if (list_empty(list))
+ return;
+
+ __list_splice(list, head, head->next);
+}
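+
+/* Usage sketch (illustrative only): the _safe iterators allow the
+ * current entry to be unlinked while walking, which the plain
+ * iterators do not. Assuming a hypothetical struct item that embeds a
+ * struct list_head named entry:
+ *
+ *	struct item *it, *tmp;
+ *
+ *	list_for_each_entry_safe(it, tmp, &head, entry) {
+ *		list_del(&it->entry);
+ *		free(it, M_DRM);
+ *	}
+ */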
+
+void drm_list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
+ struct list_head *a, struct list_head *b));
+
+#endif /* _DRM_LINUX_LIST_H_ */
diff --git a/sys/dev/pci/drm/drm_mm.c b/sys/dev/pci/drm/drm_mm.c
new file mode 100644
index 00000000000..c9bda339842
--- /dev/null
+++ b/sys/dev/pci/drm/drm_mm.c
@@ -0,0 +1,562 @@
+/* $OpenBSD: drm_mm.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple and there might be substantial
+ * performance gains if a smarter free list is implemented. Currently it is just an
+ * unordered stack of free regions. This could easily be improved if an
+ * RB-tree were used instead, at least if we expect heavy fragmentation.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_linux_list.h"
+#include "drm_mm.h"
+
+#define MM_UNUSED_TARGET 4
+
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+ struct drm_mm_node *child;
+
+ child = malloc(sizeof(*child), M_DRM, M_ZERO |
+ (atomic ? M_NOWAIT : M_WAITOK));
+
+ if (unlikely(child == NULL)) {
+ mtx_enter(&mm->unused_lock);
+ if (list_empty(&mm->unused_nodes))
+ child = NULL;
+ else {
+ child =
+ list_entry(mm->unused_nodes.next,
+ struct drm_mm_node, node_list);
+ list_del(&child->node_list);
+ --mm->num_unused;
+ }
+ mtx_leave(&mm->unused_lock);
+ }
+ return child;
+}
+
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+
+ mtx_enter(&mm->unused_lock);
+ while (mm->num_unused < MM_UNUSED_TARGET) {
+ mtx_leave(&mm->unused_lock);
+ node = malloc(sizeof(*node), M_DRM, M_WAITOK);
+ mtx_enter(&mm->unused_lock);
+
+ if (unlikely(node == NULL)) {
+ int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+ mtx_leave(&mm->unused_lock);
+ return ret;
+ }
+ ++mm->num_unused;
+ list_add_tail(&node->node_list, &mm->unused_nodes);
+ }
+ mtx_leave(&mm->unused_lock);
+ return 0;
+}
+
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ return hole_node->start + hole_node->size;
+}
+
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ struct drm_mm_node *next_node =
+ list_entry(hole_node->node_list.next, struct drm_mm_node,
+ node_list);
+
+ return next_node->start;
+}
+
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+ KASSERT(hole_node->hole_follows && !node->allocated);
+
+ if (alignment)
+ tmp = hole_start % alignment;
+
+ if (!tmp) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
+ } else
+ wasted = alignment - tmp;
+
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ KASSERT(node->start + node->size <= hole_end);
+
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ } else {
+ node->hole_follows = 0;
+ }
+}
+
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ int atomic)
+{
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ drm_mm_insert_helper(hole_node, node, size, alignment);
+
+ return node;
+}
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ struct drm_mm_node *hole_node;
+
+ hole_node = drm_mm_search_free(mm, size, alignment, 0);
+ if (!hole_node)
+ return -ENOSPC;
+
+ drm_mm_insert_helper(hole_node, node, size, alignment);
+
+ return 0;
+}
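+
+/* Usage sketch (illustrative only): callers embed a struct drm_mm_node
+ * in their own object and let the manager fill in start and size.
+ * Assuming a hypothetical struct example_buf with an embedded
+ * struct drm_mm_node named node, carving out a 64k block aligned
+ * to 4k looks like:
+ *
+ *	struct example_buf *buf;
+ *	...
+ *	if (drm_mm_insert_node(&mm, &buf->node, 64 * 1024, 4096) != 0)
+ *		return -ENOSPC;
+ */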
+
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+ KASSERT(hole_node->hole_follows && !node->allocated);
+
+ if (hole_start < start)
+ wasted += start - hole_start;
+ if (alignment)
+ tmp = (hole_start + wasted) % alignment;
+
+ if (tmp)
+ wasted += alignment - tmp;
+
+ if (!wasted) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
+ }
+
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ KASSERT(node->start + node->size <= hole_end);
+ KASSERT(node->start + node->size <= end);
+
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ } else {
+ node->hole_follows = 0;
+ }
+}
+
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
+
+ return node;
+}
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ struct drm_mm_node *hole_node;
+
+ hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+ start, end, 0);
+ if (!hole_node)
+ return -ENOSPC;
+
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
+
+ return 0;
+}
+
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ KASSERT(!node->scanned_block && !node->scanned_prev_free
+ && !node->scanned_next_free);
+
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+ if (node->hole_follows) {
+ KASSERT(drm_mm_hole_node_start(node)
+ != drm_mm_hole_node_end(node));
+ list_del(&node->hole_stack);
+ } else
+ KASSERT(drm_mm_hole_node_start(node)
+ == drm_mm_hole_node_end(node));
+
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
+}
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+
+ drm_mm_remove_node(node);
+
+ mtx_enter(&mm->unused_lock);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&node->node_list, &mm->unused_nodes);
+ ++mm->num_unused;
+ } else
+ free(node, M_DRM);
+ mtx_leave(&mm->unused_lock);
+}
+
+static int check_free_hole(unsigned long start, unsigned long end,
+ unsigned long size, unsigned alignment)
+{
+ unsigned wasted = 0;
+
+ if (end - start < size)
+ return 0;
+
+ if (alignment) {
+ unsigned tmp = start % alignment;
+ if (tmp)
+ wasted = alignment - tmp;
+ }
+
+ if (end >= start + size + wasted) {
+ return 1;
+ }
+
+ return 0;
+}
+
+
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment, int best_match)
+{
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ KASSERT(entry->hole_follows);
+ if (!check_free_hole(drm_mm_hole_node_start(entry),
+ drm_mm_hole_node_end(entry),
+ size, alignment))
+ continue;
+
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+
+ return best;
+}
+
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match)
+{
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+
+ KASSERT(!mm->scanned_blocks);
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+ start : drm_mm_hole_node_start(entry);
+ unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+ end : drm_mm_hole_node_end(entry);
+
+ KASSERT(entry->hole_follows);
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
+ continue;
+
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+
+ return best;
+}
+
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->hole_stack, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+ unsigned alignment)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+ mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
+}
+
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
+}
+
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
+ unsigned long adj_start;
+ unsigned long adj_end;
+
+ mm->scanned_blocks++;
+
+ KASSERT(!node->scanned_block);
+ node->scanned_block = 1;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+
+ hole_start = drm_mm_hole_node_start(prev_node);
+ hole_end = drm_mm_hole_node_end(prev_node);
+ if (mm->scan_check_range) {
+ adj_start = hole_start < mm->scan_start ?
+ mm->scan_start : hole_start;
+ adj_end = hole_end > mm->scan_end ?
+ mm->scan_end : hole_end;
+ } else {
+ adj_start = hole_start;
+ adj_end = hole_end;
+ }
+
+	if (check_free_hole(adj_start, adj_end,
+ mm->scan_size, mm->scan_alignment)) {
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_size = hole_end;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ mm->scanned_blocks--;
+
+ KASSERT(node->scanned_block);
+ node->scanned_block = 0;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ INIT_LIST_HEAD(&node->node_list);
+ list_add(&node->node_list, &prev_node->node_list);
+
+	/* Only need to check for containment because start & size for the
+	 * complete resulting free block (not just the desired part) is
+	 * stored. */
+ if (node->start >= mm->scan_hit_start &&
+ node->start + node->size
+ <= mm->scan_hit_start + mm->scan_hit_size) {
+ return 1;
+ }
+
+ return 0;
+}
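+
+/* Usage sketch (illustrative only): the scan API builds an eviction
+ * roster. Candidate blocks are speculatively added until a big enough
+ * hole would form, then every added block is removed again in reverse
+ * order; blocks for which drm_mm_scan_remove_block() returns 1 overlap
+ * the found hole and must be evicted before the new allocation can
+ * succeed. pick_candidate() and evict() stand in for driver logic:
+ *
+ *	drm_mm_init_scan(&mm, size, alignment);
+ *	while ((node = pick_candidate()) != NULL)
+ *		if (drm_mm_scan_add_block(node))
+ *			break;
+ *	... then, for each added block in reverse order ...
+ *	if (drm_mm_scan_remove_block(node))
+ *		evict(node);
+ */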
+
+int drm_mm_clean(struct drm_mm * mm)
+{
+ struct list_head *head = &mm->head_node.node_list;
+
+ return (head->next->next == head);
+}
+
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+ INIT_LIST_HEAD(&mm->hole_stack);
+ INIT_LIST_HEAD(&mm->unused_nodes);
+ mm->num_unused = 0;
+ mm->scanned_blocks = 0;
+ mtx_init(&mm->unused_lock, IPL_NONE);
+
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+ return 0;
+}
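+
+/* Usage sketch (illustrative only): a manager covering the range
+ * [start, start + size) is set up once and then handed allocation
+ * requests; aperture_size is a stand-in for the managed range's size:
+ *
+ *	struct drm_mm mm;
+ *
+ *	drm_mm_init(&mm, 0, aperture_size);
+ *	... drm_mm_insert_node() / drm_mm_put_block() as needed ...
+ *	drm_mm_takedown(&mm);
+ */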
+
+void drm_mm_takedown(struct drm_mm * mm)
+{
+ struct drm_mm_node *entry, *next;
+
+ if (!list_empty(&mm->head_node.node_list)) {
+ DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+ return;
+ }
+
+ mtx_enter(&mm->unused_lock);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+ list_del(&entry->node_list);
+ free(entry, M_DRM);
+ --mm->num_unused;
+ }
+ mtx_leave(&mm->unused_lock);
+
+// mtx_destroy(&mm->unused_lock);
+
+ KASSERT(mm->num_unused == 0);
+}
diff --git a/sys/dev/pci/drm/drm_mm.h b/sys/dev/pci/drm/drm_mm.h
new file mode 100644
index 00000000000..28347c064a0
--- /dev/null
+++ b/sys/dev/pci/drm/drm_mm.h
@@ -0,0 +1,184 @@
+/* $OpenBSD: drm_mm.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_MM_H_
+#define _DRM_MM_H_
+
+struct drm_mm_node;
+
+struct drm_mm_node {
+ struct list_head node_list;
+ struct list_head hole_stack;
+ unsigned hole_follows : 1;
+ unsigned scanned_block : 1;
+ unsigned scanned_prev_free : 1;
+ unsigned scanned_next_free : 1;
+ unsigned scanned_preceeds_hole : 1;
+ unsigned allocated : 1;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+};
+
+struct drm_mm {
+ struct list_head hole_stack;
+ struct drm_mm_node head_node;
+ struct list_head unused_nodes;
+ int num_unused;
+ struct mutex unused_lock;
+ unsigned int scan_check_range : 1;
+ unsigned scan_alignment;
+ unsigned long scan_size;
+ unsigned long scan_hit_start;
+ unsigned scan_hit_size;
+ unsigned scanned_blocks;
+ unsigned long scan_start;
+ unsigned long scan_end;
+ struct drm_mm_node *prev_scanned_node;
+};
+
+static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+{
+ return node->allocated;
+}
+
+static inline bool drm_mm_initialized(struct drm_mm *mm)
+{
+ return (mm->hole_stack.next != NULL);
+}
+#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+ &(mm)->head_node.node_list, \
+ node_list)
+#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
+ for (entry = (mm)->prev_scanned_node, \
+ n = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL; \
+ entry != NULL; entry = n, \
+ n = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL)
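+
+/*
+ * Example (a sketch; "mm" is assumed to be an initialized
+ * struct drm_mm): walk every allocated node.
+ *
+ *	struct drm_mm_node *entry;
+ *
+ *	drm_mm_for_each_node(entry, &mm)
+ *		DRM_DEBUG("node %lu+%lu\n", entry->start, entry->size);
+ */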
+
+/*
+ * Basic range manager support (drm_mm.c)
+ */
+extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic);
+static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+ return drm_mm_get_block_generic(parent, size, alignment, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+ return drm_mm_get_block_generic(parent, size, alignment, 1);
+}
+static inline struct drm_mm_node *drm_mm_get_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment,
+ start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment,
+ start, end, 1);
+}
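+
+/*
+ * The classic two-step allocation pattern (a sketch; a NULL return
+ * from either call means no fitting hole was found):
+ *
+ *	struct drm_mm_node *hole, *node;
+ *
+ *	hole = drm_mm_search_free(mm, size, alignment, 0);
+ *	if (hole != NULL)
+ *		node = drm_mm_get_block(hole, size, alignment);
+ */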
+extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment);
+extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end);
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ int best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+ unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
+ unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
+ unsigned long size, int atomic);
+extern int drm_mm_pre_get(struct drm_mm *mm);
+
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+{
+ return block->mm;
+}
+
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+ unsigned alignment);
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+int drm_mm_scan_add_block(struct drm_mm_node *node);
+int drm_mm_scan_remove_block(struct drm_mm_node *node);
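+
+/*
+ * Eviction scan sketch, modelled on how the i915 code drives this
+ * interface ("candidates" and "node" are hypothetical caller state):
+ * feed nodes to the scanner until it reports a hit, then call
+ * drm_mm_scan_remove_block() on every added node in reverse order;
+ * the nodes for which it returns non-zero form the hole and must be
+ * evicted.
+ *
+ *	drm_mm_init_scan(mm, size, alignment);
+ *	list_for_each_entry(node, &candidates, node_list) {
+ *		if (drm_mm_scan_add_block(node))
+ *			break;
+ *	}
+ */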
+
+#endif
diff --git a/sys/dev/pci/drm/drm_mode.h b/sys/dev/pci/drm/drm_mode.h
index c27cd0d7400..75787c29f0a 100644
--- a/sys/dev/pci/drm/drm_mode.h
+++ b/sys/dev/pci/drm/drm_mode.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_mode.h,v 1.1 2013/01/09 10:33:42 jsg Exp $ */
+/* $OpenBSD: drm_mode.h,v 1.2 2013/03/18 12:36:51 jsg Exp $ */
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
@@ -163,6 +163,7 @@ struct drm_mode_get_plane_res {
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
struct drm_mode_get_encoder {
uint32_t encoder_id;
@@ -200,6 +201,7 @@ struct drm_mode_get_encoder {
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
+#define DRM_MODE_CONNECTOR_VIRTUAL 15
struct drm_mode_get_connector {
@@ -227,6 +229,7 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
+#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
struct drm_mode_property_enum {
uint64_t value;
@@ -251,6 +254,21 @@ struct drm_mode_connector_set_property {
uint32_t connector_id;
};
+struct drm_mode_obj_get_properties {
+ uint64_t props_ptr;
+ uint64_t prop_values_ptr;
+ uint32_t count_props;
+ uint32_t obj_id;
+ uint32_t obj_type;
+};
+
+struct drm_mode_obj_set_property {
+ uint64_t value;
+ uint32_t prop_id;
+ uint32_t obj_id;
+ uint32_t obj_type;
+};
+
struct drm_mode_get_blob {
uint32_t blob_id;
uint32_t length;
@@ -340,8 +358,9 @@ struct drm_mode_mode_cmd {
struct drm_mode_modeinfo mode;
};
-#define DRM_MODE_CURSOR_BO (1<<0)
-#define DRM_MODE_CURSOR_MOVE (1<<1)
+#define DRM_MODE_CURSOR_BO 0x01
+#define DRM_MODE_CURSOR_MOVE 0x02
+#define DRM_MODE_CURSOR_FLAGS 0x03
/*
* depending on the value in flags different members are used.
diff --git a/sys/dev/pci/drm/drm_modes.c b/sys/dev/pci/drm/drm_modes.c
new file mode 100644
index 00000000000..50918d845cb
--- /dev/null
+++ b/sys/dev/pci/drm/drm_modes.c
@@ -0,0 +1,1359 @@
+/* $OpenBSD: drm_modes.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+
+#define KHZ2PICOS(a) (1000000000UL/(a))
+
+void drm_mode_validate_clocks(struct drm_device *, struct list_head *,
+ int *, int *, int);
+bool drm_mode_parse_command_line_for_connector(const char *,
+ struct drm_connector *, struct drm_cmdline_mode *);
+struct drm_display_mode *
+ drm_mode_create_from_cmdline_mode(struct drm_device *,
+ struct drm_cmdline_mode *);
+int drm_mode_compare(struct drm_display_mode *, struct drm_display_mode*);
+long simple_strtol(const char *, char **, int);
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void
+drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+ "0x%x 0x%x\n",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+
+/**
+ * drm_cvt_mode - create a modeline based on the CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether to use reduced blanking
+ * @interlaced: whether interlace is supported
+ * @margins: whether margins (borders) are requested
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the CVT algorithm.
+ *
+ * This function generates a modeline based on the CVT algorithm from
+ * the requested hdisplay, vdisplay and vrefresh values.
+ * It is based on the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge, April 9, 2003, available at
+ * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
+ *
+ * It is a translation of xf86CVTmode from
+ * xserver/hw/xfree86/modes/xf86cvt.c to integer arithmetic.
+ */
+#define HV_FACTOR 1000
+struct drm_display_mode *
+drm_cvt_mode(struct drm_device *dev, int hdisplay,
+ int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins)
+{
+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define CVT_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define CVT_H_GRANULARITY 8
+ /* 3) Minimum vertical porch (lines) - default 3 */
+#define CVT_MIN_V_PORCH 3
+ /* 4) Minimum number of vertical back porch lines - default 6 */
+#define CVT_MIN_V_BPORCH 6
+ /* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP 250
+ struct drm_display_mode *drm_mode;
+ unsigned int vfieldrate, hperiod;
+ int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+ int interlace;
+
+ /* allocate the drm_display_mode structure. If failure, we will
+ * return directly
+ */
+ drm_mode = drm_mode_create(dev);
+ if (!drm_mode)
+ return NULL;
+
+ /* the CVT default refresh rate is 60Hz */
+ if (!vrefresh)
+ vrefresh = 60;
+
+ /* the required field fresh rate */
+ if (interlaced)
+ vfieldrate = vrefresh * 2;
+ else
+ vfieldrate = vrefresh;
+
+ /* horizontal pixels */
+ hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+ /* determine the left&right borders */
+ hmargin = 0;
+ if (margins) {
+ hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+ hmargin -= hmargin % CVT_H_GRANULARITY;
+ }
+ /* find the total active pixels */
+ drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+ /* find the number of lines per field */
+ if (interlaced)
+ vdisplay_rnd = vdisplay / 2;
+ else
+ vdisplay_rnd = vdisplay;
+
+ /* find the top & bottom borders */
+ vmargin = 0;
+ if (margins)
+ vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+ drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+ /* Interlaced */
+ if (interlaced)
+ interlace = 1;
+ else
+ interlace = 0;
+
+ /* Determine VSync Width from aspect ratio */
+ if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+ vsync = 4;
+ else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+ vsync = 5;
+ else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+ vsync = 6;
+ else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+ vsync = 7;
+ else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+ vsync = 7;
+ else /* custom */
+ vsync = 10;
+
+ if (!reduced) {
+ /* simplify the GTF calculation */
+ /* 4) Minimum time of vertical sync + back porch interval (µs)
+ * default 550.0
+ */
+ int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP 550
+ /* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE 8
+ unsigned int hblank_percentage;
+ int vsyncandback_porch, vback_porch, hblank;
+
+ /* estimated the horizontal period */
+ tmp1 = HV_FACTOR * 1000000 -
+ CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+ tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+ interlace;
+ hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+ tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+ /* 9. Find number of lines in sync + backporch */
+ if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+ vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+ else
+ vsyncandback_porch = tmp1;
+ /* 10. Find number of lines in back porch */
+ vback_porch = vsyncandback_porch - vsync;
+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+ vsyncandback_porch + CVT_MIN_V_PORCH;
+ /* 5) Definition of Horizontal blanking time limitation */
+ /* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR 600
+ /* Offset (%) - default 40 */
+#define CVT_C_FACTOR 40
+ /* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR 128
+ /* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR 20
+#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+ CVT_J_FACTOR)
+ /* 12. Find ideal blanking duty cycle from formula */
+ hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+ hperiod / 1000;
+ /* 13. Blanking time */
+ if (hblank_percentage < 20 * HV_FACTOR)
+ hblank_percentage = 20 * HV_FACTOR;
+ hblank = drm_mode->hdisplay * hblank_percentage /
+ (100 * HV_FACTOR - hblank_percentage);
+ hblank -= hblank % (2 * CVT_H_GRANULARITY);
+ /* 14. find the total pixes per line */
+ drm_mode->htotal = drm_mode->hdisplay + hblank;
+ drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+ drm_mode->hsync_start = drm_mode->hsync_end -
+ (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+ drm_mode->hsync_start += CVT_H_GRANULARITY -
+ drm_mode->hsync_start % CVT_H_GRANULARITY;
+ /* fill the Vsync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ } else {
+ /* Reduced blanking */
+ /* Minimum vertical blanking interval time (µs)- default 460 */
+#define CVT_RB_MIN_VBLANK 460
+ /* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC 32
+ /* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK 160
+ /* Fixed number of lines for vertical front porch - default 3 */
+#define CVT_RB_VFPORCH 3
+ int vbilines;
+ int tmp1, tmp2;
+ /* 8. Estimate Horizontal period. */
+ tmp1 = HV_FACTOR * 1000000 -
+ CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+ tmp2 = vdisplay_rnd + 2 * vmargin;
+ hperiod = tmp1 / (tmp2 * vfieldrate);
+ /* 9. Find number of lines in vertical blanking */
+ vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+ /* 10. Check if vertical blanking is sufficient */
+ if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+ vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+ /* 11. Find total number of lines in vertical field */
+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+ /* 12. Find total number of pixels in a line */
+ drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+ /* Fill in HSync values */
+ drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+ /* Fill in VSync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ }
+ /* 15/13. Find pixel clock frequency (kHz for xf86) */
+ drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+ drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+ /* 18/16. Find actual vertical frame frequency */
+ /* ignore - just set the mode flag for interlaced */
+ if (interlaced) {
+ drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+ /* Fill the mode line name */
+ drm_mode_set_name(drm_mode);
+ if (reduced)
+ drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ else
+ drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NHSYNC);
+
+ return drm_mode;
+}
+EXPORT_SYMBOL(drm_cvt_mode);
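+
+/*
+ * Example (a sketch; "dev" is assumed to be a valid drm_device):
+ * a CVT modeline for 1024x768 at the default 60Hz, without reduced
+ * blanking, interlace or margins.
+ *
+ *	struct drm_display_mode *mode;
+ *
+ *	mode = drm_cvt_mode(dev, 1024, 768, 60, false, false, false);
+ *	if (mode != NULL)
+ *		drm_mode_debug_printmodeline(mode);
+ */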
+
+/**
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether interlace is supported
+ * @margins: desired margin size
+ * @GTF_[MCKJ]: extended GTF formula parameters
+ *
+ * LOCKING:
+ * none.
+ *
+ * return the modeline based on full GTF algorithm.
+ *
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
+ */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{
+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define GTF_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define GTF_CELL_GRAN 8
+ /* 3) Minimum vertical porch (lines) - default 3 */
+#define GTF_MIN_V_PORCH 1
+ /* width of vsync in lines */
+#define V_SYNC_RQD 3
+ /* width of hsync as % of total line */
+#define H_SYNC_PERCENT 8
+ /* min time of vsync + back porch (microsec) */
+#define MIN_VSYNC_PLUS_BP 550
+ /* C' and M' are part of the Blanking Duty Cycle computation */
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+ struct drm_display_mode *drm_mode;
+ unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
+ int top_margin, bottom_margin;
+ int interlace;
+ unsigned int hfreq_est;
+ int vsync_plus_bp, vback_porch;
+ unsigned int vtotal_lines, vfieldrate_est, hperiod;
+ unsigned int vfield_rate, vframe_rate;
+ int left_margin, right_margin;
+ unsigned int total_active_pixels, ideal_duty_cycle;
+ unsigned int hblank, total_pixels, pixel_freq;
+ int hsync, hfront_porch, vodd_front_porch_lines;
+ unsigned int tmp1, tmp2;
+
+ drm_mode = drm_mode_create(dev);
+ if (!drm_mode)
+ return NULL;
+
+ /* 1. In order to give correct results, the number of horizontal
+ * pixels requested is first processed to ensure that it is divisible
+ * by the character size, by rounding it to the nearest character
+ * cell boundary:
+ */
+ hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+ hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
+
+ /* 2. If interlace is requested, the number of vertical lines assumed
+ * by the calculation must be halved, as the computation calculates
+ * the number of vertical lines per field.
+ */
+ if (interlaced)
+ vdisplay_rnd = vdisplay / 2;
+ else
+ vdisplay_rnd = vdisplay;
+
+ /* 3. Find the frame rate required: */
+ if (interlaced)
+ vfieldrate_rqd = vrefresh * 2;
+ else
+ vfieldrate_rqd = vrefresh;
+
+ /* 4. Find number of lines in Top margin: */
+ top_margin = 0;
+ if (margins)
+ top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+ 1000;
+ /* 5. Find number of lines in bottom margin: */
+ bottom_margin = top_margin;
+
+ /* 6. If interlace is required, then set variable interlace: */
+ if (interlaced)
+ interlace = 1;
+ else
+ interlace = 0;
+
+ /* 7. Estimate the Horizontal frequency */
+ {
+ tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
+ tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
+ 2 + interlace;
+ hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
+ }
+
+ /* 8. Find the number of lines in V sync + back porch */
+ /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
+ vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
+ vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
+ /* 9. Find the number of lines in V back porch alone: */
+ vback_porch = vsync_plus_bp - V_SYNC_RQD;
+ /* 10. Find the total number of lines in Vertical field period: */
+ vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
+ vsync_plus_bp + GTF_MIN_V_PORCH;
+ /* 11. Estimate the Vertical field frequency: */
+ vfieldrate_est = hfreq_est / vtotal_lines;
+ /* 12. Find the actual horizontal period: */
+ hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
+
+ /* 13. Find the actual Vertical field frequency: */
+ vfield_rate = hfreq_est / vtotal_lines;
+ /* 14. Find the Vertical frame frequency: */
+ if (interlaced)
+ vframe_rate = vfield_rate / 2;
+ else
+ vframe_rate = vfield_rate;
+ /* 15. Find number of pixels in left margin: */
+ if (margins)
+ left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+ 1000;
+ else
+ left_margin = 0;
+
+ /* 16.Find number of pixels in right margin: */
+ right_margin = left_margin;
+ /* 17.Find total number of active pixels in image and left and right */
+ total_active_pixels = hdisplay_rnd + left_margin + right_margin;
+ /* 18.Find the ideal blanking duty cycle from blanking duty cycle */
+ ideal_duty_cycle = GTF_C_PRIME * 1000 -
+ (GTF_M_PRIME * 1000000 / hfreq_est);
+ /* 19.Find the number of pixels in the blanking time to the nearest
+ * double character cell: */
+ hblank = total_active_pixels * ideal_duty_cycle /
+ (100000 - ideal_duty_cycle);
+ hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
+ hblank = hblank * 2 * GTF_CELL_GRAN;
+ /* 20.Find total number of pixels: */
+ total_pixels = total_active_pixels + hblank;
+ /* 21.Find pixel clock frequency: */
+ pixel_freq = total_pixels * hfreq_est / 1000;
+ /* Stage 1 computations are now complete; I should really pass
+ * the results to another function and do the Stage 2 computations,
+ * but I only need a few more values so I'll just append the
+ * computations here for now */
+ /* 17. Find the number of pixels in the horizontal sync period: */
+ hsync = H_SYNC_PERCENT * total_pixels / 100;
+ hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+ hsync = hsync * GTF_CELL_GRAN;
+ /* 18. Find the number of pixels in horizontal front porch period */
+ hfront_porch = hblank / 2 - hsync;
+ /* 36. Find the number of lines in the odd front porch period: */
+ vodd_front_porch_lines = GTF_MIN_V_PORCH;
+
+ /* finally, pack the results in the mode struct */
+ drm_mode->hdisplay = hdisplay_rnd;
+ drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
+ drm_mode->hsync_end = drm_mode->hsync_start + hsync;
+ drm_mode->htotal = total_pixels;
+ drm_mode->vdisplay = vdisplay_rnd;
+ drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
+ drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
+ drm_mode->vtotal = vtotal_lines;
+
+ drm_mode->clock = pixel_freq;
+
+ if (interlaced) {
+ drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
+ return drm_mode;
+}
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether interlace is supported
+ * @margins: whether margins are requested
+ *
+ * LOCKING:
+ * none.
+ *
+ * return the modeline based on GTF algorithm
+ *
+ * This function creates a modeline based on the GTF algorithm.
+ * Generalized Timing Formula is derived from:
+ * GTF Spreadsheet by Andy Morrish (1/5/97)
+ * available at http://www.vesa.org
+ *
+ * It is a translation of xserver/hw/xfree86/modes/xf86gtf.c to
+ * integer arithmetic, and also draws on fb_get_mode() in
+ * drivers/video/fbmon.c.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
+EXPORT_SYMBOL(drm_gtf_mode);
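+
+/*
+ * For example, drm_gtf_mode(dev, 1920, 1200, 60, false, 0) expands to
+ *
+ *	drm_gtf_mode_complex(dev, 1920, 1200, 60, false, 0,
+ *	    600, 80, 128, 40);
+ *
+ * with C and J pre-doubled as the GTF feature block convention
+ * described above requires.
+ */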
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void
+drm_mode_set_name(struct drm_display_mode *mode)
+{
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(drm_mode_set_name);
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void
+drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, head) {
+ list_move_tail(entry, new);
+ }
+}
+EXPORT_SYMBOL(drm_mode_list_concat);
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int
+drm_mode_width(const struct drm_display_mode *mode)
+{
+ return mode->hdisplay;
+}
+EXPORT_SYMBOL(drm_mode_width);
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int
+drm_mode_height(const struct drm_display_mode *mode)
+{
+ return mode->vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_height);
+
+/**
+ * drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest integer.
+ */
+int
+drm_mode_hsync(const struct drm_display_mode *mode)
+{
+ unsigned int calc_val;
+
+ if (mode->hsync)
+ return mode->hsync;
+
+ if (mode->htotal <= 0)
+ return 0;
+
+ calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+ calc_val += 500; /* round to 1000Hz */
+ calc_val /= 1000; /* truncate to kHz */
+
+ return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
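+
+/*
+ * Worked example: a 1024x768@60 mode with a 65000kHz dot clock and an
+ * htotal of 1344 gives 65000000 / 1344 = 48363Hz, which the rounding
+ * above turns into 48kHz.
+ */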
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+ *
+ * FIXME: why is this needed? shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate, rounded to the nearest integer: an actual
+ * rate of 70.288 returns 70Hz, and 59.6 returns 60Hz.
+ */
+int
+drm_mode_vrefresh(const struct drm_display_mode *mode)
+{
+ int refresh = 0;
+ unsigned int calc_val;
+
+ if (mode->vrefresh > 0)
+ refresh = mode->vrefresh;
+ else if (mode->htotal > 0 && mode->vtotal > 0) {
+ int vtotal;
+ vtotal = mode->vtotal;
+ /* work out vrefresh the value will be x1000 */
+ calc_val = (mode->clock * 1000);
+ calc_val /= mode->htotal;
+ refresh = (calc_val + vtotal / 2) / vtotal;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ refresh *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ refresh /= 2;
+ if (mode->vscan > 1)
+ refresh /= mode->vscan;
+ }
+ return refresh;
+}
+EXPORT_SYMBOL(drm_mode_vrefresh);
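+
+/*
+ * Worked example: 1920x1080@60 typically has clock 148500, htotal
+ * 2200 and vtotal 1125, so (148500 * 1000) / 2200 = 67500 and
+ * (67500 + 1125 / 2) / 1125 = 60Hz.
+ */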
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: adjustment flags (e.g. CRTC_INTERLACE_HALVE_V)
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void
+drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+ return;
+
+ p->crtc_hdisplay = p->hdisplay;
+ p->crtc_hsync_start = p->hsync_start;
+ p->crtc_hsync_end = p->hsync_end;
+ p->crtc_htotal = p->htotal;
+ p->crtc_hskew = p->hskew;
+ p->crtc_vdisplay = p->vdisplay;
+ p->crtc_vsync_start = p->vsync_start;
+ p->crtc_vsync_end = p->vsync_end;
+ p->crtc_vtotal = p->vtotal;
+
+ if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+ p->crtc_vdisplay /= 2;
+ p->crtc_vsync_start /= 2;
+ p->crtc_vsync_end /= 2;
+ p->crtc_vtotal /= 2;
+ }
+ }
+
+ if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+ p->crtc_vdisplay *= 2;
+ p->crtc_vsync_start *= 2;
+ p->crtc_vsync_end *= 2;
+ p->crtc_vtotal *= 2;
+ }
+
+ if (p->vscan > 1) {
+ p->crtc_vdisplay *= p->vscan;
+ p->crtc_vsync_start *= p->vscan;
+ p->crtc_vsync_end *= p->vscan;
+ p->crtc_vtotal *= p->vscan;
+ }
+
+ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+}
+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
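+
+/*
+ * For example, with CRTC_INTERLACE_HALVE_V an interlaced 1920x1080
+ * mode keeps crtc_hdisplay at 1920 but halves crtc_vdisplay to 540,
+ * since the CRTC is programmed with per-field vertical timings.
+ */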
+
+
+/**
+ * drm_mode_copy - copy the mode
+ * @dst: mode to overwrite
+ * @src: mode to copy
+ *
+ * LOCKING:
+ * None.
+ *
+ * Copy an existing mode into another mode, preserving the object id
+ * of the destination mode.
+ */
+void
+drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
+{
+ int id = dst->base.id;
+
+ *dst = *src;
+ dst->base.id = id;
+ INIT_LIST_HEAD(&dst->head);
+}
+EXPORT_SYMBOL(drm_mode_copy);
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @dev: DRM device
+ * @mode: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it. Used to create new instances of established modes.
+ */
+struct drm_display_mode *
+drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = drm_mode_create(dev);
+ if (!nmode)
+ return NULL;
+
+ drm_mode_copy(nmode, mode);
+
+ return nmode;
+}
+EXPORT_SYMBOL(drm_mode_duplicate);
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool
+drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
+ /* do the clock check; convert to picoseconds so fb modes get
+ * matched the same way */
+ if (mode1->clock && mode2->clock) {
+ if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+ return false;
+ } else if (mode1->clock != mode2->clock)
+ return false;
+
+ if (mode1->hdisplay == mode2->hdisplay &&
+ mode1->hsync_start == mode2->hsync_start &&
+ mode1->hsync_end == mode2->hsync_end &&
+ mode1->htotal == mode2->htotal &&
+ mode1->hskew == mode2->hskew &&
+ mode1->vdisplay == mode2->vdisplay &&
+ mode1->vsync_start == mode2->vsync_start &&
+ mode1->vsync_end == mode2->vsync_end &&
+ mode1->vtotal == mode2->vtotal &&
+ mode1->vscan == mode2->vscan &&
+ mode1->flags == mode2->flags)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(drm_mode_equal);
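+
+/*
+ * The picosecond conversion gives the clock comparison some slack:
+ * KHZ2PICOS(65000) and KHZ2PICOS(65001) both evaluate to 15384, so
+ * two otherwise identical modes whose clocks differ by 1kHz still
+ * compare equal.
+ */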
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits. Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+void
+drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, mode_list, head) {
+ if (maxPitch > 0 && mode->hdisplay > maxPitch)
+ mode->status = MODE_BAD_WIDTH;
+
+ if (maxX > 0 && mode->hdisplay > maxX)
+ mode->status = MODE_VIRTUAL_X;
+
+ if (maxY > 0 && mode->vdisplay > maxY)
+ mode->status = MODE_VIRTUAL_Y;
+ }
+}
+EXPORT_SYMBOL(drm_mode_validate_size);
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question. This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void
+drm_mode_validate_clocks(struct drm_device *dev,
+ struct list_head *mode_list,
+ int *min, int *max, int n_ranges)
+{
+ struct drm_display_mode *mode;
+ int i;
+
+ list_for_each_entry(mode, mode_list, head) {
+ bool good = false;
+ for (i = 0; i < n_ranges; i++) {
+ if (mode->clock >= min[i] && mode->clock <= max[i]) {
+ good = true;
+ break;
+ }
+ }
+ if (!good)
+ mode->status = MODE_CLOCK_RANGE;
+ }
+}
+EXPORT_SYMBOL(drm_mode_validate_clocks);
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list. If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void
+drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose)
+{
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, mode_list, head) {
+ if (mode->status != MODE_OK) {
+ list_del(&mode->head);
+ if (verbose) {
+ drm_mode_debug_printmodeline(mode);
+ DRM_DEBUG_KMS("Not using %s mode %d\n",
+ mode->name, mode->status);
+ }
+ drm_mode_destroy(dev, mode);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_mode_prune_invalid);
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @a: first mode
+ * @b: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @a and @b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @a is better than @b, zero if they're equivalent, or
+ * positive if @b is better than @a.
+ */
+int
+drm_mode_compare(struct drm_display_mode *a, struct drm_display_mode* b)
+{
+ int diff;
+
+ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+ if (diff)
+ return diff;
+ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+ if (diff)
+ return diff;
+ diff = b->clock - a->clock;
+ return diff;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+RB_HEAD(drm_mode_sort, drm_display_mode);
+
+RB_PROTOTYPE(drm_mode_sort, drm_display_mode, sort, drm_mode_compare);
+
+void
+drm_mode_sort(struct list_head *mode_list)
+{
+ struct drm_display_mode *mode, *t;
+ struct drm_mode_sort drm_mode_tree;
+
+ RB_INIT(&drm_mode_tree);
+ list_for_each_entry_safe(mode, t, mode_list, head) {
+ RB_INSERT(drm_mode_sort, &drm_mode_tree, mode);
+ list_del(&mode->head);
+ }
+ RB_FOREACH(mode, drm_mode_sort, &drm_mode_tree)
+ list_add_tail(&mode->head, mode_list);
+}
+RB_GENERATE(drm_mode_sort, drm_display_mode, sort, drm_mode_compare);
+
+EXPORT_SYMBOL(drm_mode_sort);
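+
+/*
+ * For example, given 1280x1024 (preferred), 1920x1080 and 1024x768,
+ * the resulting order is 1280x1024, 1920x1080, 1024x768: the
+ * preferred bit wins, then the larger area, then the higher clock.
+ */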
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares the probed modes against the
+ * current list and only adds different modes. All modes unverified
+ * after this point will be removed by drm_mode_prune_invalid().
+ */
+void
+drm_mode_connector_list_update(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ struct drm_display_mode *pmode, *pt;
+ int found_it;
+
+ list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+ head) {
+ found_it = 0;
+ /* go through current modes checking for the new probed mode */
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (drm_mode_equal(pmode, mode)) {
+ found_it = 1;
+ /* if equal delete the probed mode */
+ mode->status = pmode->status;
+ /* Merge type bits together */
+ mode->type |= pmode->type;
+ list_del(&pmode->head);
+ drm_mode_destroy(connector->dev, pmode);
+ break;
+ }
+ }
+
+ if (!found_it) {
+ list_move_tail(&pmode->head, &connector->modes);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_mode_connector_list_update);
+
+/**
+ * drm_mode_parse_command_line_for_connector - parse command line for connector
+ * @mode_option - per connector mode option
+ * @connector - connector to parse line for
+ *
+ * This parses the connector specific then generic command lines for
+ * modes and options to configure the connector.
+ *
+ * This uses the same parameters as the fb modedb.c, except for an
+ * extra force flag at the end:
+ *
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ *
+ * where the trailing bit is enable (e), enable digital (D) or
+ * disable (d).
+ */
+bool
+drm_mode_parse_command_line_for_connector(const char *mode_option,
+ struct drm_connector *connector,
+ struct drm_cmdline_mode *mode)
+{
+ const char *name;
+ unsigned int namelen;
+ bool res_specified = false, bpp_specified = false, refresh_specified = false;
+ unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+ bool yres_specified = false, cvt = false, rb = false;
+ bool interlace = false, margins = false, was_digit = false;
+ int i;
+ enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+
+#ifdef CONFIG_FB
+ if (!mode_option)
+ mode_option = fb_mode_option;
+#endif
+
+ if (!mode_option) {
+ mode->specified = false;
+ return false;
+ }
+
+ name = mode_option;
+ namelen = strlen(name);
+ for (i = namelen-1; i >= 0; i--) {
+ switch (name[i]) {
+ case '@':
+ if (!refresh_specified && !bpp_specified &&
+ !yres_specified && !cvt && !rb && was_digit) {
+ refresh = simple_strtol(&name[i+1], NULL, 10);
+ refresh_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case '-':
+ if (!bpp_specified && !yres_specified && !cvt &&
+ !rb && was_digit) {
+ bpp = simple_strtol(&name[i+1], NULL, 10);
+ bpp_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case 'x':
+ if (!yres_specified && was_digit) {
+ yres = simple_strtol(&name[i+1], NULL, 10);
+ yres_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case '0' ... '9':
+ was_digit = true;
+ break;
+ case 'M':
+ if (yres_specified || cvt || was_digit)
+ goto done;
+ cvt = true;
+ break;
+ case 'R':
+ if (yres_specified || cvt || rb || was_digit)
+ goto done;
+ rb = true;
+ break;
+ case 'm':
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ margins = true;
+ break;
+ case 'i':
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ interlace = true;
+ break;
+ case 'e':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ force = DRM_FORCE_ON;
+ break;
+ case 'D':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+ force = DRM_FORCE_ON;
+ else
+ force = DRM_FORCE_ON_DIGITAL;
+ break;
+ case 'd':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ force = DRM_FORCE_OFF;
+ break;
+ default:
+ goto done;
+ }
+ }
+
+ if (i < 0 && yres_specified) {
+ char *ch;
+ xres = simple_strtol(name, &ch, 10);
+ if ((ch != NULL) && (*ch == 'x'))
+ res_specified = true;
+ else
+ i = ch - name;
+ } else if (!yres_specified && was_digit) {
+ /* catch mode that begins with digits but has no 'x' */
+ i = 0;
+ }
+done:
+ if (i >= 0) {
+ printf("parse error at position %i in video mode '%s'\n",
+ i, name);
+ mode->specified = false;
+ return false;
+ }
+
+ if (res_specified) {
+ mode->specified = true;
+ mode->xres = xres;
+ mode->yres = yres;
+ }
+
+ if (refresh_specified) {
+ mode->refresh_specified = true;
+ mode->refresh = refresh;
+ }
+
+ if (bpp_specified) {
+ mode->bpp_specified = true;
+ mode->bpp = bpp;
+ }
+ mode->rb = rb;
+ mode->cvt = cvt;
+ mode->interlace = interlace;
+ mode->margins = margins;
+ mode->force = force;
+
+ return true;
+}
+EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
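+
+/*
+ * Examples of accepted strings (a sketch of the syntax above):
+ *
+ *	"1024x768"		plain resolution
+ *	"1280x1024-32@60"	32bpp at 60Hz
+ *	"1368x768M@60"		CVT-generated mode at 60Hz
+ *	"1920x1200MR@60"	CVT with reduced blanking
+ *	"1024x768e"		force the connector on
+ *	"d"			force the connector off
+ */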
+
+struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+{
+ struct drm_display_mode *mode;
+
+ if (cmd->cvt)
+ mode = drm_cvt_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->rb, cmd->interlace,
+ cmd->margins);
+ else
+ mode = drm_gtf_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->interlace,
+ cmd->margins);
+ if (!mode)
+ return NULL;
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ return mode;
+}
+EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
+
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+/*
+ * Convert a string to a long integer.
+ *
+ * Ignores `locale' stuff. Assumes that the upper and lower case
+ * alphabets and digits are each contiguous.
+ */
+#include <sys/limits.h>
+
+long
+simple_strtol(const char *nptr, char **endptr, int base)
+{
+ const char *s;
+ long acc, cutoff;
+ int c;
+ int neg, any, cutlim;
+ int errno; /* local dummy; the kernel has no errno, overflow just clamps */
+
+ /*
+ * Skip white space and pick up leading +/- sign if any.
+ * If base is 0, allow 0x for hex and 0 for octal, else
+ * assume decimal; if base is already 16, allow 0x.
+ */
+ s = nptr;
+ do {
+ c = (unsigned char) *s++;
+ } while (c == ' ' || c == '\t');
+ if (c == '-') {
+ neg = 1;
+ c = *s++;
+ } else {
+ neg = 0;
+ if (c == '+')
+ c = *s++;
+ }
+ if ((base == 0 || base == 16) &&
+ c == '0' && (*s == 'x' || *s == 'X')) {
+ c = s[1];
+ s += 2;
+ base = 16;
+ }
+ if (base == 0)
+ base = c == '0' ? 8 : 10;
+
+ /*
+ * Compute the cutoff value between legal numbers and illegal
+ * numbers. That is the largest legal value, divided by the
+ * base. An input number that is greater than this value, if
+ * followed by a legal input character, is too big. One that
+ * is equal to this value may be valid or not; the limit
+ * between valid and invalid numbers is then based on the last
+ * digit. For instance, if the range for longs is
+ * [-2147483648..2147483647] and the input base is 10,
+ * cutoff will be set to 214748364 and cutlim to either
+ * 7 (neg==0) or 8 (neg==1), meaning that if we have accumulated
+ * a value > 214748364, or equal but the next digit is > 7 (or 8),
+ * the number is too big, and we will return a range error.
+ *
+ * Set any if any `digits' consumed; make it negative to indicate
+ * overflow.
+ */
+ cutoff = neg ? LONG_MIN : LONG_MAX;
+ cutlim = cutoff % base;
+ cutoff /= base;
+ if (neg) {
+ if (cutlim > 0) {
+ cutlim -= base;
+ cutoff += 1;
+ }
+ cutlim = -cutlim;
+ }
+ for (acc = 0, any = 0;; c = (unsigned char) *s++) {
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'A' && c <= 'Z')
+ c -= 'A' - 10;
+ else if( c >= 'a' && c <= 'z')
+ c -= 'a' - 10;
+ else
+ break;
+ if (c >= base)
+ break;
+ if (any < 0)
+ continue;
+ if (neg) {
+ if (acc < cutoff || (acc == cutoff && c > cutlim)) {
+ any = -1;
+ acc = LONG_MIN;
+ errno = ERANGE;
+ } else {
+ any = 1;
+ acc *= base;
+ acc -= c;
+ }
+ } else {
+ if (acc > cutoff || (acc == cutoff && c > cutlim)) {
+ any = -1;
+ acc = LONG_MAX;
+ errno = ERANGE;
+ } else {
+ any = 1;
+ acc *= base;
+ acc += c;
+ }
+ }
+ }
+ if (endptr != 0)
+ *endptr = (char *) (any ? s - 1 : nptr);
+ return (acc);
+}
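+
+/*
+ * The mode parser above leans on the endptr behaviour; for instance
+ *
+ *	char *ch;
+ *	long xres = simple_strtol("1024x768", &ch, 10);
+ *
+ * leaves xres == 1024 with ch pointing at the 'x'.
+ */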
diff --git a/sys/dev/pci/drm/files.drm b/sys/dev/pci/drm/files.drm
index 68aa4f4dc19..dc7ffd80f73 100644
--- a/sys/dev/pci/drm/files.drm
+++ b/sys/dev/pci/drm/files.drm
@@ -1,5 +1,5 @@
# $NetBSD: files.drm,v 1.2 2007/03/28 11:29:37 jmcneill Exp $
-# $OpenBSD: files.drm,v 1.23 2012/05/19 18:02:53 kettenis Exp $
+# $OpenBSD: files.drm,v 1.24 2013/03/18 12:36:51 jsg Exp $
# direct rendering modules
define drmbase {}
@@ -12,13 +12,51 @@ file dev/pci/drm/drm_drv.c drm needs-flag
file dev/pci/drm/drm_irq.c drm
file dev/pci/drm/drm_lock.c drm
file dev/pci/drm/drm_memory.c drm
+file dev/pci/drm/drm_mm.c drm
file dev/pci/drm/drm_scatter.c drm
+file dev/pci/drm/drm_crtc.c drm
+file dev/pci/drm/drm_modes.c drm
+file dev/pci/drm/drm_crtc_helper.c drm
+file dev/pci/drm/drm_edid.c drm
+file dev/pci/drm/drm_dp_helper.c drm
+file dev/pci/drm/drm_fb_helper.c drm
-device inteldrm: drmbase
+device inteldrm: drmbase, wsemuldisplaydev, rasops32
attach inteldrm at drmdev
-file dev/pci/drm/i915_drv.c inteldrm
-file dev/pci/drm/i915_irq.c inteldrm
-file dev/pci/drm/i915_suspend.c inteldrm
+file dev/pci/drm/i915/i915_dma.c inteldrm
+file dev/pci/drm/i915/i915_drv.c inteldrm
+file dev/pci/drm/i915/i915_gem.c inteldrm
+file dev/pci/drm/i915/i915_gem_evict.c inteldrm
+file dev/pci/drm/i915/i915_gem_execbuffer.c inteldrm
+file dev/pci/drm/i915/i915_gem_gtt.c inteldrm
+file dev/pci/drm/i915/i915_gem_tiling.c inteldrm
+file dev/pci/drm/i915/i915_irq.c inteldrm
+file dev/pci/drm/i915/i915_suspend.c inteldrm
+file dev/pci/drm/i915/intel_bios.c inteldrm
+file dev/pci/drm/i915/intel_crt.c inteldrm
+file dev/pci/drm/i915/intel_ddi.c inteldrm
+file dev/pci/drm/i915/intel_display.c inteldrm
+file dev/pci/drm/i915/intel_dp.c inteldrm
+file dev/pci/drm/i915/intel_dvo.c inteldrm
+file dev/pci/drm/i915/intel_fb.c inteldrm
+file dev/pci/drm/i915/intel_hdmi.c inteldrm
+file dev/pci/drm/i915/intel_i2c.c inteldrm
+file dev/pci/drm/i915/intel_lvds.c inteldrm
+file dev/pci/drm/i915/intel_modes.c inteldrm
+file dev/pci/drm/i915/intel_opregion.c inteldrm
+file dev/pci/drm/i915/intel_overlay.c inteldrm
+file dev/pci/drm/i915/intel_panel.c inteldrm
+file dev/pci/drm/i915/intel_pm.c inteldrm
+file dev/pci/drm/i915/intel_ringbuffer.c inteldrm
+file dev/pci/drm/i915/intel_sdvo.c inteldrm
+file dev/pci/drm/i915/intel_sprite.c inteldrm
+file dev/pci/drm/i915/intel_tv.c inteldrm
+file dev/pci/drm/i915/dvo_ch7017.c inteldrm
+file dev/pci/drm/i915/dvo_ch7xxx.c inteldrm
+file dev/pci/drm/i915/dvo_ivch.c inteldrm
+file dev/pci/drm/i915/dvo_ns2501.c inteldrm
+file dev/pci/drm/i915/dvo_sil164.c inteldrm
+file dev/pci/drm/i915/dvo_tfp410.c inteldrm
device radeondrm: drmbase
attach radeondrm at drmdev
diff --git a/sys/dev/pci/drm/i915/dvo.h b/sys/dev/pci/drm/i915/dvo.h
new file mode 100644
index 00000000000..8c9ae333095
--- /dev/null
+++ b/sys/dev/pci/drm/i915/dvo.h
@@ -0,0 +1,152 @@
+/* $OpenBSD: dvo.h,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DVO_H
+#define _INTEL_DVO_H
+
+#include <sys/types.h>
+#include <sys/device.h>
+#include <dev/i2c/i2cvar.h>
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dvo_device {
+ const char *name;
+ int type;
+ /* DVOA/B/C output register */
+ u32 dvo_reg;
+ /* GPIO register used for i2c bus to control this device */
+ u32 gpio;
+ int slave_addr;
+
+ const struct intel_dvo_dev_ops *dev_ops;
+ void *dev_priv;
+ struct i2c_controller *i2c_bus;
+};
+
+struct intel_dvo_dev_ops {
+ /*
+ * Initialize the device at startup time.
+ * Returns false if the device does not exist.
+ */
+ bool (*init)(struct intel_dvo_device *dvo,
+ struct i2c_controller *i2cbus);
+
+ /*
+ * Called to allow the output a chance to create properties after the
+ * RandR objects have been created.
+ */
+ void (*create_resources)(struct intel_dvo_device *dvo);
+
+ /*
+ * Turn on/off output.
+ *
+ * Because none of our dvo drivers support intermediate power levels,
+ * we don't expose this in the interface.
+ */
+ void (*dpms)(struct intel_dvo_device *dvo, bool enable);
+
+ /*
+ * Callback for testing a video mode for a given output.
+ *
+ * This function should only check for cases where a mode can't
+ * be supported on the output specifically, and not represent
+ * generic CRTC limitations.
+ *
+ * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+ */
+ int (*mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
+
+ /*
+ * Callback to adjust the mode to be set in the CRTC.
+ *
+ * This allows an output to adjust the clock or even the entire set of
+ * timings, which is used for panels with fixed timings or for
+ * buses with clock limitations.
+ */
+ bool (*mode_fixup)(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Callback for preparing mode changes on an output
+ */
+ void (*prepare)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for committing mode changes on an output
+ */
+ void (*commit)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for setting up a video mode after fixups have been made.
+ *
+ * This is only called while the output is disabled. The dpms callback
+ * must be all that's necessary for the output, to turn the output on
+ * after this function is called.
+ */
+ void (*mode_set)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Probe for a connected output, and return detect_status.
+ */
+ enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+
+ /*
+ * Probe the current hw status, returning true if the connected output
+ * is active.
+ */
+ bool (*get_hw_state)(struct intel_dvo_device *dev);
+
+ /**
+ * Query the device for the modes it provides.
+ *
+ * This function may also update MonInfo, mm_width, and mm_height.
+ *
+ * \return singly-linked list of modes or NULL if no modes found.
+ */
+ struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
+
+ /**
+ * Clean up driver-specific bits of the output
+ */
+ void (*destroy) (struct intel_dvo_device *dvo);
+
+ /**
+ * Debugging hook to dump device registers to log file
+ */
+ void (*dump_regs)(struct intel_dvo_device *dvo);
+};
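+
+/*
+ * A chip driver fills in one of these tables and exports it below, as
+ * dvo_sil164.c does for sil164_ops.  A minimal sketch (the foo_*
+ * hook names are hypothetical; hooks a caller never uses may be left
+ * NULL):
+ *
+ *	struct intel_dvo_dev_ops foo_ops = {
+ *		.init = foo_init,
+ *		.detect = foo_detect,
+ *		.mode_valid = foo_mode_valid,
+ *		.mode_set = foo_mode_set,
+ *		.dpms = foo_dpms,
+ *	};
+ */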
+
+extern struct intel_dvo_dev_ops sil164_ops;
+extern struct intel_dvo_dev_ops ch7xxx_ops;
+extern struct intel_dvo_dev_ops ivch_ops;
+extern struct intel_dvo_dev_ops tfp410_ops;
+extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
+
+#endif /* _INTEL_DVO_H */
diff --git a/sys/dev/pci/drm/i915/dvo_ch7017.c b/sys/dev/pci/drm/i915/dvo_ch7017.c
new file mode 100644
index 00000000000..95a7ec961b2
--- /dev/null
+++ b/sys/dev/pci/drm/i915/dvo_ch7017.c
@@ -0,0 +1,443 @@
+/* $OpenBSD: dvo_ch7017.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+#define CH7017_TV_DISPLAY_MODE 0x00
+#define CH7017_FLICKER_FILTER 0x01
+#define CH7017_VIDEO_BANDWIDTH 0x02
+#define CH7017_TEXT_ENHANCEMENT 0x03
+#define CH7017_START_ACTIVE_VIDEO 0x04
+#define CH7017_HORIZONTAL_POSITION 0x05
+#define CH7017_VERTICAL_POSITION 0x06
+#define CH7017_BLACK_LEVEL 0x07
+#define CH7017_CONTRAST_ENHANCEMENT 0x08
+#define CH7017_TV_PLL 0x09
+#define CH7017_TV_PLL_M 0x0a
+#define CH7017_TV_PLL_N 0x0b
+#define CH7017_SUB_CARRIER_0 0x0c
+#define CH7017_CIV_CONTROL 0x10
+#define CH7017_CIV_0 0x11
+#define CH7017_CHROMA_BOOST 0x14
+#define CH7017_CLOCK_MODE 0x1c
+#define CH7017_INPUT_CLOCK 0x1d
+#define CH7017_GPIO_CONTROL 0x1e
+#define CH7017_INPUT_DATA_FORMAT 0x1f
+#define CH7017_CONNECTION_DETECT 0x20
+#define CH7017_DAC_CONTROL 0x21
+#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22
+#define CH7017_DEFEAT_VSYNC 0x47
+#define CH7017_TEST_PATTERN 0x48
+
+#define CH7017_POWER_MANAGEMENT 0x49
+/** Enables the TV output path. */
+#define CH7017_TV_EN (1 << 0)
+#define CH7017_DAC0_POWER_DOWN (1 << 1)
+#define CH7017_DAC1_POWER_DOWN (1 << 2)
+#define CH7017_DAC2_POWER_DOWN (1 << 3)
+#define CH7017_DAC3_POWER_DOWN (1 << 4)
+/** Powers down the TV out block, and DAC0-3 */
+#define CH7017_TV_POWER_DOWN_EN (1 << 5)
+
+#define CH7017_VERSION_ID 0x4a
+
+#define CH7017_DEVICE_ID 0x4b
+#define CH7017_DEVICE_ID_VALUE 0x1b
+#define CH7018_DEVICE_ID_VALUE 0x1a
+#define CH7019_DEVICE_ID_VALUE 0x19
+
+#define CH7017_XCLK_D2_ADJUST 0x53
+#define CH7017_UP_SCALER_COEFF_0 0x55
+#define CH7017_UP_SCALER_COEFF_1 0x56
+#define CH7017_UP_SCALER_COEFF_2 0x57
+#define CH7017_UP_SCALER_COEFF_3 0x58
+#define CH7017_UP_SCALER_COEFF_4 0x59
+#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a
+#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b
+#define CH7017_GPIO_INVERT 0x5c
+#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d
+#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f
+/**< Low bits of horizontal active pixel input */
+
+#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60
+/** High bits of horizontal active pixel input */
+#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0)
+/** High bits of vertical active line output */
+#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3)
+
+#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61
+/**< Low bits of vertical active line output */
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62
+/**< Low bits of horizontal active pixel output */
+
+#define CH7017_LVDS_POWER_DOWN 0x63
+/** High bits of horizontal active pixel output */
+#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0)
+/** Enables the LVDS power down state transition */
+#define CH7017_LVDS_POWER_DOWN_EN (1 << 6)
+/** Enables the LVDS upscaler */
+#define CH7017_LVDS_UPSCALER_EN (1 << 7)
+#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
+
+#define CH7017_LVDS_ENCODING 0x64
+#define CH7017_LVDS_DITHER_2D (1 << 2)
+#define CH7017_LVDS_DITHER_DIS (1 << 3)
+#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4)
+#define CH7017_LVDS_24_BIT (1 << 5)
+
+#define CH7017_LVDS_ENCODING_2 0x65
+
+#define CH7017_LVDS_PLL_CONTROL 0x66
+/** Enables the LVDS panel output path */
+#define CH7017_LVDS_PANEN (1 << 0)
+/** Enables the LVDS panel backlight */
+#define CH7017_LVDS_BKLEN (1 << 3)
+
+#define CH7017_POWER_SEQUENCING_T1 0x67
+#define CH7017_POWER_SEQUENCING_T2 0x68
+#define CH7017_POWER_SEQUENCING_T3 0x69
+#define CH7017_POWER_SEQUENCING_T4 0x6a
+#define CH7017_POWER_SEQUENCING_T5 0x6b
+#define CH7017_GPIO_DRIVER_TYPE 0x6c
+#define CH7017_GPIO_DATA 0x6d
+#define CH7017_GPIO_DIRECTION_CONTROL 0x6e
+
+#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71
+# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
+# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
+# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
+
+#define CH7017_LVDS_PLL_VCO_CONTROL 0x72
+# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
+# define CH7017_LVDS_PLL_VCO_SHIFT 4
+# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
+
+#define CH7017_OUTPUTS_ENABLE 0x73
+# define CH7017_CHARGE_PUMP_LOW 0x0
+# define CH7017_CHARGE_PUMP_HIGH 0x3
+# define CH7017_LVDS_CHANNEL_A (1 << 3)
+# define CH7017_LVDS_CHANNEL_B (1 << 4)
+# define CH7017_TV_DAC_A (1 << 5)
+# define CH7017_TV_DAC_B (1 << 6)
+# define CH7017_DDC_SELECT_DC2 (1 << 7)
+
+#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74
+#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75
+#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
+
+#define CH7017_LVDS_CONTROL_2 0x78
+# define CH7017_LOOP_FILTER_SHIFT 5
+# define CH7017_PHASE_DETECTOR_SHIFT 0
+
+#define CH7017_BANG_LIMIT_CONTROL 0x7f
+
+struct ch7017_priv {
+ uint8_t dummy;
+};
+
+void ch7017_dump_regs(struct intel_dvo_device *);
+void ch7017_dpms(struct intel_dvo_device *, bool);
+bool ch7017_read(struct intel_dvo_device *, u8, u8 *);
+bool ch7017_write(struct intel_dvo_device *, u8, u8);
+bool ch7017_init(struct intel_dvo_device *, struct i2c_controller *);
+enum drm_connector_status ch7017_detect(struct intel_dvo_device *);
+enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *,
+ struct drm_display_mode *);
+void ch7017_mode_set(struct intel_dvo_device *, struct drm_display_mode *,
+ struct drm_display_mode *);
+bool ch7017_get_hw_state(struct intel_dvo_device *);
+void ch7017_destroy(struct intel_dvo_device *);
+
+bool
+ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
+{
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ int ret;
+ uint8_t cmd = 0;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, &addr, 1, 0);
+ if (ret) {
+ iic_release_bus(adapter, 0);
+ return false;
+ }
+ ret = iic_exec(adapter, I2C_OP_READ_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, val, 1, 0);
+ iic_release_bus(adapter, 0);
+ if (ret)
+ return false;
+ else
+ return true;
+}
+
+bool
+ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
+{
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ uint8_t buf[2] = { addr, val };
+ int ret;
+ uint8_t cmd = 0;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, buf, 2, 0);
+ iic_release_bus(adapter, 0);
+ if (ret)
+ return false;
+ else
+ return true;
+}
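+
+/*
+ * For illustration (not part of the driver itself): the CH7017 exposes
+ * an indexed register file over i2c. A read is a one-byte register
+ * address write followed by a one-byte read; a write is a two-byte
+ * { address, value } transfer. A read-modify-write of a single bit
+ * would therefore look like:
+ *
+ * uint8_t v;
+ * if (ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &v))
+ * ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ * v | CH7017_LVDS_POWER_DOWN_EN);
+ *
+ * ch7017_dpms() below uses exactly this pattern.
+ */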
+
+/** Probes for a CH7017 on the given bus and slave address. */
+bool
+ch7017_init(struct intel_dvo_device *dvo,
+ struct i2c_controller *adapter)
+{
+ struct ch7017_priv *priv;
+ const char *str;
+ u8 val;
+
+ priv = malloc(sizeof(struct ch7017_priv), M_DRM, M_WAITOK | M_ZERO);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = priv;
+
+ if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+ goto fail;
+
+ switch (val) {
+ case CH7017_DEVICE_ID_VALUE:
+ str = "ch7017";
+ break;
+ case CH7018_DEVICE_ID_VALUE:
+ str = "ch7018";
+ break;
+ case CH7019_DEVICE_ID_VALUE:
+ str = "ch7019";
+ break;
+ default:
+ DRM_DEBUG_KMS("ch701x not detected, got %d: from "
+ "slave %d.\n",
+ val, dvo->slave_addr);
+ goto fail;
+ }
+
+ DRM_DEBUG_KMS("%s detected, addr %d\n",
+ str, dvo->slave_addr);
+ return true;
+
+fail:
+ free(priv, M_DRM);
+ return false;
+}
+
+enum drm_connector_status
+ch7017_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+enum drm_mode_status
+ch7017_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 160000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+void
+ch7017_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
+ uint8_t outputs_enable, lvds_control_2, lvds_power_down;
+ uint8_t horizontal_active_pixel_input;
+ uint8_t horizontal_active_pixel_output, vertical_active_line_output;
+ uint8_t active_input_line_output;
+
+ DRM_DEBUG_KMS("Registers before mode setting\n");
+ ch7017_dump_regs(dvo);
+
+ /* LVDS PLL settings from page 75 of 7017-7017ds.pdf */
+ if (mode->clock < 100000) {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ } else {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_feedback_div = 35;
+ lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
+ outputs_enable |= CH7017_LVDS_CHANNEL_B;
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ } else {
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (1 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ }
+ }
+
+ horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
+
+ vertical_active_line_output = mode->vdisplay & 0x00ff;
+ horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
+
+ active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
+ (((mode->vdisplay & 0x0700) >> 8) << 3);
+
+ lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
+ (mode->hdisplay & 0x0700) >> 8;
+
+ ch7017_dpms(dvo, false);
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
+ horizontal_active_pixel_input);
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
+ horizontal_active_pixel_output);
+ ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
+ vertical_active_line_output);
+ ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
+ active_input_line_output);
+ ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
+ ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
+ ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
+ ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
+
+ /* Turn the LVDS back on with new settings. */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+
+ DRM_DEBUG_KMS("Registers after mode setting\n");
+ ch7017_dump_regs(dvo);
+}
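+
+/*
+ * Worked example of the packing above, with assumed numbers: for a
+ * 1400x1050 mode, hdisplay = 0x578 and vdisplay = 0x41a, so
+ *
+ * horizontal_active_pixel_input = 0x78 (hdisplay low byte)
+ * vertical_active_line_output = 0x1a (vdisplay low byte)
+ * active_input_line_output = 0x05 | (0x04 << 3) = 0x25
+ * lvds_power_down = 0x08 | 0x05 = 0x0d
+ *
+ * i.e. bits 10:8 of each dimension are folded into
+ * CH7017_ACTIVE_INPUT_LINE_OUTPUT and CH7017_LVDS_POWER_DOWN.
+ */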
+
+/* set the CH7017 power state */
+void
+ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ /* Turn off TV/VGA, and never turn it on since we don't support it. */
+ ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
+ CH7017_DAC0_POWER_DOWN |
+ CH7017_DAC1_POWER_DOWN |
+ CH7017_DAC2_POWER_DOWN |
+ CH7017_DAC3_POWER_DOWN |
+ CH7017_TV_POWER_DOWN_EN);
+
+ if (enable) {
+ /* Turn on the LVDS */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val & ~CH7017_LVDS_POWER_DOWN_EN);
+ } else {
+ /* Turn off the LVDS */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val | CH7017_LVDS_POWER_DOWN_EN);
+ }
+
+ /* XXX: Should actually wait for update power status somehow */
+ drm_msleep(20, "chdmps");
+}
+
+bool
+ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ if (val & CH7017_LVDS_POWER_DOWN_EN)
+ return false;
+ else
+ return true;
+}
+
+void
+ch7017_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+#define DUMP(reg) \
+do { \
+ ch7017_read(dvo, reg, &val); \
+ DRM_DEBUG_KMS(#reg ": %02x\n", val); \
+} while (0)
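+/*
+ * The do { } while (0) wrapper makes DUMP() expand to a single
+ * statement, so it stays safe after an unbraced if or else.
+ */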
+
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
+ DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
+ DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
+ DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
+ DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
+ DUMP(CH7017_LVDS_CONTROL_2);
+ DUMP(CH7017_OUTPUTS_ENABLE);
+ DUMP(CH7017_LVDS_POWER_DOWN);
+}
+
+void
+ch7017_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7017_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ free(priv, M_DRM);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ch7017_ops = {
+ .init = ch7017_init,
+ .detect = ch7017_detect,
+ .mode_valid = ch7017_mode_valid,
+ .mode_set = ch7017_mode_set,
+ .dpms = ch7017_dpms,
+ .get_hw_state = ch7017_get_hw_state,
+ .dump_regs = ch7017_dump_regs,
+ .destroy = ch7017_destroy,
+};
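+
+/*
+ * A minimal sketch of how the i915 DVO layer is expected to drive an
+ * ops table like the one above (caller and variable names assumed;
+ * the real consumer lives in the intel_dvo code, not in this file):
+ *
+ * struct intel_dvo_dev_ops *ops = &ch7017_ops;
+ *
+ * if (ops->init(dvo, bus) &&
+ * ops->detect(dvo) == connector_status_connected &&
+ * ops->mode_valid(dvo, mode) == MODE_OK) {
+ * ops->mode_set(dvo, mode, adjusted_mode);
+ * ops->dpms(dvo, true);
+ * }
+ */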
diff --git a/sys/dev/pci/drm/i915/dvo_ch7xxx.c b/sys/dev/pci/drm/i915/dvo_ch7xxx.c
new file mode 100644
index 00000000000..4f872d44bcd
--- /dev/null
+++ b/sys/dev/pci/drm/i915/dvo_ch7xxx.c
@@ -0,0 +1,371 @@
+/* $OpenBSD: dvo_ch7xxx.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define CH7xxx_REG_VID 0x4a
+#define CH7xxx_REG_DID 0x4b
+
+#define CH7011_VID 0x83 /* 7010 as well */
+#define CH7009A_VID 0x84
+#define CH7009B_VID 0x85
+#define CH7301_VID 0x95
+
+#define CH7xxx_VID 0x84
+#define CH7xxx_DID 0x17
+
+#define CH7xxx_NUM_REGS 0x4c
+
+#define CH7xxx_CM 0x1c
+#define CH7xxx_CM_XCM (1<<0)
+#define CH7xxx_CM_MCP (1<<2)
+#define CH7xxx_INPUT_CLOCK 0x1d
+#define CH7xxx_GPIO 0x1e
+#define CH7xxx_GPIO_HPIR (1<<3)
+#define CH7xxx_IDF 0x1f
+
+#define CH7xxx_IDF_HSP (1<<3)
+#define CH7xxx_IDF_VSP (1<<4)
+
+#define CH7xxx_CONNECTION_DETECT 0x20
+#define CH7xxx_CDET_DVI (1<<5)
+
+#define CH7301_DAC_CNTL 0x21
+#define CH7301_HOTPLUG 0x23
+#define CH7xxx_TCTL 0x31
+#define CH7xxx_TVCO 0x32
+#define CH7xxx_TPCP 0x33
+#define CH7xxx_TPD 0x34
+#define CH7xxx_TPVT 0x35
+#define CH7xxx_TLPF 0x36
+#define CH7xxx_TCT 0x37
+#define CH7301_TEST_PATTERN 0x48
+
+#define CH7xxx_PM 0x49
+#define CH7xxx_PM_FPD (1<<0)
+#define CH7301_PM_DACPD0 (1<<1)
+#define CH7301_PM_DACPD1 (1<<2)
+#define CH7301_PM_DACPD2 (1<<3)
+#define CH7xxx_PM_DVIL (1<<6)
+#define CH7xxx_PM_DVIP (1<<7)
+
+#define CH7301_SYNC_POLARITY 0x56
+#define CH7301_SYNC_RGB_YUV (1<<0)
+#define CH7301_SYNC_POL_DVI (1<<5)
+
+/** @file
+ * driver for the Chrontel 7xxx DVI chip over DVO.
+ */
+
+static struct ch7xxx_id_struct {
+ uint8_t vid;
+ char *name;
+} ch7xxx_ids[] = {
+ { CH7011_VID, "CH7011" },
+ { CH7009A_VID, "CH7009A" },
+ { CH7009B_VID, "CH7009B" },
+ { CH7301_VID, "CH7301" },
+};
+
+struct ch7xxx_priv {
+ bool quiet;
+};
+
+char *ch7xxx_get_id(uint8_t);
+bool ch7xxx_readb(struct intel_dvo_device *, int, uint8_t *);
+bool ch7xxx_writeb(struct intel_dvo_device *, int, uint8_t);
+bool ch7xxx_init(struct intel_dvo_device *, struct i2c_controller *);
+enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *);
+enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *,
+ struct drm_display_mode *);
+void ch7xxx_mode_set(struct intel_dvo_device *, struct drm_display_mode *,
+ struct drm_display_mode *);
+void ch7xxx_dpms(struct intel_dvo_device *, bool);
+bool ch7xxx_get_hw_state(struct intel_dvo_device *);
+void ch7xxx_dump_regs(struct intel_dvo_device *);
+void ch7xxx_destroy(struct intel_dvo_device *);
+
+char *
+ch7xxx_get_id(uint8_t vid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
+ if (ch7xxx_ids[i].vid == vid)
+ return ch7xxx_ids[i].name;
+ }
+
+ return NULL;
+}
+
+/** Reads an 8 bit register */
+bool
+ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ ret = iic_exec(adapter, I2C_OP_READ_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, in_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ iic_release_bus(adapter, 0);
+
+ *ch = in_buf[0];
+ return true;
+
+read_err:
+ iic_release_bus(adapter, 0);
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %02x.\n",
+ addr, dvo->slave_addr);
+ }
+ return false;
+}
+
+/** Writes an 8 bit register */
+bool
+ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 2, 0);
+ iic_release_bus(adapter, 0);
+ if (ret)
+ goto write_err;
+
+ return true;
+
+write_err:
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %d.\n",
+ addr, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+bool
+ch7xxx_init(struct intel_dvo_device *dvo,
+ struct i2c_controller *adapter)
+{
+ /* this will detect the CH7xxx chip on the specified i2c bus */
+ struct ch7xxx_priv *ch7xxx;
+ uint8_t vendor, device;
+ char *name;
+
+ ch7xxx = malloc(sizeof(struct ch7xxx_priv), M_DRM, M_WAITOK | M_ZERO);
+ if (ch7xxx == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ch7xxx;
+ ch7xxx->quiet = true;
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
+ goto out;
+
+ name = ch7xxx_get_id(vendor);
+ if (!name) {
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from "
+ "slave %d.\n",
+ vendor, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
+ goto out;
+
+ if (device != CH7xxx_DID) {
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x "
+ "slave %d.\n",
+ vendor, dvo->slave_addr);
+ goto out;
+ }
+
+ ch7xxx->quiet = false;
+ DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ name, vendor, device);
+ return true;
+out:
+ free(ch7xxx, M_DRM);
+ return false;
+}
+
+enum drm_connector_status
+ch7xxx_detect(struct intel_dvo_device *dvo)
+{
+ uint8_t cdet, orig_pm, pm;
+
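+ /*
+ * Sample the connection-detect bit with the DVI output forced on,
+ * then restore the original power-management state.
+ */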
+ ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
+
+ pm = orig_pm;
+ pm &= ~CH7xxx_PM_FPD;
+ pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
+
+ ch7xxx_writeb(dvo, CH7xxx_PM, pm);
+
+ ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
+
+ ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
+
+ if (cdet & CH7xxx_CDET_DVI)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+enum drm_mode_status
+ch7xxx_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+void
+ch7xxx_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint8_t tvco, tpcp, tpd, tlpf, idf;
+
+ if (mode->clock <= 65000) {
+ tvco = 0x23;
+ tpcp = 0x08;
+ tpd = 0x16;
+ tlpf = 0x60;
+ } else {
+ tvco = 0x2d;
+ tpcp = 0x06;
+ tpd = 0x26;
+ tlpf = 0xa0;
+ }
+
+ ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
+ ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
+ ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
+ ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
+ ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
+ ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
+ ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
+
+ ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+
+ idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ idf |= CH7xxx_IDF_HSP;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ idf |= CH7xxx_IDF_VSP;
+
+ ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+}
+
+/* set the CH7xxx power state */
+void
+ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ if (enable)
+ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
+ else
+ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
+}
+
+bool
+ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+ if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
+ return true;
+ else
+ return false;
+}
+
+void
+ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+{
+ int i;
+
+ for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
+ if ((i % 8) == 0)
+ DRM_LOG_KMS("\n %02X: ", i);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_LOG_KMS("%02X ", val);
+ }
+}
+
+void
+ch7xxx_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+ if (ch7xxx) {
+ free(ch7xxx, M_DRM);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ch7xxx_ops = {
+ .init = ch7xxx_init,
+ .detect = ch7xxx_detect,
+ .mode_valid = ch7xxx_mode_valid,
+ .mode_set = ch7xxx_mode_set,
+ .dpms = ch7xxx_dpms,
+ .get_hw_state = ch7xxx_get_hw_state,
+ .dump_regs = ch7xxx_dump_regs,
+ .destroy = ch7xxx_destroy,
+};
diff --git a/sys/dev/pci/drm/i915/dvo_ivch.c b/sys/dev/pci/drm/i915/dvo_ivch.c
new file mode 100644
index 00000000000..bbc965d34fa
--- /dev/null
+++ b/sys/dev/pci/drm/i915/dvo_ivch.c
@@ -0,0 +1,458 @@
+/* $OpenBSD: dvo_ivch.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+/*
+ * register definitions for the i82807aa.
+ *
+ * Documentation on this chipset can be found in datasheet #29069001 at
+ * intel.com.
+ */
+
+/*
+ * VCH Revision & GMBus Base Addr
+ */
+#define VR00 0x00
+# define VR00_BASE_ADDRESS_MASK 0x007f
+
+/*
+ * Functionality Enable
+ */
+#define VR01 0x01
+
+/*
+ * Enable the panel fitter
+ */
+# define VR01_PANEL_FIT_ENABLE (1 << 3)
+/*
+ * Enables the LCD display.
+ *
+ * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
+ */
+# define VR01_LCD_ENABLE (1 << 2)
+/** Enables the DVO repeater. */
+# define VR01_DVO_BYPASS_ENABLE (1 << 1)
+/** Enables the DVO clock */
+# define VR01_DVO_ENABLE (1 << 0)
+
+/*
+ * LCD Interface Format
+ */
+#define VR10 0x10
+/** Enables LVDS output instead of CMOS */
+# define VR10_LVDS_ENABLE (1 << 4)
+/** Enables 18-bit LVDS output. */
+# define VR10_INTERFACE_1X18 (0 << 2)
+/** Enables 24-bit LVDS or CMOS output */
+# define VR10_INTERFACE_1X24 (1 << 2)
+/** Enables 2x18-bit LVDS or CMOS output. */
+# define VR10_INTERFACE_2X18 (2 << 2)
+/** Enables 2x24-bit LVDS output */
+# define VR10_INTERFACE_2X24 (3 << 2)
+
+/*
+ * VR20 LCD Horizontal Display Size
+ */
+#define VR20 0x20
+
+/*
+ * LCD Vertical Display Size
+ */
+#define VR21 0x21
+
+/*
+ * Panel power down status
+ */
+#define VR30 0x30
+/** Read only bit indicating that the panel is not in a safe poweroff state. */
+# define VR30_PANEL_ON (1 << 15)
+
+#define VR40 0x40
+# define VR40_STALL_ENABLE (1 << 13)
+# define VR40_VERTICAL_INTERP_ENABLE (1 << 12)
+# define VR40_ENHANCED_PANEL_FITTING (1 << 11)
+# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
+# define VR40_AUTO_RATIO_ENABLE (1 << 9)
+# define VR40_CLOCK_GATING_ENABLE (1 << 8)
+
+/*
+ * Panel Fitting Vertical Ratio
+ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
+ */
+#define VR41 0x41
+
+/*
+ * Panel Fitting Horizontal Ratio
+ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
+ */
+#define VR42 0x42
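+
+/*
+ * Worked example of the two ratio formulas above, with assumed
+ * numbers: fitting an 800x600 image onto a 1024x768 panel gives
+ *
+ * VR42 = (((800 - 1) << 16) / (1024 - 1)) >> 2 = 0x31fc
+ * VR41 = (((600 - 1) << 16) / (768 - 1)) >> 2 = 0x31fb
+ *
+ * i.e. both ratios are ~0.781 in 2.14 fixed point; see the panel
+ * fitting path in ivch_mode_set() below.
+ */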
+
+/*
+ * Horizontal Image Size
+ */
+#define VR43 0x43
+
+/* VR80 GPIO 0
+ */
+#define VR80 0x80
+#define VR81 0x81
+#define VR82 0x82
+#define VR83 0x83
+#define VR84 0x84
+#define VR85 0x85
+#define VR86 0x86
+#define VR87 0x87
+
+/* VR88 GPIO 8
+ */
+#define VR88 0x88
+
+/* Graphics BIOS scratch 0
+ */
+#define VR8E 0x8E
+# define VR8E_PANEL_TYPE_MASK (0xf << 0)
+# define VR8E_PANEL_INTERFACE_CMOS (0 << 4)
+# define VR8E_PANEL_INTERFACE_LVDS (1 << 4)
+# define VR8E_FORCE_DEFAULT_PANEL (1 << 5)
+
+/* Graphics BIOS scratch 1
+ */
+#define VR8F 0x8F
+# define VR8F_VCH_PRESENT (1 << 0)
+# define VR8F_DISPLAY_CONN (1 << 1)
+# define VR8F_POWER_MASK (0x3c)
+# define VR8F_POWER_POS (2)
+
+
+struct ivch_priv {
+ bool quiet;
+
+ uint16_t width, height;
+};
+
+
+void ivch_dump_regs(struct intel_dvo_device *);
+bool ivch_read(struct intel_dvo_device *, int, uint16_t *);
+bool ivch_write(struct intel_dvo_device *, int, uint16_t);
+bool ivch_init(struct intel_dvo_device *, struct i2c_controller *);
+enum drm_connector_status ivch_detect(struct intel_dvo_device *);
+enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *,
+ struct drm_display_mode *);
+void ivch_dpms(struct intel_dvo_device *, bool);
+bool ivch_get_hw_state(struct intel_dvo_device *);
+void ivch_mode_set(struct intel_dvo_device *, struct drm_display_mode *,
+ struct drm_display_mode *);
+void ivch_destroy(struct intel_dvo_device *);
+
+/**
+ * Reads a register on the ivch.
+ *
+ * Each of the 256 registers is 16 bits long.
+ */
+bool
+ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ u8 out_buf[1];
+ u8 in_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+
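+ /*
+ * The ivch wants an unusual three-step sequence: a zero-length
+ * read at the device address, the register index written to bus
+ * address 0, and then a two-byte read of the register value,
+ * low byte first.
+ */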
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_READ_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, NULL, 0, 0);
+ if (ret)
+ goto read_err;
+ ret = iic_exec(adapter, I2C_OP_WRITE, 0,
+ &cmd, 1, out_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ ret = iic_exec(adapter, I2C_OP_READ, dvo->slave_addr,
+ &cmd, 1, in_buf, 2, 0);
+ if (ret)
+ goto read_err;
+ iic_release_bus(adapter, 0);
+
+ *data = (in_buf[1] << 8) | in_buf[0];
+ return true;
+
+read_err:
+ iic_release_bus(adapter, 0);
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ "%02x.\n",
+ addr, dvo->slave_addr);
+ }
+ return false;
+}
+
+/** Writes a 16-bit register on the ivch */
+bool
+ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ u8 out_buf[3];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = data & 0xff;
+ out_buf[2] = data >> 8;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 3, 0);
+ iic_release_bus(adapter, 0);
+ if (ret)
+ goto write_err;
+
+ return true;
+
+write_err:
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %d.\n",
+ addr, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/** Probes the given bus and slave address for an ivch */
+bool
+ivch_init(struct intel_dvo_device *dvo,
+ struct i2c_controller *adapter)
+{
+ struct ivch_priv *priv;
+ uint16_t temp;
+
+ priv = malloc(sizeof(struct ivch_priv), M_DRM, M_WAITOK | M_ZERO);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = priv;
+ priv->quiet = true;
+
+ if (!ivch_read(dvo, VR00, &temp))
+ goto out;
+ priv->quiet = false;
+
+ /* Since the identification bits are probably zeroes, which hardly
+ * amount to a unique signature, check that the value in the base
+ * address field matches the address it's responding on.
+ */
+ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+ DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
+ "(%d vs %d)\n",
+ (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+ goto out;
+ }
+
+ ivch_read(dvo, VR20, &priv->width);
+ ivch_read(dvo, VR21, &priv->height);
+
+ return true;
+
+out:
+ free(priv, M_DRM);
+ return false;
+}
+
+enum drm_connector_status
+ivch_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+enum drm_mode_status
+ivch_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 112000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+/** Sets the power state of the panel connected to the ivch */
+void
+ivch_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ int i;
+ uint16_t vr01, vr30, backlight;
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return;
+
+ if (enable)
+ backlight = 1;
+ else
+ backlight = 0;
+ ivch_write(dvo, VR80, backlight);
+
+ if (enable)
+ vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
+ else
+ vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
+
+ ivch_write(dvo, VR01, vr01);
+
+ /* Wait for the panel to make its state transition */
+ for (i = 0; i < 100; i++) {
+ if (!ivch_read(dvo, VR30, &vr30))
+ break;
+
+ if (((vr30 & VR30_PANEL_ON) != 0) == enable)
+ break;
+ delay(1000);
+ }
+ /* wait some more; vch may fail to resync sometimes without this */
+ delay(16 * 1000);
+}
+
+bool
+ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint16_t vr01;
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return false;
+
+ if (vr01 & VR01_LCD_ENABLE)
+ return true;
+ else
+ return false;
+}
+
+void
+ivch_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint16_t vr40 = 0;
+ uint16_t vr01;
+
+ vr01 = 0;
+ vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
+ VR40_HORIZONTAL_INTERP_ENABLE);
+
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay) {
+ uint16_t x_ratio, y_ratio;
+
+ vr01 |= VR01_PANEL_FIT_ENABLE;
+ vr40 |= VR40_CLOCK_GATING_ENABLE;
+ x_ratio = (((mode->hdisplay - 1) << 16) /
+ (adjusted_mode->hdisplay - 1)) >> 2;
+ y_ratio = (((mode->vdisplay - 1) << 16) /
+ (adjusted_mode->vdisplay - 1)) >> 2;
+ ivch_write(dvo, VR42, x_ratio);
+ ivch_write(dvo, VR41, y_ratio);
+ } else {
+ vr01 &= ~VR01_PANEL_FIT_ENABLE;
+ vr40 &= ~VR40_CLOCK_GATING_ENABLE;
+ }
+ vr40 &= ~VR40_AUTO_RATIO_ENABLE;
+
+ ivch_write(dvo, VR01, vr01);
+ ivch_write(dvo, VR40, vr40);
+
+ ivch_dump_regs(dvo);
+}
+
+void
+ivch_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint16_t val;
+
+ ivch_read(dvo, VR00, &val);
+ DRM_LOG_KMS("VR00: 0x%04x\n", val);
+ ivch_read(dvo, VR01, &val);
+ DRM_LOG_KMS("VR01: 0x%04x\n", val);
+ ivch_read(dvo, VR30, &val);
+ DRM_LOG_KMS("VR30: 0x%04x\n", val);
+ ivch_read(dvo, VR40, &val);
+ DRM_LOG_KMS("VR40: 0x%04x\n", val);
+
+ /* GPIO registers */
+ ivch_read(dvo, VR80, &val);
+ DRM_LOG_KMS("VR80: 0x%04x\n", val);
+ ivch_read(dvo, VR81, &val);
+ DRM_LOG_KMS("VR81: 0x%04x\n", val);
+ ivch_read(dvo, VR82, &val);
+ DRM_LOG_KMS("VR82: 0x%04x\n", val);
+ ivch_read(dvo, VR83, &val);
+ DRM_LOG_KMS("VR83: 0x%04x\n", val);
+ ivch_read(dvo, VR84, &val);
+ DRM_LOG_KMS("VR84: 0x%04x\n", val);
+ ivch_read(dvo, VR85, &val);
+ DRM_LOG_KMS("VR85: 0x%04x\n", val);
+ ivch_read(dvo, VR86, &val);
+ DRM_LOG_KMS("VR86: 0x%04x\n", val);
+ ivch_read(dvo, VR87, &val);
+ DRM_LOG_KMS("VR87: 0x%04x\n", val);
+ ivch_read(dvo, VR88, &val);
+ DRM_LOG_KMS("VR88: 0x%04x\n", val);
+
+ /* Scratch register 0 - AIM Panel type */
+ ivch_read(dvo, VR8E, &val);
+ DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+
+ /* Scratch register 1 - Status register */
+ ivch_read(dvo, VR8F, &val);
+ DRM_LOG_KMS("VR8F: 0x%04x\n", val);
+}
+
+void
+ivch_destroy(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ free(priv, M_DRM);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ivch_ops = {
+ .init = ivch_init,
+ .dpms = ivch_dpms,
+ .get_hw_state = ivch_get_hw_state,
+ .mode_valid = ivch_mode_valid,
+ .mode_set = ivch_mode_set,
+ .detect = ivch_detect,
+ .dump_regs = ivch_dump_regs,
+ .destroy = ivch_destroy,
+};
diff --git a/sys/dev/pci/drm/i915/dvo_ns2501.c b/sys/dev/pci/drm/i915/dvo_ns2501.c
new file mode 100644
index 00000000000..4cebeffc292
--- /dev/null
+++ b/sys/dev/pci/drm/i915/dvo_ns2501.c
@@ -0,0 +1,615 @@
+/* $OpenBSD: dvo_ns2501.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+ //I2CDevRec d;
+ bool quiet;
+ int reg_8_shadow;
+ int reg_8_set;
+ // Shadow registers for i915
+ int dvoc;
+ int pll_a;
+ int srcdim;
+ int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+void enable_dvo(struct intel_dvo_device *);
+void restore_dvo(struct intel_dvo_device *);
+bool ns2501_readb(struct intel_dvo_device *, int, uint8_t *);
+bool ns2501_writeb(struct intel_dvo_device *, int, uint8_t);
+enum drm_connector_status ns2501_detect(struct intel_dvo_device *);
+bool ns2501_init(struct intel_dvo_device *, struct i2c_controller *);
+enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *,
+ struct drm_display_mode *);
+void ns2501_mode_set(struct intel_dvo_device *, struct drm_display_mode *,
+ struct drm_display_mode *);
+bool ns2501_get_hw_state(struct intel_dvo_device *);
+void ns2501_dpms(struct intel_dvo_device *, bool);
+void ns2501_dump_regs(struct intel_dvo_device *);
+void ns2501_destroy(struct intel_dvo_device *);
+
+/*
+ * For reasons unclear to me, the ns2501, at least on the Fujitsu/Siemens
+ * laptops, does not respond on the i2c bus unless both the PLL is
+ * running and the display is configured in its native resolution.
+ * This function forces the DVO on and stores the registers it touches.
+ * Afterwards, the registers are restored to their regular values.
+ *
+ * This is pretty much a hack, though it works.
+ * Without it, ns2501_readb and ns2501_writeb fail
+ * when switching the resolution.
+ */
+
+void
+enable_dvo(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ controller);
+ drm_i915_private_t *dev_priv = bus->gp.dev_priv;
+
+ DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+ ns->dvoc = I915_READ(DVO_C);
+ ns->pll_a = I915_READ(_DPLL_A);
+ ns->srcdim = I915_READ(DVOC_SRCDIM);
+ ns->fw_blc = I915_READ(FW_BLC);
+
+ I915_WRITE(DVOC, 0x10004084);
+ I915_WRITE(_DPLL_A, 0xd0820000);
+ I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
+ I915_WRITE(FW_BLC, 0x1080304);
+
+ I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+void
+restore_dvo(struct intel_dvo_device *dvo)
+{
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ controller);
+ drm_i915_private_t *dev_priv = bus->gp.dev_priv;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ I915_WRITE(DVOC, ns->dvoc);
+ I915_WRITE(_DPLL_A, ns->pll_a);
+ I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+ I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+** Read a register from the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+bool
+ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ ret = iic_exec(adapter, I2C_OP_READ_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, in_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ iic_release_bus(adapter, 0);
+
+ *ch = in_buf[0];
+ return true;
+
+read_err:
+ iic_release_bus(adapter, 0);
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS
+ ("Unable to read register 0x%02x from 0x%02x.\n", addr,
+ dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/*
+** Write a register to the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+bool
+ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 2, 0);
+ iic_release_bus(adapter, 0);
+ if (ret)
+ goto write_err;
+
+ return true;
+
+write_err:
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %d\n",
+ addr, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* National Semiconductor 2501 driver for a chip on the i2c bus.
+ * Scan for the chip on the bus.
+ * Hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, it will not be seen and not detected.
+ * Bummer!
+ */
+bool
+ns2501_init(struct intel_dvo_device *dvo,
+ struct i2c_controller *adapter)
+{
+ /* this will detect the NS2501 chip on the specified i2c bus */
+ struct ns2501_priv *ns;
+ unsigned char ch;
+
+ ns = malloc(sizeof(struct ns2501_priv), M_DRM, M_WAITOK | M_ZERO);
+ if (ns == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ns;
+ ns->quiet = true;
+
+ if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_VID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from Slave %d.\n",
+ ch, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_DID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from Slave %d.\n",
+ ch, dvo->slave_addr);
+ goto out;
+ }
+ ns->quiet = false;
+ ns->reg_8_set = 0;
+ ns->reg_8_shadow =
+ NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+ DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+ return true;
+
+out:
+ free(ns, M_DRM);
+ return false;
+}
+
+enum drm_connector_status
+ns2501_detect(struct intel_dvo_device *dvo)
+{
+ /*
+ * This is a laptop display; it doesn't have hotplugging.
+ * Even if it did, the detection bit of the 2501 is unreliable, as
+ * it only works for some display types.
+ * It is even more unreliable as the PLL must be active for
+ * reading from the chip to work at all.
+ */
+ return connector_status_connected;
+}
+
+enum drm_mode_status
+ns2501_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS
+ ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Currently, these are all the modes I have data for.
+ * More might exist. Unclear how to find the native resolution
+ * of the panel in here so we could always accept it
+ * by disabling the scaler.
+ */
+ if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+ (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+ return MODE_OK;
+ } else {
+ return MODE_ONE_SIZE; /* Is this a reasonable error? */
+ }
+}
+
+void
+ns2501_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ DRM_DEBUG_KMS
+ ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Where do I find the native resolution for which scaling is not required???
+ *
+ * First trigger the DVO on as otherwise the chip does not appear on the i2c
+ * bus.
+ */
+ do {
+ ok = true;
+
+ if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+ /* mode 277 */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+ DRM_DEBUG_KMS("%s: switching to 800x600\n",
+ __FUNCTION__);
+
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it over here.
+ * This also means that I cannot support any other modes
+ * except the ones supported by the bios.
+ */
+ ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
+ ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
+ ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0x27);
+ ok &= ns2501_writeb(dvo, 0x81, 0x03);
+ ok &= ns2501_writeb(dvo, 0x82, 0x41);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ /* mode 274 */
+ DRM_DEBUG_KMS("%s: switching to 640x480\n",
+ __FUNCTION__);
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it over here.
+ * This also means that I cannot support any other modes
+ * except the ones supported by the bios.
+ */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+ ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+ ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+ ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0xff);
+ ok &= ns2501_writeb(dvo, 0x81, 0x07);
+ ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+ /* mode 280 */
+ DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+ __FUNCTION__);
+ /*
+ * This might or might not work, actually. I'm silently
+ * assuming here that the native panel resolution is
+ * 1024x768. If not, then this leaves the scaler disabled
+ * generating a picture that is likely not the expected.
+ *
+ * Problem is that I do not know where to take the panel
+ * dimensions from.
+ *
+ * Enable the bypass, scaling not required.
+ *
+ * The scaler registers are irrelevant here....
+ *
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ ok &= ns2501_writeb(dvo, 0x37, 0x44);
+ } else {
+ /*
+ * Data not known. Bummer!
+ * Hopefully the code never gets here, as
+ * ns2501_mode_valid() accepts no other modes.
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ }
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+ /*
+ * Restore the old i915 registers before
+ * forcing the ns2501 on.
+ */
+ if (restore)
+ restore_dvo(dvo);
+}
+
+/* read back the NS2501 power state */
+bool
+ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+ return false;
+
+ if (ch & NS2501_8_PD)
+ return true;
+ else
+ return false;
+}
+
+/* set the NS2501 power state */
+void
+ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ unsigned char ch;
+
+ DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
+ __FUNCTION__, enable);
+
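+ /*
+ * The chip may be unreachable over i2c while it is powered down
+ * (see enable_dvo() above), so work from the shadow copy of
+ * register 8 and only touch the hardware when the cached value
+ * changes.
+ */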
+ ch = ns->reg_8_shadow;
+
+ if (enable)
+ ch |= NS2501_8_PD;
+ else
+ ch &= ~NS2501_8_PD;
+
+ if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+ ns->reg_8_set = 1;
+ ns->reg_8_shadow = ch;
+
+ do {
+ ok = true;
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+ ok &=
+ ns2501_writeb(dvo, 0x34,
+ enable ? 0x03 : 0x00);
+ ok &=
+ ns2501_writeb(dvo, 0x35,
+ enable ? 0xff : 0x00);
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+
+ if (restore)
+ restore_dvo(dvo);
+ }
+}
+
+void
+ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+ DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+ DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG8, &val);
+ DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG9, &val);
+ DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REGC, &val);
+ DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+void
+ns2501_destroy(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+
+ if (ns) {
+ free(ns, M_DRM);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+ .init = ns2501_init,
+ .detect = ns2501_detect,
+ .mode_valid = ns2501_mode_valid,
+ .mode_set = ns2501_mode_set,
+ .dpms = ns2501_dpms,
+ .get_hw_state = ns2501_get_hw_state,
+ .dump_regs = ns2501_dump_regs,
+ .destroy = ns2501_destroy,
+};
diff --git a/sys/dev/pci/drm/i915/dvo_sil164.c b/sys/dev/pci/drm/i915/dvo_sil164.c
new file mode 100644
index 00000000000..2b57cd0ea2e
--- /dev/null
+++ b/sys/dev/pci/drm/i915/dvo_sil164.c
@@ -0,0 +1,304 @@
+/* $OpenBSD: dvo_sil164.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define SIL164_VID 0x0001
+#define SIL164_DID 0x0006
+
+#define SIL164_VID_LO 0x00
+#define SIL164_VID_HI 0x01
+#define SIL164_DID_LO 0x02
+#define SIL164_DID_HI 0x03
+#define SIL164_REV 0x04
+#define SIL164_RSVD 0x05
+#define SIL164_FREQ_LO 0x06
+#define SIL164_FREQ_HI 0x07
+
+#define SIL164_REG8 0x08
+#define SIL164_8_VEN (1<<5)
+#define SIL164_8_HEN (1<<4)
+#define SIL164_8_DSEL (1<<3)
+#define SIL164_8_BSEL (1<<2)
+#define SIL164_8_EDGE (1<<1)
+#define SIL164_8_PD (1<<0)
+
+#define SIL164_REG9 0x09
+#define SIL164_9_VLOW (1<<7)
+#define SIL164_9_MSEL_MASK (0x7<<4)
+#define SIL164_9_TSEL (1<<3)
+#define SIL164_9_RSEN (1<<2)
+#define SIL164_9_HTPLG (1<<1)
+#define SIL164_9_MDI (1<<0)
+
+#define SIL164_REGC 0x0c
+
+struct sil164_priv {
+ //I2CDevRec d;
+ bool quiet;
+};
+
+#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
+
+bool sil164_readb(struct intel_dvo_device *, int, uint8_t *);
+bool sil164_writeb(struct intel_dvo_device *, int, uint8_t);
+bool sil164_init(struct intel_dvo_device *, struct i2c_controller *);
+enum drm_connector_status sil164_detect(struct intel_dvo_device *);
+enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *,
+ struct drm_display_mode *);
+void sil164_mode_set(struct intel_dvo_device *, struct drm_display_mode *,
+ struct drm_display_mode *);
+void sil164_dpms(struct intel_dvo_device *, bool);
+bool sil164_get_hw_state(struct intel_dvo_device *);
+void sil164_dump_regs(struct intel_dvo_device *);
+void sil164_destroy(struct intel_dvo_device *);
+
+bool
+sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ ret = iic_exec(adapter, I2C_OP_READ_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, in_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ iic_release_bus(adapter, 0);
+
+ *ch = in_buf[0];
+ return true;
+
+read_err:
+ iic_release_bus(adapter, 0);
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %02x.\n",
+ addr, dvo->slave_addr);
+ }
+ return false;
+}
+
+bool
+sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 2, 0);
+ iic_release_bus(adapter, 0);
+ if (ret)
+ goto write_err;
+
+ return true;
+
+write_err:
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %d.\n",
+ addr, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* Silicon Image 164 driver for chip on i2c bus */
+bool
+sil164_init(struct intel_dvo_device *dvo,
+ struct i2c_controller *adapter)
+{
+ /* this will detect the SIL164 chip on the specified i2c bus */
+ struct sil164_priv *sil;
+ unsigned char ch;
+
+ sil = malloc(sizeof(struct sil164_priv), M_DRM, M_WAITOK | M_ZERO);
+ if (sil == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = sil;
+ sil->quiet = true;
+
+ if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_VID & 0xff)) {
+ DRM_DEBUG_KMS("sil164 not detected got %d: from Slave %d.\n",
+ ch, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_DID & 0xff)) {
+ DRM_DEBUG_KMS("sil164 not detected got %d: from Slave %d.\n",
+ ch, dvo->slave_addr);
+ goto out;
+ }
+ sil->quiet = false;
+
+ DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
+ return true;
+
+out:
+ free(sil, M_DRM);
+ return false;
+}
+
+enum drm_connector_status
+sil164_detect(struct intel_dvo_device *dvo)
+{
+ uint8_t reg9;
+
+ sil164_readb(dvo, SIL164_REG9, &reg9);
+
+ if (reg9 & SIL164_9_HTPLG)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+enum drm_mode_status
+sil164_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+void
+sil164_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock
+ * dependencies in the mode setup, we can just leave the
+ * registers alone and everything will work fine.
+ */
+ /* recommended programming sequence from the doc:
+ *
+ * sil164_writeb(sil, 0x08, 0x30);
+ * sil164_writeb(sil, 0x09, 0x00);
+ * sil164_writeb(sil, 0x0a, 0x90);
+ * sil164_writeb(sil, 0x0c, 0x89);
+ * sil164_writeb(sil, 0x08, 0x31);
+ */
+ /* don't do much */
+ return;
+}
+
+/* set the SIL164 power state */
+void
+sil164_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ bool ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (!ret)
+ return;
+
+ if (enable)
+ ch |= SIL164_8_PD;
+ else
+ ch &= ~SIL164_8_PD;
+
+ sil164_writeb(dvo, SIL164_REG8, ch);
+ return;
+}
+
+bool
+sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+ bool ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (!ret)
+ return false;
+
+ if (ch & SIL164_8_PD)
+ return true;
+ else
+ return false;
+}
+
+void
+sil164_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ sil164_readb(dvo, SIL164_FREQ_LO, &val);
+ DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_FREQ_HI, &val);
+ DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REG8, &val);
+ DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REG9, &val);
+ DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REGC, &val);
+ DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
+}
+
+void
+sil164_destroy(struct intel_dvo_device *dvo)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+
+ if (sil) {
+ free(sil, M_DRM);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops sil164_ops = {
+ .init = sil164_init,
+ .detect = sil164_detect,
+ .mode_valid = sil164_mode_valid,
+ .mode_set = sil164_mode_set,
+ .dpms = sil164_dpms,
+ .get_hw_state = sil164_get_hw_state,
+ .dump_regs = sil164_dump_regs,
+ .destroy = sil164_destroy,
+};
diff --git a/sys/dev/pci/drm/i915/dvo_tfp410.c b/sys/dev/pci/drm/i915/dvo_tfp410.c
new file mode 100644
index 00000000000..4e73b928743
--- /dev/null
+++ b/sys/dev/pci/drm/i915/dvo_tfp410.c
@@ -0,0 +1,345 @@
+/* $OpenBSD: dvo_tfp410.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+#include "dvo.h"
+
+/* register definitions according to the TFP410 data sheet */
+#define TFP410_VID 0x014C
+#define TFP410_DID 0x0410
+
+#define TFP410_VID_LO 0x00
+#define TFP410_VID_HI 0x01
+#define TFP410_DID_LO 0x02
+#define TFP410_DID_HI 0x03
+#define TFP410_REV 0x04
+
+#define TFP410_CTL_1 0x08
+#define TFP410_CTL_1_TDIS (1<<6)
+#define TFP410_CTL_1_VEN (1<<5)
+#define TFP410_CTL_1_HEN (1<<4)
+#define TFP410_CTL_1_DSEL (1<<3)
+#define TFP410_CTL_1_BSEL (1<<2)
+#define TFP410_CTL_1_EDGE (1<<1)
+#define TFP410_CTL_1_PD (1<<0)
+
+#define TFP410_CTL_2 0x09
+#define TFP410_CTL_2_VLOW (1<<7)
+#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
+#define TFP410_CTL_2_MSEL (1<<4)
+#define TFP410_CTL_2_TSEL (1<<3)
+#define TFP410_CTL_2_RSEN (1<<2)
+#define TFP410_CTL_2_HTPLG (1<<1)
+#define TFP410_CTL_2_MDI (1<<0)
+
+#define TFP410_CTL_3 0x0A
+#define TFP410_CTL_3_DK_MASK (0x7<<5)
+#define TFP410_CTL_3_DK (1<<5)
+#define TFP410_CTL_3_DKEN (1<<4)
+#define TFP410_CTL_3_CTL_MASK (0x7<<1)
+#define TFP410_CTL_3_CTL (1<<1)
+
+#define TFP410_USERCFG 0x0B
+
+#define TFP410_DE_DLY 0x32
+
+#define TFP410_DE_CTL 0x33
+#define TFP410_DE_CTL_DEGEN (1<<6)
+#define TFP410_DE_CTL_VSPOL (1<<5)
+#define TFP410_DE_CTL_HSPOL (1<<4)
+#define TFP410_DE_CTL_DEDLY8 (1<<0)
+
+#define TFP410_DE_TOP 0x34
+
+#define TFP410_DE_CNT_LO 0x36
+#define TFP410_DE_CNT_HI 0x37
+
+#define TFP410_DE_LIN_LO 0x38
+#define TFP410_DE_LIN_HI 0x39
+
+#define TFP410_H_RES_LO 0x3A
+#define TFP410_H_RES_HI 0x3B
+
+#define TFP410_V_RES_LO 0x3C
+#define TFP410_V_RES_HI 0x3D
+
+struct tfp410_priv {
+ bool quiet;
+};
+
+bool tfp410_readb(struct intel_dvo_device *, int, uint8_t *);
+bool tfp410_writeb(struct intel_dvo_device *, int, uint8_t);
+int tfp410_getid(struct intel_dvo_device *, int);
+bool tfp410_init(struct intel_dvo_device *, struct i2c_controller *);
+enum drm_connector_status tfp410_detect(struct intel_dvo_device *);
+enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *,
+ struct drm_display_mode *);
+void tfp410_mode_set(struct intel_dvo_device *, struct drm_display_mode *,
+ struct drm_display_mode *);
+void tfp410_dpms(struct intel_dvo_device *, bool);
+bool tfp410_get_hw_state(struct intel_dvo_device *);
+void tfp410_dump_regs(struct intel_dvo_device *);
+void tfp410_destroy(struct intel_dvo_device *);
+
+bool
+tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ ret = iic_exec(adapter, I2C_OP_READ_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, in_buf, 1, 0);
+ if (ret)
+ goto read_err;
+ iic_release_bus(adapter, 0);
+
+ *ch = in_buf[0];
+ return true;
+
+read_err:
+ iic_release_bus(adapter, 0);
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %02x.\n",
+ addr, dvo->slave_addr);
+ }
+ return false;
+}
+
+bool
+tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct i2c_controller *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ int ret;
+ uint8_t cmd = 0;
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ iic_acquire_bus(adapter, 0);
+ ret = iic_exec(adapter, I2C_OP_WRITE_WITH_STOP, dvo->slave_addr,
+ &cmd, 1, out_buf, 2, 0);
+ iic_release_bus(adapter, 0);
+ if (ret)
+ goto write_err;
+
+ return true;
+
+write_err:
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %d.\n",
+ addr, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+int
+tfp410_getid(struct intel_dvo_device *dvo, int addr)
+{
+ uint8_t ch1, ch2;
+
+ if (tfp410_readb(dvo, addr+0, &ch1) &&
+ tfp410_readb(dvo, addr+1, &ch2))
+ return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
+
+ return -1;
+}
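+
+/*
+ * For illustration: the ID registers are little-endian, so on a TFP410
+ * tfp410_getid(dvo, TFP410_VID_LO) reads 0x4C from TFP410_VID_LO and
+ * 0x01 from TFP410_VID_HI, assembling (0x01 << 8) | 0x4C == 0x014C,
+ * which matches TFP410_VID.  A failed read of either byte yields -1,
+ * which can never equal a valid 16-bit ID.
+ */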
+
+/* Ti TFP410 driver for chip on i2c bus */
+bool
+tfp410_init(struct intel_dvo_device *dvo,
+ struct i2c_controller *adapter)
+{
+ /* this will detect the tfp410 chip on the specified i2c bus */
+ struct tfp410_priv *tfp;
+ int id;
+
+ tfp = malloc(sizeof(struct tfp410_priv), M_DRM, M_WAITOK | M_ZERO);
+ if (tfp == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = tfp;
+ tfp->quiet = true;
+
+ if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+		DRM_DEBUG_KMS("tfp410 not detected, got VID %X from "
+		    "slave %d.\n",
+		    id, dvo->slave_addr);
+ goto out;
+ }
+
+ if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+		DRM_DEBUG_KMS("tfp410 not detected, got DID %X from "
+		    "slave %d.\n",
+		    id, dvo->slave_addr);
+ goto out;
+ }
+ tfp->quiet = false;
+ return true;
+out:
+ free(tfp, M_DRM);
+ return false;
+}
+
+enum drm_connector_status
+tfp410_detect(struct intel_dvo_device *dvo)
+{
+ enum drm_connector_status ret = connector_status_disconnected;
+ uint8_t ctl2;
+
+ if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
+ if (ctl2 & TFP410_CTL_2_RSEN)
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+ }
+
+ return ret;
+}
+
+enum drm_mode_status
+tfp410_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+void
+tfp410_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock dependencies
+ * in the mode setup, we can just leave the registers alone and everything
+ * will work fine.
+ */
+ /* don't do much */
+ return;
+}
+
+/* set the tfp410 power state */
+void
+tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return;
+
+ if (enable)
+ ctl1 |= TFP410_CTL_1_PD;
+ else
+ ctl1 &= ~TFP410_CTL_1_PD;
+
+ tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
+}
+
+bool
+tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return false;
+
+ if (ctl1 & TFP410_CTL_1_PD)
+ return true;
+ else
+ return false;
+}
+
+void
+tfp410_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val, val2;
+
+ tfp410_readb(dvo, TFP410_REV, &val);
+ DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_1, &val);
+ DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_2, &val);
+ DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_3, &val);
+ DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_USERCFG, &val);
+ DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_DLY, &val);
+ DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_CTL, &val);
+ DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_TOP, &val);
+ DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+ tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+ DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+ tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+ DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+ tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+ DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+ tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+ DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+}
+
+void
+tfp410_destroy(struct intel_dvo_device *dvo)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+
+ if (tfp) {
+ free(tfp, M_DRM);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops tfp410_ops = {
+ .init = tfp410_init,
+ .detect = tfp410_detect,
+ .mode_valid = tfp410_mode_valid,
+ .mode_set = tfp410_mode_set,
+ .dpms = tfp410_dpms,
+ .get_hw_state = tfp410_get_hw_state,
+ .dump_regs = tfp410_dump_regs,
+ .destroy = tfp410_destroy,
+};
diff --git a/sys/dev/pci/drm/i915/i915_dma.c b/sys/dev/pci/drm/i915/i915_dma.c
new file mode 100644
index 00000000000..28bfbc8f55c
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_dma.c
@@ -0,0 +1,406 @@
+/* $OpenBSD: i915_dma.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <dev/pci/drm/drm_crtc_helper.h>
+
+void
+i915_kernel_lost_context(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+#if 0
+ struct drm_i915_master_private *master_priv;
+#endif
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ /*
+ * We should never lose context on the ring with modesetting
+ * as we don't expose it to userspace
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
+ if (ring->space < 0)
+ ring->space += ring->size;
+
+#if 0
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (ring->head == ring->tail && master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+#endif
+}
+
+
+int
+i915_getparam(struct inteldrm_softc *dev_priv, void *data)
+{
+ drm_i915_getparam_t *param = data;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int value;
+
+ switch (param->param) {
+ case I915_PARAM_CHIPSET_ID:
+ value = dev->pci_device;
+ break;
+ case I915_PARAM_HAS_GEM:
+ value = 1;
+ break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+ break;
+ case I915_PARAM_HAS_EXECBUF2:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = dev_priv->overlay ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = HAS_BSD(dev);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = HAS_BLT(dev);
+ break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+#ifdef notyet
+ value = 1;
+#else
+ return EINVAL;
+#endif
+ break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev);
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev);
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", param->param);
+ return (EINVAL);
+ }
+ return (copyout(&value, param->value, sizeof(int)));
+}
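+
+/*
+ * Hypothetical userspace sketch of this interface (assumes libdrm's
+ * drmIoctl(); error handling omitted):
+ *
+ *	drm_i915_getparam_t gp;
+ *	int has_gem = 0;
+ *
+ *	gp.param = I915_PARAM_HAS_GEM;
+ *	gp.value = &has_gem;
+ *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ *
+ * The kernel side copies the result out with copyout(), so gp.value
+ * must point at writable user memory.
+ */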
+
+int
+i915_setparam(struct inteldrm_softc *dev_priv, void *data)
+{
+ drm_i915_setparam_t *param = data;
+
+ switch (param->param) {
+ case I915_SETPARAM_NUM_USED_FENCES:
+ if (param->value > dev_priv->num_fence_regs ||
+ param->value < 0)
+ return EINVAL;
+ /* Userspace can use first N regs */
+ dev_priv->fence_reg_start = param->value;
+ break;
+ default:
+ DRM_DEBUG("unknown parameter %d\n", param->param);
+ return (EINVAL);
+ }
+
+ return 0;
+}
+
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/*
+ * Check that the MCHBAR on the host bridge is enabled, and if not, allocate
+ * it.  We do not need to actually map it because we access the BAR through
+ * its mirror on the IGD; however, if it is disabled or not allocated then
+ * the mirror does not work. *sigh*.
+ *
+ * We return a ternary state:
+ * 0 = already enabled, or cannot enable
+ * 1 = enabled, needs disable
+ * 2 = enabled, needs disable and free.
+ */
+int
+intel_setup_mchbar(struct inteldrm_softc *dev_priv,
+ struct pci_attach_args *bpa)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u_int64_t mchbar_addr;
+ pcireg_t tmp, low, high = 0;
+ u_long addr;
+ int reg, ret = 1, enabled = 0;
+
+ reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, DEVEN_REG);
+ enabled = !!(tmp & DEVEN_MCHBAR_EN);
+ } else {
+ tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
+ enabled = tmp & 1;
+ }
+
+ if (enabled) {
+ return (0);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ high = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg + 4);
+ low = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
+ mchbar_addr = ((u_int64_t)high << 32) | low;
+
+ /*
+	 * XXX we need to check whether it's allocated in the pci resources;
+	 * right now we just check whether there's any address there.
+	 *
+	 * If there's no address, we allocate one.  Note that we can't just
+	 * use pci_mapreg_map here, since some Intel BARs are special in that
+	 * they set bit 0 to show they're enabled; this is not handled by
+	 * generic pci code.
+ */
+ if (mchbar_addr == 0) {
+ addr = (u_long)mchbar_addr;
+ if (bpa->pa_memex == NULL || extent_alloc(bpa->pa_memex,
+ MCHBAR_SIZE, MCHBAR_SIZE, 0, 0, 0, &addr)) {
+ return (0); /* just say we don't need to disable */
+ } else {
+ mchbar_addr = addr;
+ ret = 2;
+ /* We've allocated it, now fill in the BAR again */
+ if (INTEL_INFO(dev)->gen >= 4)
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag,
+ reg + 4, upper_32_bits(mchbar_addr));
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag,
+ reg, mchbar_addr & 0xffffffff);
+ }
+ }
+ /* set the enable bit */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag, DEVEN_REG,
+ tmp | DEVEN_MCHBAR_EN);
+ } else {
+ tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag, reg, tmp | 1);
+ }
+
+ return (ret);
+}
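+
+/*
+ * A minimal sketch of how the ternary state is meant to be consumed
+ * (hypothetical caller; the real attach path would keep the value
+ * around for detach):
+ *
+ *	int mchbar_state = intel_setup_mchbar(dev_priv, &bpa);
+ *	... access registers through the MCHBAR mirror on the IGD ...
+ *	intel_teardown_mchbar(dev_priv, &bpa, mchbar_state);
+ */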
+
+/*
+ * Take the ternary state returned by intel_setup_mchbar and clean up
+ * after it.
+ */
+void
+intel_teardown_mchbar(struct inteldrm_softc *dev_priv,
+ struct pci_attach_args *bpa, int disable)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u_int64_t mchbar_addr;
+ pcireg_t tmp, low, high = 0;
+ int reg;
+
+ reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+	switch (disable) {
+ case 2:
+ if (INTEL_INFO(dev)->gen >= 4)
+ high = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg + 4);
+ low = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
+ mchbar_addr = ((u_int64_t)high << 32) | low;
+ if (bpa->pa_memex)
+ extent_free(bpa->pa_memex, mchbar_addr, MCHBAR_SIZE, 0);
+ /* FALLTHROUGH */
+ case 1:
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, DEVEN_REG);
+ tmp &= ~DEVEN_MCHBAR_EN;
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag, DEVEN_REG, tmp);
+ } else {
+ tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
+ tmp &= ~1;
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag, reg, tmp);
+ }
+ break;
+ case 0:
+ default:
+ break;
+	}
+}
+
+int
+i915_load_modeset_init(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_parse_bios(dev);
+ if (ret)
+ DRM_INFO("failed to find VBIOS tables\n");
+
+#if 0
+ intel_register_dsm_handler();
+#endif
+
+ /* IIR "flip pending" bit means done if this bit is set */
+ if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
+ dev_priv->flip_pending_is_done = true;
+
+#ifdef notyet
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
+ if (ret)
+ goto cleanup_vga_client;
+
+ /* Initialise stolen first so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ ret = i915_gem_init_stolen(dev);
+ if (ret)
+ goto cleanup_vga_switcheroo;
+#endif
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_gem_stolen;
+
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
+ intel_modeset_init(dev);
+
+ ret = i915_gem_init(dev);
+ if (ret)
+ goto cleanup_irq;
+
+ intel_modeset_gem_init(dev);
+
+ /* Always safe in the mode setting case. */
+ /* FIXME: do pre/post-mode set stuff in core KMS code */
+ dev->vblank_disable_allowed = 1;
+
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_gem;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ dev_priv->enable_hotplug_processing = true;
+
+ drm_kms_helper_poll_init(dev);
+
+ /* We're off and running w/KMS */
+ dev_priv->mm.suspended = 0;
+
+ return (0);
+
+cleanup_gem:
+ DRM_LOCK();
+ i915_gem_cleanup_ringbuffer(dev);
+ DRM_UNLOCK();
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_irq:
+ drm_irq_uninstall(dev);
+cleanup_gem_stolen:
+#ifdef notyet
+ i915_gem_cleanup_stolen(dev);
+#endif
+ return (ret);
+}
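+
+/*
+ * Note: the cleanup labels above unwind in reverse order of setup; a
+ * failure in intel_fbdev_init() falls through cleanup_gem, cleanup_irq
+ * and cleanup_gem_stolen in turn, mirroring the initialisation ladder.
+ */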
+
+void
+i915_driver_lastclose(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct vm_page *p;
+ int ret;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_fb_restore_mode(dev);
+ return;
+ }
+
+ ret = i915_gem_idle(dev_priv);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+
+ if (dev_priv->agpdmat != NULL) {
+ /*
+		 * Make sure we nuke everything: we may have mappings that
+		 * we've unreffed, but uvm still holds references to them for
+		 * maps.  Make sure they get unbound so any access will fault.
+ * XXX only do ones in GEM.
+ */
+ for (p = dev_priv->pgs; p < dev_priv->pgs +
+ (dev->agp->info.ai_aperture_size / PAGE_SIZE); p++)
+ pmap_page_protect(p, VM_PROT_NONE);
+ agp_bus_dma_destroy((struct agp_softc *)dev->agp->agpdev,
+ dev_priv->agpdmat);
+ }
+ dev_priv->agpdmat = NULL;
+}
+
+int
+i915_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv;
+
+ file_priv = malloc(sizeof(*file_priv), M_DRM, M_WAITOK);
+ if (!file_priv)
+ return ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ mtx_init(&file_priv->mm.lock, IPL_NONE);
+ INIT_LIST_HEAD(&file_priv->mm.request_list);
+
+ return 0;
+}
+
+void
+i915_driver_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ i915_gem_release(dev, file);
+ free(file_priv, M_DRM);
+}
diff --git a/sys/dev/pci/drm/i915/i915_drv.c b/sys/dev/pci/drm/i915/i915_drv.c
new file mode 100644
index 00000000000..f34e890d4af
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_drv.c
@@ -0,0 +1,2583 @@
+/* $OpenBSD: i915_drv.c,v 1.1 2013/03/18 12:36:51 jsg Exp $ */
+/*
+ * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*-
+ * Copyright © 2008 Intel Corporation
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#include <machine/pmap.h>
+
+#include <sys/queue.h>
+#include <sys/workq.h>
+#if 0
+# define INTELDRM_WATCH_COHERENCY
+# define WATCH_INACTIVE
+#endif
+
+extern struct mutex mchdev_lock;
+
+/*
+ * Override lid status (0=autodetect, 1=autodetect disabled [default],
+ * -1=force lid closed, -2=force lid open)
+ */
+int i915_panel_ignore_lid = 1;
+
+/* Enable powersavings, fbc, downclocking, etc. (default: true) */
+unsigned int i915_powersave = 1;
+
+/* Use semaphores for inter-ring sync (default: -1 (use per-chip defaults)) */
+int i915_semaphores = -1;
+
+/*
+ * Enable frame buffer compression for power savings
+ * (default: -1 (use per-chip default))
+ */
+int i915_enable_fbc = -1;
+
+/*
+ * Enable power-saving render C-state 6.
+ * Different stages can be selected via bitmask values
+ * (0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6).
+ * For example, 3 would enable rc6 and deep rc6, and 7 would enable everything.
+ * default: -1 (use per-chip default)
+ */
+int i915_enable_rc6 = -1;
+
+/* Use panel (LVDS/eDP) downclocking for power savings (default: false) */
+unsigned int i915_lvds_downclock = 0;
+
+/*
+ * Specify LVDS channel mode
+ * (0=probe BIOS [default], 1=single-channel, 2=dual-channel)
+ */
+int i915_lvds_channel_mode = 0;
+
+/*
+ * Use Spread Spectrum Clock with panels [LVDS/eDP]
+ * (default: auto from VBT)
+ */
+int i915_panel_use_ssc = -1;
+
+/*
+ * Override/Ignore selection of SDVO panel mode in the VBT
+ * (-2=ignore, -1=auto [default], index in VBT BIOS table)
+ */
+int i915_vbt_sdvo_panel_type = -1;
+
+const struct intel_device_info *
+ i915_get_device_id(int);
+int inteldrm_probe(struct device *, void *, void *);
+void inteldrm_attach(struct device *, struct device *, void *);
+int inteldrm_detach(struct device *, int);
+int inteldrm_activate(struct device *, int);
+int inteldrm_ioctl(struct drm_device *, u_long, caddr_t, struct drm_file *);
+int inteldrm_doioctl(struct drm_device *, u_long, caddr_t, struct drm_file *);
+
+int inteldrm_gmch_match(struct pci_attach_args *);
+void inteldrm_timeout(void *);
+void inteldrm_quiesce(struct inteldrm_softc *);
+
+/* For reset and suspend */
+int i8xx_do_reset(struct drm_device *);
+int i965_do_reset(struct drm_device *);
+int i965_reset_complete(struct drm_device *);
+int ironlake_do_reset(struct drm_device *);
+int gen6_do_reset(struct drm_device *);
+
+void i915_alloc_ifp(struct inteldrm_softc *, struct pci_attach_args *);
+void i965_alloc_ifp(struct inteldrm_softc *, struct pci_attach_args *);
+
+int i915_drm_freeze(struct drm_device *);
+int __i915_drm_thaw(struct drm_device *);
+int i915_drm_thaw(struct drm_device *);
+
+static const struct intel_device_info intel_i830_info = {
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_845g_info = {
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+ .gen = 2, .is_i85x = 1, .is_mobile = 1,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i915gm_info = {
+ .gen = 3, .is_mobile = 1,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+};
+static const struct intel_device_info intel_i945g_info = {
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i945gm_info = {
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+ .gen = 4, .is_broadwater = 1,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+ .gen = 4, .is_crestline = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
+};
+
+static const struct intel_device_info intel_g33_info = {
+ .gen = 3, .is_g33 = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_g45_info = {
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+ .gen = 4, .is_g4x = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+ .gen = 5,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+ .gen = 5, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+ .gen = 6,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+ .gen = 6, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+ .is_ivybridge = 1, .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+ .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_valleyview_m_info = {
+ .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+ .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+ .is_haswell = 1, .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ .is_haswell = 1, .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_force_wake = 1,
+};
+
+static const struct drm_pcidev inteldrm_pciidlist[] = {
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830M_IGD, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82845G_IGD, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82855GM_IGD, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82865G_IGD, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915G_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_E7221_IGD, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915GM_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945G_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GM_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GME_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82946GZ_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G35_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q965_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G965_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM965_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GME965_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G33_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q35_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q33_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM45_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, 0x2E02, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q45_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G45_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G41_IGD_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PINEVIEW_IGC_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PINEVIEW_M_IGC_1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CLARKDALE_IGD, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ARRANDALE_IGD, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT2, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT2, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT2_PLUS, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT2_PLUS, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_D_GT1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_M_GT1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_S_GT1, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_D_GT2, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_M_GT2, 0 },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_S_GT2, 0 },
+ {0, 0, 0}
+};
+
+static const struct intel_gfx_device_id {
+ int vendor;
+ int device;
+ const struct intel_device_info *info;
+} inteldrm_pciidlist_info[] = {
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830M_IGD,
+ &intel_i830_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82845G_IGD,
+ &intel_845g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82855GM_IGD,
+ &intel_i85x_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82865G_IGD,
+ &intel_i865g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915G_IGD_1,
+ &intel_i915g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_E7221_IGD,
+ &intel_i915g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915GM_IGD_1,
+ &intel_i915gm_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945G_IGD_1,
+ &intel_i945g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GM_IGD_1,
+ &intel_i945gm_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GME_IGD_1,
+ &intel_i945gm_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82946GZ_IGD_1,
+ &intel_i965g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G35_IGD_1,
+ &intel_i965g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q965_IGD_1,
+ &intel_i965g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G965_IGD_1,
+ &intel_i965g_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM965_IGD_1,
+ &intel_i965gm_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GME965_IGD_1,
+ &intel_i965gm_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G33_IGD_1,
+ &intel_g33_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q35_IGD_1,
+ &intel_g33_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q33_IGD_1,
+ &intel_g33_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM45_IGD_1,
+ &intel_gm45_info },
+ {PCI_VENDOR_INTEL, 0x2E02,
+ &intel_g45_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q45_IGD_1,
+ &intel_g45_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G45_IGD_1,
+ &intel_g45_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G41_IGD_1,
+ &intel_g45_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PINEVIEW_IGC_1,
+ &intel_pineview_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PINEVIEW_M_IGC_1,
+ &intel_pineview_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CLARKDALE_IGD,
+ &intel_ironlake_d_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ARRANDALE_IGD,
+ &intel_ironlake_m_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT1,
+ &intel_sandybridge_d_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT1,
+ &intel_sandybridge_m_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT2,
+ &intel_sandybridge_d_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT2,
+ &intel_sandybridge_m_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT2_PLUS,
+ &intel_sandybridge_d_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT2_PLUS,
+ &intel_sandybridge_m_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_D_GT1,
+ &intel_ivybridge_d_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_M_GT1,
+ &intel_ivybridge_m_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_S_GT1,
+ &intel_ivybridge_d_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_D_GT2,
+ &intel_ivybridge_d_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_M_GT2,
+ &intel_ivybridge_m_info },
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_S_GT2,
+ &intel_ivybridge_d_info },
+ {0, 0, NULL}
+};
+
+static struct drm_driver_info inteldrm_driver = {
+ .buf_priv_size = 1, /* No dev_priv */
+ .file_priv_size = sizeof(struct inteldrm_file),
+ .ioctl = inteldrm_ioctl,
+ .open = i915_driver_open,
+ .close = i915_driver_close,
+ .lastclose = i915_driver_lastclose,
+
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
+ .gem_fault = i915_gem_fault,
+ .gem_size = sizeof(struct drm_i915_gem_object),
+
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = i915_gem_dumb_destroy,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .flags = DRIVER_AGP | DRIVER_AGP_REQUIRE |
+ DRIVER_MTRR | DRIVER_IRQ | DRIVER_GEM |
+ DRIVER_MODESET,
+};
+
+const struct intel_device_info *
+i915_get_device_id(int device)
+{
+ const struct intel_gfx_device_id *did;
+
+ for (did = &inteldrm_pciidlist_info[0]; did->device != 0; did++) {
+ if (did->device != device)
+ continue;
+ return (did->info);
+ }
+ return (NULL);
+}
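+
+/*
+ * inteldrm_pciidlist (matched at probe time) and inteldrm_pciidlist_info
+ * must stay in sync: a device present only in the probe list would make
+ * i915_get_device_id() return NULL and leave dev_priv->info unset in
+ * inteldrm_attach().
+ */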
+
+int
+inteldrm_probe(struct device *parent, void *match, void *aux)
+{
+ return (drm_pciprobe((struct pci_attach_args *)aux,
+ inteldrm_pciidlist));
+}
+
+bool
+i915_semaphore_is_enabled(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return 1;
+}
+
+int
+i915_drm_freeze(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ drm_kms_helper_poll_disable(dev);
+
+#if 0
+ pci_save_state(dev->pdev);
+#endif
+
+ /* If KMS is active, we do the leavevt stuff here */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ int error = i915_gem_idle(dev_priv);
+ if (error) {
+ printf("GEM idle failed, resume might fail\n");
+ return (error);
+ }
+
+ timeout_del(&dev_priv->rps.delayed_resume_to);
+
+ intel_modeset_disable(dev);
+
+ drm_irq_uninstall(dev);
+ dev_priv->enable_hotplug_processing = false;
+ }
+
+ i915_save_state(dev);
+
+ intel_opregion_fini(dev);
+
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
+
+ return 0;
+}
+
+int
+__i915_drm_thaw(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int error = 0;
+
+ i915_restore_state(dev);
+ intel_opregion_setup(dev);
+
+ /* KMS EnterVT equivalent */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_init_pch_refclk(dev);
+
+ DRM_LOCK();
+ dev_priv->mm.suspended = 0;
+
+ error = i915_gem_init_hw(dev);
+ DRM_UNLOCK();
+
+ /* We need working interrupts for modeset enabling ... */
+ drm_irq_install(dev);
+
+ intel_modeset_init_hw(dev);
+ intel_modeset_setup_hw_state(dev, false);
+
+ /*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+		 * bother with the tiny race here where we might lose hotplug
+		 * notifications.
+		 */
+ dev_priv->enable_hotplug_processing = true;
+ }
+
+ intel_opregion_init(dev);
+
+ dev_priv->modeset_on_lid = 0;
+
+ return error;
+}
+
+int
+i915_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ intel_gt_reset(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_LOCK();
+ i915_gem_restore_gtt_mappings(dev);
+ DRM_UNLOCK();
+ }
+
+	error = __i915_drm_thaw(dev);
+
+ return error;
+}
+
+/*
+ * We're an Intel IGD; bus 0, device 0, function 0 should be the GMCH,
+ * so it should be Intel.
+ */
+int
+inteldrm_gmch_match(struct pci_attach_args *pa)
+{
+ if (pa->pa_bus == 0 && pa->pa_device == 0 && pa->pa_function == 0 &&
+ PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
+ PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
+ PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_HOST)
+ return (1);
+ return (0);
+}
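+
+/*
+ * Hypothetical usage sketch (inteldrm_attach() below does exactly this
+ * to find the GMCH before setting up chipset flushing):
+ *
+ *	struct pci_attach_args bpa;
+ *
+ *	if (pci_find_device(&bpa, inteldrm_gmch_match) == 0)
+ *		... no GMCH found, bail ...
+ */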
+
+
+int inteldrm_wsioctl(void *, u_long, caddr_t, int, struct proc *);
+paddr_t inteldrm_wsmmap(void *, off_t, int);
+int inteldrm_alloc_screen(void *, const struct wsscreen_descr *,
+ void **, int *, int *, long *);
+void inteldrm_free_screen(void *, void *);
+int inteldrm_show_screen(void *, void *, int,
+ void (*)(void *, int, int), void *);
+void inteldrm_doswitch(void *, void *);
+
+struct wsscreen_descr inteldrm_stdscreen = {
+ "std",
+ 0, 0,
+ 0,
+ 0, 0,
+ WSSCREEN_UNDERLINE | WSSCREEN_HILIT |
+ WSSCREEN_REVERSE | WSSCREEN_WSCOLORS
+};
+
+const struct wsscreen_descr *inteldrm_scrlist[] = {
+ &inteldrm_stdscreen,
+};
+
+struct wsscreen_list inteldrm_screenlist = {
+ nitems(inteldrm_scrlist), inteldrm_scrlist
+};
+
+struct wsdisplay_accessops inteldrm_accessops = {
+ inteldrm_wsioctl,
+ inteldrm_wsmmap,
+ inteldrm_alloc_screen,
+ inteldrm_free_screen,
+ inteldrm_show_screen
+};
+
+int
+inteldrm_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
+{
+ struct inteldrm_softc *dev_priv = v;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ struct wsdisplay_param *dp = (struct wsdisplay_param *)data;
+ extern u32 _intel_panel_get_max_backlight(struct drm_device *);
+
+ switch (cmd) {
+ case WSDISPLAYIO_GETPARAM:
+ switch (dp->param) {
+ case WSDISPLAYIO_PARAM_BRIGHTNESS:
+ dp->min = 0;
+ dp->max = _intel_panel_get_max_backlight(dev);
+ dp->curval = dev_priv->backlight_level;
+ return 0;
+ }
+ break;
+ case WSDISPLAYIO_SETPARAM:
+ switch (dp->param) {
+ case WSDISPLAYIO_PARAM_BRIGHTNESS:
+ intel_panel_set_backlight(dev, dp->curval);
+ return 0;
+ }
+ break;
+ }
+
+ return (-1);
+}
+
+paddr_t
+inteldrm_wsmmap(void *v, off_t off, int prot)
+{
+ return (-1);
+}
+
+int
+inteldrm_alloc_screen(void *v, const struct wsscreen_descr *type,
+ void **cookiep, int *curxp, int *curyp, long *attrp)
+{
+ struct inteldrm_softc *dev_priv = v;
+ struct rasops_info *ri = &dev_priv->ro;
+
+ if (dev_priv->nscreens > 8)
+ return (ENOMEM);
+
+ *cookiep = ri;
+ *curxp = 0;
+	*curyp = 0;
+ ri->ri_ops.alloc_attr(ri, 0, 0, 0, attrp);
+ dev_priv->nscreens++;
+ return (0);
+}
+
+void
+inteldrm_free_screen(void *v, void *cookie)
+{
+ struct inteldrm_softc *dev_priv = v;
+
+ dev_priv->nscreens--;
+}
+
+int
+inteldrm_show_screen(void *v, void *cookie, int waitok,
+ void (*cb)(void *, int, int), void *cbarg)
+{
+ struct inteldrm_softc *dev_priv = v;
+
+ dev_priv->switchcb = cb;
+ dev_priv->switchcbarg = cbarg;
+ if (cb) {
+ workq_queue_task(NULL, &dev_priv->switchwqt, 0,
+ inteldrm_doswitch, v, cookie);
+ return (EAGAIN);
+ }
+
+ inteldrm_doswitch(v, cookie);
+
+ return (0);
+}
+
+void
+inteldrm_doswitch(void *v, void *cookie)
+{
+ struct inteldrm_softc *dev_priv = v;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ intel_fb_restore_mode(dev);
+
+ if (dev_priv->switchcb)
+ (*dev_priv->switchcb)(dev_priv->switchcbarg, 0, 0);
+}
+
+/*
+ * Accelerated routines.
+ */
+
+int inteldrm_copycols(void *, int, int, int, int);
+int inteldrm_erasecols(void *, int, int, int, long);
+int inteldrm_copyrows(void *, int, int, int);
+int inteldrm_eraserows(void *cookie, int, int, long);
+void inteldrm_copyrect(struct inteldrm_softc *, int, int, int, int, int, int);
+void inteldrm_fillrect(struct inteldrm_softc *, int, int, int, int, int);
+
+int
+inteldrm_copycols(void *cookie, int row, int src, int dst, int num)
+{
+ struct rasops_info *ri = cookie;
+ struct inteldrm_softc *sc = ri->ri_hw;
+ struct drm_device *dev = (struct drm_device *)sc->drmdev;
+
+ if (dev->open_count > 0 || sc->noaccel)
+ return sc->noaccel_ops.copycols(cookie, row, src, dst, num);
+
+ num *= ri->ri_font->fontwidth;
+ src *= ri->ri_font->fontwidth;
+ dst *= ri->ri_font->fontwidth;
+ row *= ri->ri_font->fontheight;
+
+ inteldrm_copyrect(sc, ri->ri_xorigin + src, ri->ri_yorigin + row,
+ ri->ri_xorigin + dst, ri->ri_yorigin + row,
+ num, ri->ri_font->fontheight);
+
+ return 0;
+}
+
+int
+inteldrm_erasecols(void *cookie, int row, int col, int num, long attr)
+{
+ struct rasops_info *ri = cookie;
+ struct inteldrm_softc *sc = ri->ri_hw;
+ struct drm_device *dev = (struct drm_device *)sc->drmdev;
+ int bg, fg;
+
+ if (dev->open_count > 0 || sc->noaccel)
+ return sc->noaccel_ops.erasecols(cookie, row, col, num, attr);
+
+ ri->ri_ops.unpack_attr(cookie, attr, &fg, &bg, NULL);
+
+ row *= ri->ri_font->fontheight;
+ col *= ri->ri_font->fontwidth;
+ num *= ri->ri_font->fontwidth;
+
+ inteldrm_fillrect(sc, ri->ri_xorigin + col, ri->ri_yorigin + row,
+ num, ri->ri_font->fontheight, ri->ri_devcmap[bg]);
+
+ return 0;
+}
+
+int
+inteldrm_copyrows(void *cookie, int src, int dst, int num)
+{
+ struct rasops_info *ri = cookie;
+ struct inteldrm_softc *sc = ri->ri_hw;
+ struct drm_device *dev = (struct drm_device *)sc->drmdev;
+
+ if (dev->open_count > 0 || sc->noaccel)
+ return sc->noaccel_ops.copyrows(cookie, src, dst, num);
+
+ num *= ri->ri_font->fontheight;
+ src *= ri->ri_font->fontheight;
+ dst *= ri->ri_font->fontheight;
+
+ inteldrm_copyrect(sc, ri->ri_xorigin, ri->ri_yorigin + src,
+ ri->ri_xorigin, ri->ri_yorigin + dst, ri->ri_emuwidth, num);
+
+ return 0;
+}
+
+int
+inteldrm_eraserows(void *cookie, int row, int num, long attr)
+{
+ struct rasops_info *ri = cookie;
+ struct inteldrm_softc *sc = ri->ri_hw;
+ struct drm_device *dev = (struct drm_device *)sc->drmdev;
+ int bg, fg;
+ int x, y, w;
+
+ if (dev->open_count > 0 || sc->noaccel)
+ return sc->noaccel_ops.eraserows(cookie, row, num, attr);
+
+ ri->ri_ops.unpack_attr(cookie, attr, &fg, &bg, NULL);
+
+ if ((num == ri->ri_rows) && ISSET(ri->ri_flg, RI_FULLCLEAR)) {
+ num = ri->ri_height;
+ x = y = 0;
+ w = ri->ri_width;
+ } else {
+ num *= ri->ri_font->fontheight;
+ x = ri->ri_xorigin;
+ y = ri->ri_yorigin + row * ri->ri_font->fontheight;
+ w = ri->ri_emuwidth;
+ }
+ inteldrm_fillrect(sc, x, y, w, num, ri->ri_devcmap[bg]);
+
+ return 0;
+}
+
+void
+inteldrm_copyrect(struct inteldrm_softc *dev_priv, int sx, int sy,
+ int dx, int dy, int w, int h)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ bus_addr_t base = dev_priv->fbdev->ifb.obj->gtt_offset;
+ uint32_t pitch = dev_priv->fbdev->ifb.base.pitches[0];
+ struct intel_ring_buffer *ring;
+ uint32_t seqno;
+ int ret, i;
+
+ if (HAS_BLT(dev))
+ ring = &dev_priv->rings[BCS];
+ else
+ ring = &dev_priv->rings[RCS];
+
+ ret = intel_ring_begin(ring, 8);
+ if (ret)
+ return;
+
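+	/*
+	 * XY_SRC_COPY_BLT is an 8-dword packet: command, the ROP / colour
+	 * depth / destination pitch dword, destination top-left (x1,y1),
+	 * destination bottom-right (x2,y2), destination base address,
+	 * source top-left (x1,y1), source pitch and source base address.
+	 * Source and destination are both the framebuffer here, so the
+	 * bases and pitches match.
+	 */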
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | pitch);
+ intel_ring_emit(ring, (dx << 0) | (dy << 16));
+ intel_ring_emit(ring, ((dx + w) << 0) | ((dy + h) << 16));
+ intel_ring_emit(ring, base);
+ intel_ring_emit(ring, (sx << 0) | (sy << 16));
+ intel_ring_emit(ring, pitch);
+ intel_ring_emit(ring, base);
+ intel_ring_advance(ring);
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return;
+
+ ret = i915_add_request(ring, NULL, &seqno);
+ if (ret)
+ return;
+
+ for (i = 1000000; i != 0; i--) {
+ if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ break;
+ DELAY(1);
+ }
+
+ i915_gem_retire_requests_ring(ring);
+}
+
+void
+inteldrm_fillrect(struct inteldrm_softc *dev_priv, int x, int y,
+ int w, int h, int color)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ bus_addr_t base = dev_priv->fbdev->ifb.obj->gtt_offset;
+ uint32_t pitch = dev_priv->fbdev->ifb.base.pitches[0];
+ struct intel_ring_buffer *ring;
+ uint32_t seqno;
+ int ret, i;
+
+ if (HAS_BLT(dev))
+ ring = &dev_priv->rings[BCS];
+ else
+ ring = &dev_priv->rings[RCS];
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return;
+
+#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|4)
+#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
+#define XY_COLOR_BLT_WRITE_RGB (1<<20)
+#define BLT_ROP_PATCOPY (0xf0<<16)
+
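+	/*
+	 * XY_COLOR_BLT is a 6-dword packet: command, the ROP / colour
+	 * depth / destination pitch dword, top-left (x1,y1), bottom-right
+	 * (x2,y2), destination base address and the solid fill colour.
+	 */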
+ intel_ring_emit(ring, XY_COLOR_BLT_CMD |
+ XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_PATCOPY | pitch);
+ intel_ring_emit(ring, (x << 0) | (y << 16));
+ intel_ring_emit(ring, ((x + w) << 0) | ((y + h) << 16));
+ intel_ring_emit(ring, base);
+ intel_ring_emit(ring, color);
+ intel_ring_advance(ring);
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return;
+
+ ret = i915_add_request(ring, NULL, &seqno);
+ if (ret)
+ return;
+
+ for (i = 1000000; i != 0; i--) {
+ if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ break;
+ DELAY(1);
+ }
+
+ i915_gem_retire_requests_ring(ring);
+}
+
+void
+inteldrm_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
+ struct vga_pci_softc *vga_sc = (struct vga_pci_softc *)parent;
+ struct pci_attach_args *pa = aux, bpa;
+ struct vga_pci_bar *bar;
+ struct drm_device *dev;
+ const struct drm_pcidev *id_entry;
+ int i;
+ uint16_t pci_device;
+
+ id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
+ PCI_PRODUCT(pa->pa_id), inteldrm_pciidlist);
+ pci_device = PCI_PRODUCT(pa->pa_id);
+ dev_priv->info = i915_get_device_id(pci_device);
+ KASSERT(dev_priv->info->gen != 0);
+
+ dev_priv->pc = pa->pa_pc;
+ dev_priv->tag = pa->pa_tag;
+ dev_priv->dmat = pa->pa_dmat;
+ dev_priv->bst = pa->pa_memt;
+
+ /* All intel chipsets need to be treated as agp, so just pass one */
+ dev_priv->drmdev = drm_attach_pci(&inteldrm_driver, pa, 1, self);
+
+ dev = (struct drm_device *)dev_priv->drmdev;
+
+ /* we need to use this api for now due to sharing with intagp */
+ bar = vga_pci_bar_info(vga_sc, (IS_I9XX(dev) ? 0 : 1));
+ if (bar == NULL) {
+ printf(": can't get BAR info\n");
+ return;
+ }
+
+ dev_priv->regs = vga_pci_bar_map(vga_sc, bar->addr, 0, 0);
+ if (dev_priv->regs == NULL) {
+ printf(": can't map mmio space\n");
+ return;
+ }
+
+ intel_detect_pch(dev_priv);
+
+ /*
+	 * i945G/GM report MSI capability despite not actually supporting
+	 * it, so explicitly disable it.
+ */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
+
+ if (pci_intr_map(pa, &dev_priv->ih) != 0) {
+ printf(": couldn't map interrupt\n");
+ return;
+ }
+
+ intel_irq_init(dev);
+
+ /*
+	 * Set up the interrupt handler.  Note that we don't switch the
+	 * interrupt on until the X server talks to us; kms will change this.
+ */
+ dev_priv->irqh = pci_intr_establish(dev_priv->pc, dev_priv->ih, IPL_TTY,
+ inteldrm_driver.irq_handler,
+ dev_priv, dev_priv->dev.dv_xname);
+ if (dev_priv->irqh == NULL) {
+ printf(": couldn't establish interrupt\n");
+ return;
+ }
+
+ dev_priv->workq = workq_create("intelrel", 1, IPL_TTY);
+ if (dev_priv->workq == NULL) {
+		printf(": couldn't create workq\n");
+ return;
+ }
+
+ /* GEM init */
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.bound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ init_ring_lists(&dev_priv->rings[i]);
+ timeout_set(&dev_priv->mm.retire_timer, inteldrm_timeout, dev_priv);
+ timeout_set(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, dev_priv);
+ dev_priv->next_seqno = 1;
+ dev_priv->mm.suspended = 1;
+ dev_priv->error_completion = 0;
+
+ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+ if (IS_GEN3(dev)) {
+ u_int32_t tmp = I915_READ(MI_ARB_STATE);
+ if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
+ /*
+			 * MI_ARB_STATE is a masked register: the upper 16
+			 * bits of a write select which of the lower 16 bits
+			 * take effect, so enable a bit by setting it in both
+			 * halves (which is what _MASKED_BIT_ENABLE does).
+ */
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ }
+ }
+
+ dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
+ /* Old X drivers will take 0-2 for front, back, depth buffers */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
+
+ if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) ||
+ IS_I945GM(dev) || IS_G33(dev))
+ dev_priv->num_fence_regs = 16;
+ else
+ dev_priv->num_fence_regs = 8;
+
+ /* Initialise fences to zero, else on some macs we'll get corruption */
+ i915_gem_reset_fences(dev);
+
+ if (pci_find_device(&bpa, inteldrm_gmch_match) == 0) {
+ printf(": can't find GMCH\n");
+ return;
+ }
+
+ /* Set up the IFP for chipset flushing */
+ if (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) ||
+ IS_I945GM(dev)) {
+ i915_alloc_ifp(dev_priv, &bpa);
+ } else if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev)) {
+ i965_alloc_ifp(dev_priv, &bpa);
+ } else {
+ int nsegs;
+ /*
+		 * I8XX has no flush page mechanism; we fake it by writing
+		 * until the cache is empty, so allocate a page to scribble on.
+ */
+ dev_priv->ifp.i8xx.kva = NULL;
+ if (bus_dmamem_alloc(pa->pa_dmat, PAGE_SIZE, 0, 0,
+ &dev_priv->ifp.i8xx.seg, 1, &nsegs, BUS_DMA_WAITOK) == 0) {
+ if (bus_dmamem_map(pa->pa_dmat, &dev_priv->ifp.i8xx.seg,
+ 1, PAGE_SIZE, &dev_priv->ifp.i8xx.kva, 0) != 0) {
+ bus_dmamem_free(pa->pa_dmat,
+ &dev_priv->ifp.i8xx.seg, nsegs);
+ dev_priv->ifp.i8xx.kva = NULL;
+ }
+ }
+ }
+
+ i915_gem_detect_bit_6_swizzle(dev_priv, &bpa);
+
+ dev_priv->mm.interruptible = true;
+
+ printf(": %s\n", pci_intr_string(pa->pa_pc, dev_priv->ih));
+
+ mtx_init(&dev_priv->irq_lock, IPL_TTY);
+ mtx_init(&dev_priv->rps.lock, IPL_NONE);
+ mtx_init(&dev_priv->dpio_lock, IPL_NONE);
+ mtx_init(&mchdev_lock, IPL_NONE);
+ mtx_init(&dev_priv->error_completion_lock, IPL_NONE);
+
+ rw_init(&dev_priv->rps.hw_lock, "rpshw");
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ dev_priv->num_pipe = 3;
+ else if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ dev_priv->num_pipe = 2;
+ else
+ dev_priv->num_pipe = 1;
+
+ if (drm_vblank_init(dev, dev_priv->num_pipe)) {
+ printf(": vblank init failed\n");
+ return;
+ }
+
+ intel_gt_init(dev);
+
+ intel_opregion_setup(dev);
+ intel_setup_bios(dev);
+ intel_setup_gmbus(dev_priv);
+
+ /* XXX would be a lot nicer to get agp info before now */
+ uvm_page_physload(atop(dev->agp->base), atop(dev->agp->base +
+ dev->agp->info.ai_aperture_size), atop(dev->agp->base),
+ atop(dev->agp->base + dev->agp->info.ai_aperture_size),
+ PHYSLOAD_DEVICE);
+ /* array of vm pages that physload introduced. */
+ dev_priv->pgs = PHYS_TO_VM_PAGE(dev->agp->base);
+ KASSERT(dev_priv->pgs != NULL);
+ /*
+ * XXX mark all pages write combining so user mmaps get the right
+ * bits. We really need a proper MI api for doing this, but for now
+ * this allows us to use PAT where available.
+ */
+ for (i = 0; i < atop(dev->agp->info.ai_aperture_size); i++)
+ atomic_setbits_int(&(dev_priv->pgs[i].pg_flags), PG_PMAP_WC);
+ if (agp_init_map(dev_priv->bst, dev->agp->base,
+ dev->agp->info.ai_aperture_size, BUS_SPACE_MAP_LINEAR |
+ BUS_SPACE_MAP_PREFETCHABLE, &dev_priv->agph))
+ panic("can't map aperture");
+
+ /* XXX */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_load_modeset_init(dev);
+ intel_opregion_init(dev);
+
+#if 1
+{
+ extern int wsdisplay_console_initted;
+ struct wsemuldisplaydev_attach_args aa;
+ struct rasops_info *ri = &dev_priv->ro;
+
+ if (ri->ri_bits == NULL)
+ return;
+
+ intel_fb_restore_mode(dev);
+
+ ri->ri_flg = RI_CENTER;
+ rasops_init(ri, 96, 132);
+
+ dev_priv->noaccel_ops = ri->ri_ops;
+
+ ri->ri_hw = dev_priv;
+ ri->ri_ops.copyrows = inteldrm_copyrows;
+ ri->ri_ops.copycols = inteldrm_copycols;
+ ri->ri_ops.eraserows = inteldrm_eraserows;
+ ri->ri_ops.erasecols = inteldrm_erasecols;
+
+ inteldrm_stdscreen.capabilities = ri->ri_caps;
+ inteldrm_stdscreen.nrows = ri->ri_rows;
+ inteldrm_stdscreen.ncols = ri->ri_cols;
+ inteldrm_stdscreen.textops = &ri->ri_ops;
+ inteldrm_stdscreen.fontwidth = ri->ri_font->fontwidth;
+ inteldrm_stdscreen.fontheight = ri->ri_font->fontheight;
+
+ aa.console = 0;
+ aa.scrdata = &inteldrm_screenlist;
+ aa.accessops = &inteldrm_accessops;
+ aa.accesscookie = dev_priv;
+ aa.defaultscreens = 0;
+
+ if (wsdisplay_console_initted) {
+ long defattr;
+
+ ri->ri_ops.alloc_attr(ri, 0, 0, 0, &defattr);
+ wsdisplay_cnattach(&inteldrm_stdscreen, ri, 0, 0, defattr);
+ aa.console = 1;
+ }
+
+ vga_sc->sc_type = -1;
+ config_found(parent, &aa, wsemuldisplaydevprint);
+}
+#endif
+}
+
+int
+inteldrm_detach(struct device *self, int flags)
+{
+ struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+	/* This will quiesce any DMA in flight and kill the timeouts. */
+ if (dev_priv->drmdev != NULL) {
+ config_detach(dev_priv->drmdev, flags);
+ dev_priv->drmdev = NULL;
+ }
+
+#if 0
+ if (!I915_NEED_GFX_HWS(dev) && dev_priv->hws_dmamem) {
+ drm_dmamem_free(dev_priv->dmat, dev_priv->hws_dmamem);
+ dev_priv->hws_dmamem = NULL;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+// dev_priv->hw_status_page = NULL;
+ }
+#endif
+
+ if (IS_I9XX(dev) && dev_priv->ifp.i9xx.bsh != 0) {
+ bus_space_unmap(dev_priv->ifp.i9xx.bst, dev_priv->ifp.i9xx.bsh,
+ PAGE_SIZE);
+ } else if ((IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) ||
+ IS_I865G(dev)) && dev_priv->ifp.i8xx.kva != NULL) {
+ bus_dmamem_unmap(dev_priv->dmat, dev_priv->ifp.i8xx.kva,
+ PAGE_SIZE);
+ bus_dmamem_free(dev_priv->dmat, &dev_priv->ifp.i8xx.seg, 1);
+ }
+
+ pci_intr_disestablish(dev_priv->pc, dev_priv->irqh);
+
+ if (dev_priv->regs != NULL)
+ vga_pci_bar_unmap(dev_priv->regs);
+
+ return (0);
+}
+
+int
+inteldrm_activate(struct device *arg, int act)
+{
+ struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)arg;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ switch (act) {
+ case DVACT_QUIESCE:
+// inteldrm_quiesce(dev_priv);
+ dev_priv->noaccel = 1;
+ i915_drm_freeze(dev);
+ break;
+ case DVACT_SUSPEND:
+// i915_save_state(dev);
+ break;
+ case DVACT_RESUME:
+// i915_restore_state(dev);
+// /* entrypoints can stop sleeping now */
+// atomic_clearbits_int(&dev_priv->sc_flags, INTELDRM_QUIET);
+// wakeup(&dev_priv->flags);
+ i915_drm_thaw(dev);
+ intel_fb_restore_mode(dev);
+ dev_priv->noaccel = 0;
+ break;
+ }
+
+ return (0);
+}
+
+struct cfattach inteldrm_ca = {
+ sizeof(struct inteldrm_softc), inteldrm_probe, inteldrm_attach,
+ inteldrm_detach, inteldrm_activate
+};
+
+struct cfdriver inteldrm_cd = {
+ 0, "inteldrm", DV_DULL
+};
+
+int
+inteldrm_ioctl(struct drm_device *dev, u_long cmd, caddr_t data,
+ struct drm_file *file_priv)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int error = 0;
+
+ while ((dev_priv->sc_flags & INTELDRM_QUIET) && error == 0)
+ error = tsleep(&dev_priv->flags, PCATCH, "intelioc", 0);
+ if (error)
+ return (error);
+ dev_priv->entries++;
+
+ error = inteldrm_doioctl(dev, cmd, data, file_priv);
+
+ dev_priv->entries--;
+ if (dev_priv->sc_flags & INTELDRM_QUIET)
+ wakeup(&dev_priv->entries);
+ return (error);
+}
+
+int
+inteldrm_doioctl(struct drm_device *dev, u_long cmd, caddr_t data,
+ struct drm_file *file_priv)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
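+	/* ioctls available to any authenticated client */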
+ if (file_priv->authenticated == 1) {
+ switch (cmd) {
+ case DRM_IOCTL_I915_GETPARAM:
+ return (i915_getparam(dev_priv, data));
+ case DRM_IOCTL_I915_GEM_EXECBUFFER2:
+ return (i915_gem_execbuffer2(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_BUSY:
+ return (i915_gem_busy_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_THROTTLE:
+ return (i915_gem_ring_throttle(dev, file_priv));
+ case DRM_IOCTL_I915_GEM_MMAP:
+ return (i915_gem_gtt_map_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_MMAP_GTT:
+ return (i915_gem_mmap_gtt_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_CREATE:
+ return (i915_gem_create_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_PREAD:
+ return (i915_gem_pread_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_PWRITE:
+ return (i915_gem_pwrite_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_SET_DOMAIN:
+ return (i915_gem_set_domain_ioctl(dev, data,
+ file_priv));
+ case DRM_IOCTL_I915_GEM_SET_TILING:
+ return (i915_gem_set_tiling(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_GET_TILING:
+ return (i915_gem_get_tiling(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_GET_APERTURE:
+ return (i915_gem_get_aperture_ioctl(dev, data,
+ file_priv));
+ case DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID:
+ return (intel_get_pipe_from_crtc_id(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_MADVISE:
+ return (i915_gem_madvise_ioctl(dev, data, file_priv));
+ default:
+ break;
+ }
+ }
+
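+	/* ioctls restricted to the drm master (typically the X server) */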
+ if (file_priv->master == 1) {
+ switch (cmd) {
+ case DRM_IOCTL_I915_SETPARAM:
+ return (i915_setparam(dev_priv, data));
+ case DRM_IOCTL_I915_GEM_INIT:
+ return (i915_gem_init_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_ENTERVT:
+ return (i915_gem_entervt_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_LEAVEVT:
+ return (i915_gem_leavevt_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_PIN:
+ return (i915_gem_pin_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_GEM_UNPIN:
+ return (i915_gem_unpin_ioctl(dev, data, file_priv));
+ case DRM_IOCTL_I915_OVERLAY_PUT_IMAGE:
+ return (intel_overlay_put_image(dev, data, file_priv));
+ case DRM_IOCTL_I915_OVERLAY_ATTRS:
+ return (intel_overlay_attrs(dev, data, file_priv));
+ case DRM_IOCTL_I915_GET_SPRITE_COLORKEY:
+ return (intel_sprite_get_colorkey(dev, data, file_priv));
+ case DRM_IOCTL_I915_SET_SPRITE_COLORKEY:
+ return (intel_sprite_set_colorkey(dev, data, file_priv));
+ }
+ }
+ return (EINVAL);
+}
+
+void
+i915_alloc_ifp(struct inteldrm_softc *dev_priv, struct pci_attach_args *bpa)
+{
+ bus_addr_t addr;
+ u_int32_t reg;
+
+ dev_priv->ifp.i9xx.bst = bpa->pa_memt;
+
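+	/*
+	 * The flush page register holds a page-aligned physical address
+	 * in the upper bits and uses bit 0 as an enable flag.  If the
+	 * BIOS already programmed it, map the existing page; otherwise
+	 * allocate a page from the memory extent and enable it here.
+	 */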
+ reg = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I915_IFPADDR);
+ if (reg & 0x1) {
+ addr = (bus_addr_t)reg;
+ addr &= ~0x1;
+ /* XXX extents ... need data on whether bioses alloc or not. */
+ if (bus_space_map(bpa->pa_memt, addr, PAGE_SIZE, 0,
+ &dev_priv->ifp.i9xx.bsh) != 0)
+ goto nope;
+ return;
+ } else if (bpa->pa_memex == NULL || extent_alloc(bpa->pa_memex,
+ PAGE_SIZE, PAGE_SIZE, 0, 0, 0, &addr) || bus_space_map(bpa->pa_memt,
+ addr, PAGE_SIZE, 0, &dev_priv->ifp.i9xx.bsh))
+ goto nope;
+
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag, I915_IFPADDR, addr | 0x1);
+
+ return;
+
+nope:
+ dev_priv->ifp.i9xx.bsh = 0;
+ printf(": no ifp ");
+}
+
+void
+i965_alloc_ifp(struct inteldrm_softc *dev_priv, struct pci_attach_args *bpa)
+{
+ bus_addr_t addr;
+ u_int32_t lo, hi;
+
+ dev_priv->ifp.i9xx.bst = bpa->pa_memt;
+
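+	/*
+	 * Same dance as i915_alloc_ifp(), except the 965 register is
+	 * 64 bits wide, split across two 32-bit config dwords; the
+	 * enable flag is still bit 0 of the low dword.
+	 */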
+ hi = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR + 4);
+ lo = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR);
+ if (lo & 0x1) {
+ addr = (((u_int64_t)hi << 32) | lo);
+ addr &= ~0x1;
+ /* XXX extents ... need data on whether bioses alloc or not. */
+ if (bus_space_map(bpa->pa_memt, addr, PAGE_SIZE, 0,
+ &dev_priv->ifp.i9xx.bsh) != 0)
+ goto nope;
+ return;
+ } else if (bpa->pa_memex == NULL || extent_alloc(bpa->pa_memex,
+ PAGE_SIZE, PAGE_SIZE, 0, 0, 0, &addr) || bus_space_map(bpa->pa_memt,
+ addr, PAGE_SIZE, 0, &dev_priv->ifp.i9xx.bsh))
+ goto nope;
+
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR + 4,
+ upper_32_bits(addr));
+ pci_conf_write(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR,
+ (addr & 0xffffffff) | 0x1);
+
+ return;
+
+nope:
+ dev_priv->ifp.i9xx.bsh = 0;
+ printf(": no ifp ");
+}
+
+void
+i915_gem_chipset_flush(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /*
+ * Write to this flush page flushes the chipset write cache.
+ * The write will return when it is done.
+ */
+ if (IS_I9XX(dev)) {
+ if (dev_priv->ifp.i9xx.bsh != 0)
+ bus_space_write_4(dev_priv->ifp.i9xx.bst,
+ dev_priv->ifp.i9xx.bsh, 0, 1);
+ } else {
+ int i;
+
+ wbinvd();
+
+#define I830_HIC 0x70
+
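+		/*
+		 * Pre-9xx chipsets have no flush page: flush the CPU
+		 * caches, then set bit 31 of HIC and poll (for up to
+		 * ~100ms) until the hardware clears it again.
+		 */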
+ I915_WRITE(I830_HIC, (I915_READ(I830_HIC) | (1<<31)));
+ for (i = 1000; i; i--) {
+ if (!(I915_READ(I830_HIC) & (1<<31)))
+ break;
+ delay(100);
+ }
+
+ }
+}
+
+void
+inteldrm_set_max_obj_size(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ /*
+	 * Allow max obj size up to the size where only 2 would fit in the
+	 * aperture, with some slop allowed for alignment etc.
+ */
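+	/*
+	 * For example, assuming a 256MB GTT with nothing pinned:
+	 * 256MB * 3 / 4 / 2 = 96MB maximum object size.
+	 */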
+ dev_priv->max_gem_obj_size = (dev->gtt_total -
+ atomic_read(&dev->pin_memory)) * 3 / 4 / 2;
+
+}
+
+int
+i915_gem_gtt_map_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_obj *obj;
+ struct drm_i915_gem_object *obj_priv;
+ vaddr_t addr;
+ voff_t offset;
+ vsize_t end, nsize;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return (EBADF);
+
+	/* Since we are doing purely uvm-related operations here, we do
+	 * not need to hold the object; a reference alone is sufficient.
+ */
+ obj_priv = to_intel_bo(obj);
+
+ /* Check size. Also ensure that the object is not purgeable */
+ if (args->size == 0 || args->offset > obj->size || args->size >
+ obj->size || (args->offset + args->size) > obj->size ||
+ i915_gem_object_is_purgeable(obj_priv)) {
+ ret = EINVAL;
+ goto done;
+ }
+
+ end = round_page(args->offset + args->size);
+ offset = trunc_page(args->offset);
+ nsize = end - offset;
+
+ /*
+	 * We give our reference from object_lookup to the mmap, so we
+	 * only need to free it in the case that the map fails.
+ */
+ addr = 0;
+ ret = uvm_map(&curproc->p_vmspace->vm_map, &addr, nsize, &obj->uobj,
+ offset, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
+ UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+
+done:
+ if (ret == 0)
+ args->addr_ptr = (uint64_t) addr + (args->offset & PAGE_MASK);
+ else
+ drm_unref(&obj->uobj);
+
+ return (ret);
+}
+
+struct drm_obj *
+i915_gem_find_inactive_object(struct inteldrm_softc *dev_priv,
+ size_t min_size)
+{
+ struct drm_obj *obj, *best = NULL, *first = NULL;
+ struct drm_i915_gem_object *obj_priv;
+
+ /*
+ * We don't need references to the object as long as we hold the list
+ * lock, they won't disappear until we release the lock.
+ */
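+	/*
+	 * Best-fit scan: prefer the smallest clean (or purgeable)
+	 * object of at least min_size, falling back to the first
+	 * object on the list if no such candidate is found.
+	 */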
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
+ obj = &obj_priv->base;
+ if (obj->size >= min_size) {
+ if ((!obj_priv->dirty ||
+ i915_gem_object_is_purgeable(obj_priv)) &&
+ (best == NULL || obj->size < best->size)) {
+ best = obj;
+ if (best->size == min_size)
+ break;
+ }
+ }
+ if (first == NULL)
+ first = obj;
+ }
+ if (best == NULL)
+ best = first;
+ if (best) {
+ drm_ref(&best->uobj);
+ /*
+		 * If we couldn't grab it, we may as well fail and go on
+		 * to the next step for the sake of simplicity.
+ */
+ if (drm_try_hold_object(best) == 0) {
+ drm_unref(&best->uobj);
+ best = NULL;
+ }
+ }
+ return (best);
+}
+
+void
+inteldrm_wipe_mappings(struct drm_obj *obj)
+{
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct vm_page *pg;
+
+ DRM_ASSERT_HELD(obj);
+ /* make sure any writes hit the bus before we do whatever change
+ * that prompted us to kill the mappings.
+ */
+ DRM_MEMORYBARRIER();
+ /* nuke all our mappings. XXX optimise. */
+ for (pg = &dev_priv->pgs[atop(obj_priv->gtt_offset)]; pg !=
+ &dev_priv->pgs[atop(obj_priv->gtt_offset + obj->size)]; pg++)
+ pmap_page_protect(pg, VM_PROT_NONE);
+}
+
+/**
+ * Pin an object to the GTT and evaluate the relocations landing in it.
+ */
+int
+i915_gem_object_pin_and_relocate(struct drm_obj *obj,
+ struct drm_file *file_priv, struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ struct drm_device *dev = obj->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_obj *target_obj;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ bus_space_handle_t bsh;
+ int i, ret, needs_fence;
+
+ DRM_ASSERT_HELD(obj);
+ needs_fence = ((entry->flags & EXEC_OBJECT_NEEDS_FENCE) &&
+ obj_priv->tiling_mode != I915_TILING_NONE);
+ if (needs_fence)
+ atomic_setbits_int(&obj->do_flags, I915_EXEC_NEEDS_FENCE);
+
+ /* Choose the GTT offset for our buffer and put it there. */
+ ret = i915_gem_object_pin(obj_priv, (u_int32_t)entry->alignment,
+ needs_fence);
+ if (ret)
+ return ret;
+
+ if (needs_fence) {
+ ret = i915_gem_object_get_fence(obj_priv);
+ if (ret)
+ return ret;
+
+ if (i915_gem_object_pin_fence(obj_priv))
+ obj->do_flags |= __EXEC_OBJECT_HAS_FENCE;
+
+ obj_priv->pending_fenced_gpu_access = true;
+ }
+
+ entry->offset = obj_priv->gtt_offset;
+
+ /* Apply the relocations, using the GTT aperture to avoid cache
+ * flushing requirements.
+ */
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
+ struct drm_i915_gem_object *target_obj_priv;
+ uint32_t reloc_val, reloc_offset;
+
+ target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+ reloc->target_handle);
+ /* object must have come before us in the list */
+ if (target_obj == NULL) {
+ i915_gem_object_unpin(obj_priv);
+ return (EBADF);
+ }
+ if ((target_obj->do_flags & I915_IN_EXEC) == 0) {
+ printf("%s: object not already in execbuffer\n",
+ __func__);
+ ret = EBADF;
+ goto err;
+ }
+
+ target_obj_priv = to_intel_bo(target_obj);
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_obj_priv->dmamap == 0) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* must be in one write domain and one only */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ ret = EINVAL;
+ goto err;
+ }
+ if (reloc->read_domains & I915_GEM_DOMAIN_CPU ||
+ reloc->write_domain & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("relocation with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x", obj,
+ reloc->target_handle, (int)reloc->offset,
+ reloc->read_domains, reloc->write_domain);
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ ret = EINVAL;
+ goto err;
+ }
+
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+
+ if (reloc->offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset, (int) obj->size);
+ ret = EINVAL;
+ goto err;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (reloc->delta > target_obj->size) {
+ DRM_ERROR("reloc larger than target\n");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* Map the page containing the relocation we're going to
+ * perform.
+ */
+ reloc_offset = obj_priv->gtt_offset + reloc->offset;
+ reloc_val = target_obj_priv->gtt_offset + reloc->delta;
+
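+		/*
+		 * If userland's presumed offset matches where the target
+		 * actually ended up, the value already written in the
+		 * batch is correct and the GTT write can be skipped.
+		 */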
+ if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
+ drm_gem_object_unreference(target_obj);
+ continue;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(obj_priv, true);
+ if (ret != 0)
+ goto err;
+
+ if ((ret = agp_map_subregion(dev_priv->agph,
+ trunc_page(reloc_offset), PAGE_SIZE, &bsh)) != 0) {
+ DRM_ERROR("map failed...\n");
+ goto err;
+ }
+
+ bus_space_write_4(dev_priv->bst, bsh, reloc_offset & PAGE_MASK,
+ reloc_val);
+
+ reloc->presumed_offset = target_obj_priv->gtt_offset;
+
+ agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
+ drm_gem_object_unreference(target_obj);
+ }
+
+ return 0;
+
+err:
+ /* we always jump to here mid-loop */
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj_priv);
+ return (ret);
+}
+
+int
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
+ u_int32_t buffer_count, struct drm_i915_gem_relocation_entry **relocs)
+{
+ u_int32_t reloc_count = 0, reloc_index = 0, i;
+ int ret;
+
+ *relocs = NULL;
+ for (i = 0; i < buffer_count; i++) {
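+		/* guard against u_int32_t wraparound when summing counts */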
+ if (reloc_count + exec_list[i].relocation_count < reloc_count)
+ return (EINVAL);
+ reloc_count += exec_list[i].relocation_count;
+ }
+
+ if (reloc_count == 0)
+ return (0);
+
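+	/* refuse a byte count that would overflow the drm_alloc size */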
+ if (SIZE_MAX / reloc_count < sizeof(**relocs))
+ return (EINVAL);
+ *relocs = drm_alloc(reloc_count * sizeof(**relocs));
+ for (i = 0; i < buffer_count; i++) {
+ if ((ret = copyin((void *)(uintptr_t)exec_list[i].relocs_ptr,
+ &(*relocs)[reloc_index], exec_list[i].relocation_count *
+ sizeof(**relocs))) != 0) {
+ drm_free(*relocs);
+ *relocs = NULL;
+ return (ret);
+ }
+ reloc_index += exec_list[i].relocation_count;
+ }
+
+ return (0);
+}
+
+int
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
+ u_int32_t buffer_count, struct drm_i915_gem_relocation_entry *relocs)
+{
+ u_int32_t reloc_count = 0, i;
+ int ret = 0;
+
+ if (relocs == NULL)
+ return (0);
+
+ for (i = 0; i < buffer_count; i++) {
+ if ((ret = copyout(&relocs[reloc_count],
+ (void *)(uintptr_t)exec_list[i].relocs_ptr,
+ exec_list[i].relocation_count * sizeof(*relocs))) != 0)
+ break;
+ reloc_count += exec_list[i].relocation_count;
+ }
+
+ drm_free(relocs);
+
+ return (ret);
+}
+
+void
+inteldrm_quiesce(struct inteldrm_softc *dev_priv)
+{
+ /*
+ * Right now we depend on X vt switching, so we should be
+ * already suspended, but fallbacks may fault, etc.
+ * Since we can't readback the gtt to reset what we have, make
+ * sure that everything is unbound.
+ */
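+	/*
+	 * INTELDRM_QUIET handshake: inteldrm_ioctl() tracks in-flight
+	 * calls in dev_priv->entries and sleeps while the flag is set,
+	 * so once the flag is raised here we just wait for the count
+	 * to drain to zero.
+	 */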
+ KASSERT(dev_priv->mm.suspended);
+ KASSERT(dev_priv->rings[RCS].obj == NULL);
+ atomic_setbits_int(&dev_priv->sc_flags, INTELDRM_QUIET);
+ while (dev_priv->entries)
+ tsleep(&dev_priv->entries, 0, "intelquiet", 0);
+ /*
+	 * Nothing should be dirty WRT the chip, only stuff that's bound
+	 * for gtt mapping.  Nothing should be pinned over a vt switch;
+	 * if it is, rendering corruption will occur due to api misuse.
+ */
+ KASSERT(list_empty(&dev_priv->mm.active_list));
+ /* Disabled because root could panic the kernel if this was enabled */
+ /* KASSERT(dev->pin_count == 0); */
+
+ /* can't fail since uninterruptible */
+ (void)i915_gem_evict_inactive(dev_priv);
+}
+
+void
+inteldrm_timeout(void *arg)
+{
+ struct inteldrm_softc *dev_priv = arg;
+
+ if (workq_add_task(dev_priv->workq, 0, i915_gem_retire_work_handler,
+ dev_priv, NULL) == ENOMEM)
+ DRM_ERROR("failed to run retire handler\n");
+}
+
+int
+i8xx_do_reset(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ drm_msleep(1, "8res1");
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ drm_msleep(1, "8res2");
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+int
+i965_reset_complete(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u8 gdrst;
+ gdrst = (pci_conf_read(dev_priv->pc, dev_priv->tag, I965_GDRST) >> 24);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+int
+i965_do_reset(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ pcireg_t reg;
+ int retries;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ reg = pci_conf_read(dev_priv->pc, dev_priv->tag, I965_GDRST);
+ reg |= ((GRDOM_RENDER | GRDOM_RESET_ENABLE) << 24);
+ pci_conf_write(dev_priv->pc, dev_priv->tag, I965_GDRST, reg);
+
+ for (retries = 500; retries > 0 ; retries--) {
+ if (i965_reset_complete(dev))
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("965 reset timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* We can't reset render&media without also resetting display ... */
+ reg = pci_conf_read(dev_priv->pc, dev_priv->tag, I965_GDRST);
+ reg |= ((GRDOM_MEDIA | GRDOM_RESET_ENABLE) << 24);
+ pci_conf_write(dev_priv->pc, dev_priv->tag, I965_GDRST, reg);
+
+ for (retries = 500; retries > 0 ; retries--) {
+ if (i965_reset_complete(dev))
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("965 reset 2 timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return (0);
+}
+
+int
+ironlake_do_reset(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 gdrst;
+ int retries;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ for (retries = 500; retries > 0 ; retries--) {
+ if (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("ironlake reset timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* We can't reset render&media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ for (retries = 500; retries > 0 ; retries--) {
+ if (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+		DRM_ERROR("ironlake media reset timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return (0);
+}
+
+int
+gen6_do_reset(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = 0;
+ int retries;
+
+ /* Hold gt_lock across reset to prevent any register access
+ * with forcewake not set correctly
+ */
+ mtx_enter(&dev_priv->gt_lock);
+
+ /* Reset the chip */
+
+ /* GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+
+ /* Spin waiting for the device to ack the reset request */
+ for (retries = 500; retries > 0 ; retries--) {
+ if ((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("gen6 reset timed out\n");
+ ret = -ETIMEDOUT;
+ }
+
+ /* If reset with a user forcewake, try to restore, otherwise turn it off */
+ if (dev_priv->forcewake_count)
+ dev_priv->gt.force_wake_get(dev_priv);
+ else
+ dev_priv->gt.force_wake_put(dev_priv);
+
+ /* Restore fifo count */
+ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+
+ mtx_leave(&dev_priv->gt_lock);
+ return ret;
+}
+
+int
+intel_gpu_reset(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = -ENODEV;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ ret = gen6_do_reset(dev);
+ break;
+ case 5:
+ ret = ironlake_do_reset(dev);
+ break;
+ case 4:
+ ret = i965_do_reset(dev);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev);
+ break;
+ }
+
+ /* Also reset the gpu hangman. */
+ if (dev_priv->stop_rings) {
+ DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_ERROR("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * i915_reset - reset chip after a hang
+ * @dev: drm device to reset
+ *
+ * Reset the chip. Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ * - reset the chip using the reset reg
+ * - re-init context state
+ * - re-init hardware status page
+ * - re-init ring buffer
+ * - re-init interrupt state
+ * - re-init display
+ */
+int
+i915_reset(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+#ifdef notyet
+ if (!i915_try_reset)
+ return 0;
+#endif
+
+ DRM_LOCK();
+
+ i915_gem_reset(dev);
+
+ ret = -ENODEV;
+ if (time_second - dev_priv->last_gpu_reset < 5)
+ DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+ else
+ ret = intel_gpu_reset(dev);
+
+ dev_priv->last_gpu_reset = time_second;
+ if (ret) {
+ DRM_ERROR("Failed to reset chip.\n");
+ DRM_UNLOCK();
+ return ret;
+ }
+
+ /* Ok, now get things going again... */
+
+ /*
+ * Everything depends on having the GTT running, so we need to start
+ * there. Fortunately we don't need to do this unless we reset the
+ * chip at a PCI level.
+ *
+ * Next we need to restore the context, but we don't use those
+ * yet either...
+ *
+ * Ring buffer needs to be re-initialized in the KMS case, or if X
+ * was running at the time of the reset (i.e. we weren't VT
+ * switched away).
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+ !dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring;
+ int i;
+
+ dev_priv->mm.suspended = 0;
+
+ i915_gem_init_swizzling(dev);
+
+ for_each_ring(ring, dev_priv, i)
+ ring->init(ring);
+
+#ifdef notyet
+ i915_gem_context_init(dev);
+ i915_gem_init_ppgtt(dev);
+#endif
+
+ /*
+ * It would make sense to re-init all the other hw state, at
+ * least the rps/rc6/emon init done within modeset_init_hw. For
+ * some unknown reason, this blows up my ilk, so don't.
+ */
+
+ DRM_UNLOCK();
+
+ drm_irq_uninstall(dev);
+ drm_irq_install(dev);
+ } else {
+ DRM_UNLOCK();
+ }
+
+ return 0;
+}
+
+/*
+ * Debug code from here.
+ */
+#ifdef WATCH_INACTIVE
+void
+inteldrm_verify_inactive(struct inteldrm_softc *dev_priv, char *file,
+ int line)
+{
+ struct drm_obj *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ TAILQ_FOREACH(obj_priv, &dev_priv->mm.inactive_list, list) {
+ obj = &obj_priv->base;
+ if (obj_priv->pin_count || obj_priv->active ||
+ obj->write_domain & I915_GEM_GPU_DOMAINS)
+			DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
+ obj, obj_priv->pin_count, obj_priv->active,
+ obj->write_domain, file, line);
+ }
+}
+#endif /* WATCH_INACTIVE */
+
+#if (INTELDRM_DEBUG > 1)
+
+int i915_gem_object_list_info(int, uint);
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
+{
+ if (obj->user_pin_count > 0)
+ return "P";
+ if (obj->pin_count > 0)
+ return "p";
+ else
+ return " ";
+}
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+{
+ switch (obj->tiling_mode) {
+ default:
+ case I915_TILING_NONE: return " ";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ }
+}
+
+static const char *
+cache_level_str(int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return " snooped (LLC)";
+ case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
+ default: return "";
+ }
+}
+
+static void
+describe_obj(struct drm_i915_gem_object *obj)
+{
+ printf("%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+ obj->base.size / 1024,
+ obj->base.read_domains,
+ obj->base.write_domain,
+ obj->last_read_seqno,
+ obj->last_write_seqno,
+ obj->last_fenced_seqno,
+ cache_level_str(obj->cache_level),
+ obj->dirty ? " dirty" : "",
+ obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ if (obj->base.name)
+ printf(" (name: %d)", obj->base.name);
+ if (obj->pin_count)
+ printf(" (pinned x %d)", obj->pin_count);
+ if (obj->fence_reg != I915_FENCE_REG_NONE)
+ printf(" (fence: %d)", obj->fence_reg);
+#if 0
+ if (obj->gtt_space != NULL)
+ printf(" (gtt offset: %08x, size: %08x)",
+ obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ printf(" (%s mappable)", s);
+ }
+#endif
+ if (obj->ring != NULL)
+ printf(" (%s)", obj->ring->name);
+}
+
+#define ACTIVE_LIST 0
+#define INACTIVE_LIST 1
+
+int
+i915_gem_object_list_info(int kdev, uint list)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct list_head *head;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ size_t total_obj_size, total_gtt_size;
+ int count;
+#if 0
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+#endif
+
+ switch (list) {
+ case ACTIVE_LIST:
+ printf("Active:\n");
+ head = &dev_priv->mm.active_list;
+ break;
+ case INACTIVE_LIST:
+ printf("Inactive:\n");
+ head = &dev_priv->mm.inactive_list;
+ break;
+ default:
+// mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj, head, mm_list) {
+ printf(" ");
+ describe_obj(obj);
+ printf("\n");
+ total_obj_size += obj->base.size;
+ count++;
+ }
+// mutex_unlock(&dev->struct_mutex);
+
+	printf("Total %d objects, %zu bytes\n",
+	    count, total_obj_size);
+ return 0;
+}
+
+
+static void i915_ring_seqno_info(struct intel_ring_buffer *ring)
+{
+ if (ring->get_seqno) {
+ printf("Current sequence (%s): %d\n",
+ ring->name, ring->get_seqno(ring, false));
+ }
+}
+
+void
+i915_gem_seqno_info(int kdev)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_seqno_info(ring);
+}
+
+void
+i915_interrupt_info(int kdev)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i, pipe;
+
+ if (IS_VALLEYVIEW(dev)) {
+ printf("Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ printf("Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ printf("Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ printf("Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ printf("Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ printf("Master IER:\t%08x\n",
+ I915_READ(VLV_MASTER_IER));
+
+ printf("Render IER:\t%08x\n",
+ I915_READ(GTIER));
+ printf("Render IIR:\t%08x\n",
+ I915_READ(GTIIR));
+ printf("Render IMR:\t%08x\n",
+ I915_READ(GTIMR));
+
+ printf("PM IER:\t\t%08x\n",
+ I915_READ(GEN6_PMIER));
+ printf("PM IIR:\t\t%08x\n",
+ I915_READ(GEN6_PMIIR));
+ printf("PM IMR:\t\t%08x\n",
+ I915_READ(GEN6_PMIMR));
+
+ printf("Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ printf("DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ printf("DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ } else if (!HAS_PCH_SPLIT(dev)) {
+ printf("Interrupt enable: %08x\n",
+ I915_READ(IER));
+ printf("Interrupt identity: %08x\n",
+ I915_READ(IIR));
+ printf("Interrupt mask: %08x\n",
+ I915_READ(IMR));
+ for_each_pipe(pipe)
+ printf("Pipe %c stat: %08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+ } else {
+ printf("North Display Interrupt enable: %08x\n",
+ I915_READ(DEIER));
+ printf("North Display Interrupt identity: %08x\n",
+ I915_READ(DEIIR));
+ printf("North Display Interrupt mask: %08x\n",
+ I915_READ(DEIMR));
+ printf("South Display Interrupt enable: %08x\n",
+ I915_READ(SDEIER));
+ printf("South Display Interrupt identity: %08x\n",
+ I915_READ(SDEIIR));
+ printf("South Display Interrupt mask: %08x\n",
+ I915_READ(SDEIMR));
+ printf("Graphics Interrupt enable: %08x\n",
+ I915_READ(GTIER));
+ printf("Graphics Interrupt identity: %08x\n",
+ I915_READ(GTIIR));
+ printf("Graphics Interrupt mask: %08x\n",
+ I915_READ(GTIMR));
+ }
+#if 0
+ printf("Interrupts received: %d\n",
+ atomic_read(&dev_priv->irq_received));
+#endif
+ for_each_ring(ring, dev_priv, i) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ printf(
+ "Graphics Interrupt mask (%s): %08x\n",
+ ring->name, I915_READ_IMR(ring));
+ }
+ i915_ring_seqno_info(ring);
+ }
+}
+
+void
+i915_gem_fence_regs_info(int kdev)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int i;
+
+ printf("Reserved fences = %d\n", dev_priv->fence_reg_start);
+ printf("Total fences = %d\n", dev_priv->num_fence_regs);
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
+
+ printf("Fence %d, pin count = %d, object = ",
+ i, dev_priv->fence_regs[i].pin_count);
+ if (obj == NULL)
+ printf("unused");
+ else
+ describe_obj(obj);
+ printf("\n");
+ }
+}
+
+void
+i915_hws_info(int kdev)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int i;
+ volatile u32 *hws;
+
+ hws = (volatile u32 *)ring->status_page.page_addr;
+ if (hws == NULL)
+ return;
+
+ for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+ printf("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i * 4,
+ hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+ }
+}
+
+static void
+i915_dump_pages(bus_space_tag_t bst, bus_space_handle_t bsh,
+ bus_size_t size)
+{
+ bus_addr_t offset = 0;
+ int i = 0;
+
+ /*
+	 * This is formatted a bit oddly so I don't have to play with
+	 * the intel tools too much.
+ */
+ for (offset = 0; offset < size; offset += 4, i += 4) {
+ if (i == PAGE_SIZE)
+ i = 0;
+ printf("%08x : %08x\n", i, bus_space_read_4(bst, bsh,
+ offset));
+ }
+}
+
+void
+i915_batchbuffer_info(int kdev)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_obj *obj;
+ struct drm_i915_gem_object *obj_priv;
+ bus_space_handle_t bsh;
+ int ret;
+
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ obj = &obj_priv->base;
+ if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+ if ((ret = agp_map_subregion(dev_priv->agph,
+ obj_priv->gtt_offset, obj->size, &bsh)) != 0) {
+ DRM_ERROR("Failed to map pages: %d\n", ret);
+ return;
+ }
+ printf("--- gtt_offset = 0x%08x\n",
+ obj_priv->gtt_offset);
+ i915_dump_pages(dev_priv->bst, bsh, obj->size);
+			agp_unmap_subregion(dev_priv->agph, bsh, obj->size);
+ }
+ }
+}
+
+void
+i915_ringbuffer_data(int kdev)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ bus_size_t off;
+
+ if (!dev_priv->rings[RCS].obj) {
+ printf("No ringbuffer setup\n");
+ return;
+ }
+
+ for (off = 0; off < dev_priv->rings[RCS].size; off += 4)
+		printf("%08lx : %08x\n", off, bus_space_read_4(dev_priv->bst,
+ dev_priv->rings[RCS].bsh, off));
+}
+
+void
+i915_ringbuffer_info(int kdev)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u_int32_t head, tail;
+
+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+
+ printf("RingHead : %08x\n", head);
+ printf("RingTail : %08x\n", tail);
+ printf("RingMask : %08x\n", dev_priv->rings[RCS].size - 1);
+ printf("RingSize : %08lx\n", dev_priv->rings[RCS].size);
+ printf("Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ?
+ ACTHD_I965 : ACTHD));
+}
+
+#endif
+
+static int
+intel_pch_match(struct pci_attach_args *pa)
+{
+ if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
+ PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
+ PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_ISA)
+ return (1);
+ return (0);
+}
+
+void
+intel_detect_pch(struct inteldrm_softc *dev_priv)
+{
+ struct pci_attach_args pa;
+	unsigned short id;
+
+	if (pci_find_device(&pa, intel_pch_match) == 0) {
+		DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
+		return;
+	}
+ id = PCI_PRODUCT(pa.pa_id) & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
+
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ dev_priv->pch_type = PCH_IBX;
+ dev_priv->num_pch_pll = 2;
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ break;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ break;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ /* PantherPoint is CPT compatible */
+ dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
+		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+ break;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ break;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ break;
+ default:
+ DRM_DEBUG_KMS("No PCH detected\n");
+ }
+}
+
diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h
new file mode 100644
index 00000000000..e0aa6afd432
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_drv.h
@@ -0,0 +1,1567 @@
+/* $OpenBSD: i915_drv.h,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRV_H_
+#define _I915_DRV_H_
+
+#include "i915_reg.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "intel_bios.h"
+#include "intel_ringbuffer.h"
+
+#include <sys/workq.h>
+#include <dev/wscons/wsconsio.h>
+#include <dev/wscons/wsdisplayvar.h>
+#include <dev/rasops/rasops.h>
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
+
+#define DRIVER_NAME "i915"
+#define DRIVER_DESC "Intel Graphics"
+#define DRIVER_DATE "20080730"
+
+enum pipe {
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ I915_MAX_PIPES
+};
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
+enum plane {
+ PLANE_A = 0,
+ PLANE_B,
+ PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+enum port {
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
+#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ if ((intel_encoder)->base.crtc == (__crtc))
+
+struct intel_pch_pll {
+ int refcount; /* count of number of CRTCs sharing this PLL */
+ int active; /* count of number of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ int pll_reg;
+ int fp0_reg;
+ int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
+struct intel_ddi_plls {
+ int spll_refcount;
+ int wrpll1_refcount;
+ int wrpll2_refcount;
+};
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ * - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 6
+#define DRIVER_PATCHLEVEL 0
+
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+ int id;
+ struct drm_dmamem *handle;
+ struct drm_i915_gem_object *cur_obj;
+};
+
+struct inteldrm_softc;
+
+struct drm_i915_display_funcs {
+ bool (*fbc_enabled)(struct drm_device *dev);
+ void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*disable_fbc)(struct drm_device *dev);
+ int (*get_display_clock_speed)(struct drm_device *dev);
+ int (*get_fifo_size)(struct drm_device *dev, int plane);
+ void (*update_wm)(struct drm_device *dev);
+ void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size);
+ void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
+ void (*modeset_global_resources)(struct drm_device *dev);
+ int (*crtc_mode_set)(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
+ void (*off)(struct drm_crtc *crtc);
+ void (*write_eld)(struct drm_connector *connector,
+ struct drm_crtc *crtc);
+ void (*fdi_link_train)(struct drm_crtc *crtc);
+ void (*init_clock_gating)(struct drm_device *dev);
+ int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj);
+ int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y);
+ /* clock updates for mode set */
+ /* cursor updates */
+ /* render clock increase/decrease */
+ /* display clock increase/decrease */
+ /* pll clock increase/decrease */
+};
+
+struct drm_i915_gt_funcs {
+ void (*force_wake_get)(struct inteldrm_softc *dev_priv);
+ void (*force_wake_put)(struct inteldrm_softc *dev_priv);
+};
+
+struct intel_device_info {
+ u8 gen;
+ u8 is_mobile:1;
+ u8 is_i85x:1;
+ u8 is_i915g:1;
+ u8 is_i945gm:1;
+ u8 is_g33:1;
+ u8 need_gfx_hws:1;
+ u8 is_g4x:1;
+ u8 is_pineview:1;
+ u8 is_broadwater:1;
+ u8 is_crestline:1;
+ u8 is_ivybridge:1;
+ u8 is_valleyview:1;
+ u8 has_force_wake:1;
+ u8 is_haswell:1;
+ u8 has_fbc:1;
+ u8 has_pipe_cxsr:1;
+ u8 has_hotplug:1;
+ u8 cursor_needs_physical:1;
+ u8 has_overlay:1;
+ u8 overlay_needs_physical:1;
+ u8 supports_tv:1;
+ u8 has_bsd_ring:1;
+ u8 has_blt_ring:1;
+ u8 has_llc:1;
+};
+
+enum no_fbc_reason {
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
+ FBC_MODULE_PARAM,
+};
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ struct opregion_asle *asle;
+ void *vbt;
+ u32 *lid_state;
+};
+#define OPREGION_SIZE (8*1024)
+
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+
+struct drm_i915_fence_reg {
+ struct list_head lru_list;
+ struct drm_i915_gem_object *obj;
+ int pin_count;
+};
+
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+};
+
+struct gmbus_port {
+ struct inteldrm_softc *dev_priv;
+ int port;
+};
+
+enum intel_pch {
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+ PCH_LPT, /* Lynxpoint PCH */
+};
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+
+struct intel_fbdev;
+
+struct intel_gmbus {
+ struct i2c_controller controller;
+ struct gmbus_port gp;
+};
+
+struct i915_suspend_saved_registers {
+ u8 saveLBB;
+ u32 saveDSPACNTR;
+ u32 saveDSPBCNTR;
+ u32 saveDSPARB;
+ u32 savePIPEACONF;
+ u32 savePIPEBCONF;
+ u32 savePIPEASRC;
+ u32 savePIPEBSRC;
+ u32 saveFPA0;
+ u32 saveFPA1;
+ u32 saveDPLL_A;
+ u32 saveDPLL_A_MD;
+ u32 saveHTOTAL_A;
+ u32 saveHBLANK_A;
+ u32 saveHSYNC_A;
+ u32 saveVTOTAL_A;
+ u32 saveVBLANK_A;
+ u32 saveVSYNC_A;
+ u32 saveBCLRPAT_A;
+ u32 saveTRANSACONF;
+ u32 saveTRANS_HTOTAL_A;
+ u32 saveTRANS_HBLANK_A;
+ u32 saveTRANS_HSYNC_A;
+ u32 saveTRANS_VTOTAL_A;
+ u32 saveTRANS_VBLANK_A;
+ u32 saveTRANS_VSYNC_A;
+ u32 savePIPEASTAT;
+ u32 saveDSPASTRIDE;
+ u32 saveDSPASIZE;
+ u32 saveDSPAPOS;
+ u32 saveDSPAADDR;
+ u32 saveDSPASURF;
+ u32 saveDSPATILEOFF;
+ u32 savePFIT_PGM_RATIOS;
+ u32 saveBLC_HIST_CTL;
+ u32 saveBLC_PWM_CTL;
+ u32 saveBLC_PWM_CTL2;
+ u32 saveBLC_CPU_PWM_CTL;
+ u32 saveBLC_CPU_PWM_CTL2;
+ u32 saveFPB0;
+ u32 saveFPB1;
+ u32 saveDPLL_B;
+ u32 saveDPLL_B_MD;
+ u32 saveHTOTAL_B;
+ u32 saveHBLANK_B;
+ u32 saveHSYNC_B;
+ u32 saveVTOTAL_B;
+ u32 saveVBLANK_B;
+ u32 saveVSYNC_B;
+ u32 saveBCLRPAT_B;
+ u32 saveTRANSBCONF;
+ u32 saveTRANS_HTOTAL_B;
+ u32 saveTRANS_HBLANK_B;
+ u32 saveTRANS_HSYNC_B;
+ u32 saveTRANS_VTOTAL_B;
+ u32 saveTRANS_VBLANK_B;
+ u32 saveTRANS_VSYNC_B;
+ u32 savePIPEBSTAT;
+ u32 saveDSPBSTRIDE;
+ u32 saveDSPBSIZE;
+ u32 saveDSPBPOS;
+ u32 saveDSPBADDR;
+ u32 saveDSPBSURF;
+ u32 saveDSPBTILEOFF;
+ u32 saveVGA0;
+ u32 saveVGA1;
+ u32 saveVGA_PD;
+ u32 saveVGACNTRL;
+ u32 saveADPA;
+ u32 saveLVDS;
+ u32 savePP_ON_DELAYS;
+ u32 savePP_OFF_DELAYS;
+ u32 saveDVOA;
+ u32 saveDVOB;
+ u32 saveDVOC;
+ u32 savePP_ON;
+ u32 savePP_OFF;
+ u32 savePP_CONTROL;
+ u32 savePP_DIVISOR;
+ u32 savePFIT_CONTROL;
+ u32 save_palette_a[256];
+ u32 save_palette_b[256];
+ u32 saveDPFC_CB_BASE;
+ u32 saveFBC_CFB_BASE;
+ u32 saveFBC_LL_BASE;
+ u32 saveFBC_CONTROL;
+ u32 saveFBC_CONTROL2;
+ u32 saveIER;
+ u32 saveIIR;
+ u32 saveIMR;
+ u32 saveDEIER;
+ u32 saveDEIMR;
+ u32 saveGTIER;
+ u32 saveGTIMR;
+ u32 saveFDI_RXA_IMR;
+ u32 saveFDI_RXB_IMR;
+ u32 saveCACHE_MODE_0;
+ u32 saveMI_ARB_STATE;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+ u32 saveSWF2[3];
+ u8 saveMSR;
+ u8 saveSR[8];
+ u8 saveGR[25];
+ u8 saveAR_INDEX;
+ u8 saveAR[21];
+ u8 saveDACMASK;
+ u8 saveCR[37];
+ uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+ u32 saveCURACNTR;
+ u32 saveCURAPOS;
+ u32 saveCURABASE;
+ u32 saveCURBCNTR;
+ u32 saveCURBPOS;
+ u32 saveCURBBASE;
+ u32 saveCURSIZE;
+ u32 saveDP_B;
+ u32 saveDP_C;
+ u32 saveDP_D;
+ u32 savePIPEA_GMCH_DATA_M;
+ u32 savePIPEB_GMCH_DATA_M;
+ u32 savePIPEA_GMCH_DATA_N;
+ u32 savePIPEB_GMCH_DATA_N;
+ u32 savePIPEA_DP_LINK_M;
+ u32 savePIPEB_DP_LINK_M;
+ u32 savePIPEA_DP_LINK_N;
+ u32 savePIPEB_DP_LINK_N;
+ u32 saveFDI_RXA_CTL;
+ u32 saveFDI_TXA_CTL;
+ u32 saveFDI_RXB_CTL;
+ u32 saveFDI_TXB_CTL;
+ u32 savePFA_CTL_1;
+ u32 savePFB_CTL_1;
+ u32 savePFA_WIN_SZ;
+ u32 savePFB_WIN_SZ;
+ u32 savePFA_WIN_POS;
+ u32 savePFB_WIN_POS;
+ u32 savePCH_DREF_CONTROL;
+ u32 saveDISP_ARB_CTL;
+ u32 savePIPEA_DATA_M1;
+ u32 savePIPEA_DATA_N1;
+ u32 savePIPEA_LINK_M1;
+ u32 savePIPEA_LINK_N1;
+ u32 savePIPEB_DATA_M1;
+ u32 savePIPEB_DATA_N1;
+ u32 savePIPEB_LINK_M1;
+ u32 savePIPEB_LINK_N1;
+ u32 saveMCHBAR_RENDER_STANDBY;
+ u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+ struct workq_task task;
+ u32 pm_iir;
+	/* lock - irqsave spinlock that protects the work_struct and
+	 * pm_iir. */
+ struct mutex lock;
+
+	/* The below variables and all the rps hw state are protected by
+	 * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+
+ struct workq_task delayed_resume_task;
+ struct timeout delayed_resume_to;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested.
+ */
+ struct rwlock hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+};
+
+struct intel_l3_parity {
+ u32 *remap_info;
+ struct workq_task error_task;
+};
+
+/*
+ * lock ordering:
+ * exec lock,
+ * request lock
+ * list lock.
+ *
+ * XXX fence lock, object lock
+ */
+struct inteldrm_softc {
+ struct device dev;
+ struct device *drmdev;
+ bus_dma_tag_t agpdmat; /* tag from intagp for GEM */
+ bus_dma_tag_t dmat;
+ bus_space_tag_t bst;
+ struct agp_map *agph;
+ bus_space_handle_t opregion_ioh;
+
+ u_long flags;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ pci_chipset_tag_t pc;
+ pcitag_t tag;
+ pci_intr_handle_t ih;
+ void *irqh;
+
+ struct vga_pci_bar *regs;
+
+ int nscreens;
+ void (*switchcb)(void *, int, int);
+ void *switchcbarg;
+ struct workq_task switchwqt;
+ struct rasops_info ro;
+
+ int noaccel;
+ struct wsdisplay_emulops noaccel_ops;
+
+ uint32_t gpio_mmio_base;
+
+ struct drm_i915_gt_funcs gt;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct mutex gt_lock;
+
+ drm_i915_sarea_t *sarea_priv;
+ struct intel_ring_buffer rings[I915_NUM_RINGS];
+ uint32_t next_seqno;
+
+ struct drm_dmamem *status_page_dmah;
+
+ union flush {
+ struct {
+ bus_space_tag_t bst;
+ bus_space_handle_t bsh;
+ } i9xx;
+ struct {
+ bus_dma_segment_t seg;
+ caddr_t kva;
+ } i8xx;
+ } ifp;
+ struct workq *workq;
+ struct vm_page *pgs;
+ size_t max_gem_obj_size; /* XXX */
+
+ /* Protects user_irq_refcount and irq_mask reg */
+ struct mutex irq_lock;
+ /* Refcount for user irq, only enabled when needed */
+ int user_irq_refcount;
+ u_int32_t irq_mask;
+
+ /* DPIO indirect register protection */
+ struct mutex dpio_lock;
+ /* Cached value of IMR to avoid reads in updating the bitfield */
+ u_int32_t pipestat[2];
+ /* these two ironlake only, we should union this with pipestat XXX */
+ u_int32_t gt_irq_mask;
+ u_int32_t pch_irq_mask;
+
+ u_int32_t hotplug_supported_mask;
+ struct workq_task hotplug_task;
+ bool enable_hotplug_processing;
+
+ int num_pipe;
+ int num_pch_pll;
+
+ struct intel_opregion opregion;
+
+ int crt_ddc_pin;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
+
+ /* LVDS info */
+ int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
+
+ struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
+
+ /* Reclocking support */
+ bool render_reclock_avail;
+ bool lvds_downclock_avail;
+ int lvds_downclock;
+ bool busy;
+ int child_dev_num;
+ struct child_device_config *child_dev;
+
+ struct drm_i915_fence_reg fence_regs[16]; /* 965 */
+ int fence_reg_start; /* 4 by default */
+ int num_fence_regs; /* 8 pre-965, 16 post */
+
+#define INTELDRM_QUIET 0x01 /* suspend close, get off the hardware */
+#define INTELDRM_WEDGED 0x02 /* chipset hung pending reset */
+#define INTELDRM_SUSPENDED 0x04 /* in vt switch, no commands */
+ int sc_flags; /* quiet, suspended, hung */
+ /* number of ioctls + faults in flight */
+ int entries;
+
+ struct workq_task error_task;
+
+ enum intel_pch pch_type;
+ unsigned short pch_id;
+
+ struct drm_i915_display_funcs display;
+
+ struct timeout idle_timeout;
+
+ unsigned long quirks;
+
+ /* Register state */
+ bool modeset_on_lid;
+
+ struct {
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head bound_list;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /* Fence LRU */
+ struct list_head fence_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests in a workq.
+ */
+ struct timeout retire_timer;
+
+ /**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /**
+ * Flag if the hardware appears to be wedged.
+ *
+ * This is set when attempts to idle the device timeout.
+	 * It prevents command submission from occurring and makes
+ * every pending request fail
+ */
+ int wedged;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+
+ /* storage for physical objects */
+ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+ } mm;
+
+ /* for hangcheck */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+ struct timeout hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ const struct intel_device_info *info;
+
+ int relative_constants_mode;
+
+ struct sdvo_device_mapping sdvo_mappings[2];
+ /* indicate whether the LVDS_BORDER should be enabled or not */
+ unsigned int lvds_border_bits;
+ /* Panel fitter placement and size for Ironlake+ */
+ u32 pch_pf_pos, pch_pf_size;
+
+ struct drm_crtc *plane_to_crtc_mapping[3];
+ struct drm_crtc *pipe_to_crtc_mapping[3];
+ int pending_flip_queue;
+
+ bool flip_pending_is_done;
+
+ struct drm_connector *int_lvds_connector;
+ struct drm_connector *int_edp_connector;
+
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+// struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
+
+ struct intel_l3_parity l3_parity;
+
+ /* gen6+ rps state */
+ struct intel_gen6_power_mgmt rps;
+
+ /* ilk-only ips/rps state. Everything in here is protected by the global
+ * mchdev_lock in intel_pm.c */
+ struct intel_ilk_power_mgmt ips;
+
+ enum no_fbc_reason no_fbc_reason;
+
+ unsigned int stop_rings;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ int cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ int error_completion;
+ struct mutex error_completion_lock;
+
+ time_t last_gpu_reset;
+
+ struct intel_fbdev *fbdev;
+
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+
+ u32 fdi_rx_config;
+
+ struct i915_suspend_saved_registers regfile;
+};
+typedef struct inteldrm_softc drm_i915_private_t;
+
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+ if (((ring__) = &(dev_priv__)->rings[(i__)]), intel_ring_initialized((ring__)))
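+
+/*
+ * Typical usage (a sketch mirroring i915_reset()):
+ *
+ *	struct intel_ring_buffer *ring;
+ *	int i;
+ *
+ *	for_each_ring(ring, dev_priv, i)
+ *		ring->init(ring);
+ */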
+
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
+enum i915_cache_level {
+ I915_CACHE_NONE,
+ I915_CACHE_LLC,
+ I915_CACHE_LLC_MLC, /* gen6+ */
+};
+
+struct inteldrm_file {
+ struct drm_file file_priv;
+ struct {
+ } mm;
+};
+
+/* chip type flags */
+#define CHIP_I830 0x00001
+#define CHIP_I845G 0x00002
+#define CHIP_I85X 0x00004
+#define CHIP_I865G 0x00008
+#define CHIP_I9XX 0x00010
+#define CHIP_I915G 0x00020
+#define CHIP_I915GM 0x00040
+#define CHIP_I945G 0x00080
+#define CHIP_I945GM 0x00100
+#define CHIP_I965 0x00200
+#define CHIP_I965GM 0x00400
+#define CHIP_G33 0x00800
+#define CHIP_GM45 0x01000
+#define CHIP_G4X 0x02000
+#define CHIP_M 0x04000
+#define CHIP_HWS 0x08000
+#define CHIP_GEN2 0x10000
+#define CHIP_GEN3 0x20000
+#define CHIP_GEN4 0x40000
+#define CHIP_GEN6 0x80000
+#define CHIP_PINEVIEW 0x100000
+#define CHIP_IRONLAKE 0x200000
+#define CHIP_IRONLAKE_D 0x400000
+#define CHIP_IRONLAKE_M 0x800000
+#define CHIP_SANDYBRIDGE 0x1000000
+#define CHIP_IVYBRIDGE 0x2000000
+#define CHIP_GEN7 0x4000000
+
+/* flags we use in drm_obj's do_flags */
+#define I915_IN_EXEC 0x0020 /* being processed in execbuffer */
+#define I915_USER_PINNED 0x0040 /* BO has been pinned from userland */
+#define I915_EXEC_NEEDS_FENCE 0x0800 /* being processed but will need fence*/
+
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ struct drm_obj base;
+
+ struct list_head gtt_list;
+
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head ring_list;
+ struct list_head mm_list;
+ /* GTT binding. */
+ bus_dmamap_t dmamap;
+ bus_dma_segment_t *dma_segs;
+ /* Current offset of the object in GTT space. */
+ bus_addr_t gtt_offset;
+ struct intel_ring_buffer *ring;
+ u_int32_t *bit_17;
+ /* extra flags to bus_dma */
+ int dma_flags;
+ /* Fence register for this object. needed for tiling. */
+ int fence_reg;
+ /** refcount for times pinned this object in GTT space */
+ int pin_count;
+ /* number of times pinned by pin ioctl. */
+ u_int user_pin_count;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ u_int32_t last_read_seqno;
+ u_int32_t last_write_seqno;
+ /** Breadcrumb of last fenced GPU access to the buffer. */
+ u_int32_t last_fenced_seqno;
+ /** Current tiling mode for the object. */
+ u_int32_t tiling_mode;
+ u_int32_t stride;
+
+ /**
+ * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
+ */
+ unsigned int active:1;
+
+ /**
+ * This is set if the object has been written to since last bound
+ * to the GTT
+ */
+ unsigned int dirty:1;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * Whether the tiling parameters for the currently associated fence
+ * register have changed. Note that for the purposes of tracking
+ * tiling changes we also treat the unfenced register, the register
+ * slot that the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ unsigned int fence_dirty:1;
+
+ /**
+ * Is the object at the current location in the gtt mappable and
+ * fenceable? Used to avoid costly recalculations.
+ */
+ unsigned int map_and_fenceable:1;
+
+ /**
+ * Whether the current gtt mapping needs to be mappable (and isn't just
+	 * mappable by accident). Track pin and fault separately for a more
+ * accurate mappable working set.
+ */
+ unsigned int fault_mappable:1;
+ unsigned int pin_mappable:1;
+
+ /*
+	 * Is the GPU currently using a fence to access this buffer?
+ */
+ unsigned int pending_fenced_gpu_access:1;
+ unsigned int fenced_gpu_access:1;
+
+ unsigned int cache_level:2;
+
+ /** for phy allocated objects */
+ struct drm_i915_gem_phys_object *phys_obj;
+
+ /**
+ * Number of crtcs where this object is currently the fb, but
+ * will be page flipped away on the next vblank. When it
+ * reaches 0, dev_priv->pending_flip_queue will be woken up.
+ */
+ int pending_flip;
+};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
+
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
+struct drm_i915_file_private {
+ struct {
+ struct mutex lock;
+ struct list_head request_list;
+ } mm;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+ struct list_head list;
+	/** On which ring this request was generated */
+ struct intel_ring_buffer *ring;
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+	/** Position in the ringbuffer of the end of the request */
+ uint32_t tail;
+ /** Time at which this request was emitted, in ticks. */
+ unsigned long emitted_ticks;
+ struct drm_i915_file_private *file_priv;
+ /** file_priv list entry for this request */
+ struct list_head client_list;
+};
+
+u_int32_t inteldrm_read_hws(struct inteldrm_softc *, int);
+int ring_wait_for_space(struct intel_ring_buffer *, int n);
+int intel_ring_begin(struct intel_ring_buffer *, int);
+void intel_ring_emit(struct intel_ring_buffer *, u_int32_t);
+void intel_ring_advance(struct intel_ring_buffer *);
+void inteldrm_update_ring(struct intel_ring_buffer *);
+int inteldrm_pipe_enabled(struct inteldrm_softc *, int);
+int i915_init_phys_hws(struct inteldrm_softc *, bus_dma_tag_t);
+
+unsigned long i915_chipset_val(struct inteldrm_softc *dev_priv);
+unsigned long i915_mch_val(struct inteldrm_softc *dev_priv);
+unsigned long i915_gfx_val(struct inteldrm_softc *dev_priv);
+void i915_update_gfx_val(struct inteldrm_softc *);
+
+int intel_init_ring_buffer(struct drm_device *,
+ struct intel_ring_buffer *);
+int init_ring_common(struct intel_ring_buffer *);
+
+int intel_init_render_ring_buffer(struct drm_device *);
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *);
+
+/* i915_irq.c */
+
+extern int i915_driver_irq_install(struct drm_device * dev);
+extern void i915_driver_irq_uninstall(struct drm_device * dev);
+extern int i915_enable_vblank(struct drm_device *dev, int crtc);
+extern void i915_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
+extern void i915_user_irq_get(struct inteldrm_softc *);
+extern void i915_user_irq_put(struct inteldrm_softc *);
+void i915_enable_pipestat(struct inteldrm_softc *, int, u_int32_t);
+void i915_disable_pipestat(struct inteldrm_softc *, int, u_int32_t);
+void intel_irq_init(struct drm_device *dev);
+void i915_hangcheck_elapsed(void *);
+
+extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
+
+/* gem */
+/* Ioctls */
+int i915_gem_init_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_create_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_pread_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_pwrite_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_set_domain_ioctl(struct drm_device *, void *,
+ struct drm_file *);
+int i915_gem_execbuffer2(struct drm_device *, void *, struct drm_file *);
+int i915_gem_pin_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_unpin_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_busy_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_entervt_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_leavevt_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_get_aperture_ioctl(struct drm_device *, void *,
+ struct drm_file *);
+int i915_gem_set_tiling(struct drm_device *, void *, struct drm_file *);
+int i915_gem_get_tiling(struct drm_device *, void *, struct drm_file *);
+int i915_gem_gtt_map_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *, void *, struct drm_file *);
+int i915_gem_madvise_ioctl(struct drm_device *, void *, struct drm_file *);
+
+/* GEM memory manager functions */
+int i915_gem_init_object(struct drm_obj *);
+void i915_gem_free_object(struct drm_obj *);
+int i915_gem_object_pin(struct drm_i915_gem_object *, uint32_t, bool);
+void i915_gem_object_unpin(struct drm_i915_gem_object *);
+void i915_gem_retire_request(struct inteldrm_softc *,
+ struct drm_i915_gem_request *);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *);
+int i915_gem_check_wedge(struct inteldrm_softc *,
+ bool interruptible);
+
+void i915_gem_retire_work_handler(void *, void*);
+int i915_gem_idle(struct inteldrm_softc *);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *,
+ struct intel_ring_buffer *);
+void i915_gem_object_move_to_inactive(struct drm_i915_gem_object *);
+void i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *);
+int i915_add_request(struct intel_ring_buffer *, struct drm_file *, u32 *);
+int init_pipe_control(struct intel_ring_buffer *);
+void cleanup_status_page(struct intel_ring_buffer *);
+void i915_gem_init_swizzling(struct drm_device *);
+void i915_gem_cleanup_ringbuffer(struct drm_device *);
+int i915_gem_ring_throttle(struct drm_device *, struct drm_file *);
+int i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *,
+ u_int32_t, struct drm_i915_gem_relocation_entry **);
+int i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *,
+ u_int32_t, struct drm_i915_gem_relocation_entry *);
+void i915_dispatch_gem_execbuffer(struct intel_ring_buffer *,
+ struct drm_i915_gem_execbuffer2 *, uint64_t);
+int i915_gem_object_pin_and_relocate(struct drm_obj *,
+ struct drm_file *, struct drm_i915_gem_exec_object2 *,
+ struct drm_i915_gem_relocation_entry *);
+
+struct drm_obj *i915_gem_find_inactive_object(struct inteldrm_softc *,
+ size_t);
+
+extern int i915_gem_get_seqno(struct drm_device *, u32 *);
+
+int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *,
+ bool);
+int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *,
+ u32, struct intel_ring_buffer *);
+int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *,
+ bool);
+int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *);
+int i915_gem_object_wait_rendering(struct drm_i915_gem_object *, bool);
+
+int i915_gem_init(struct drm_device *);
+int i915_gem_mmap_gtt(struct drm_file *, struct drm_device *,
+ uint32_t, uint64_t *);
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
+void sandybridge_write_fence_reg(struct drm_device *, int,
+ struct drm_i915_gem_object *);
+void i965_write_fence_reg(struct drm_device *, int,
+ struct drm_i915_gem_object *);
+void i915_write_fence_reg(struct drm_device *, int,
+ struct drm_i915_gem_object *);
+void i830_write_fence_reg(struct drm_device *, int,
+ struct drm_i915_gem_object *);
+
+int i915_gem_object_finish_gpu(struct drm_i915_gem_object *);
+int i915_gem_init_hw(struct drm_device *);
+
+/* Debug functions, mostly called from ddb */
+void i915_gem_seqno_info(int);
+void i915_interrupt_info(int);
+void i915_gem_fence_regs_info(int);
+void i915_hws_info(int);
+void i915_batchbuffer_info(int);
+void i915_ringbuffer_data(int);
+void i915_ringbuffer_info(int);
+#ifdef WATCH_INACTIVE
+void inteldrm_verify_inactive(struct inteldrm_softc *, char *, int);
+#else
+#define inteldrm_verify_inactive(dev,file,line)
+#endif
+
+void i915_gem_retire_requests(struct inteldrm_softc *);
+int i915_gem_object_unbind(struct drm_i915_gem_object *);
+int i915_wait_seqno(struct intel_ring_buffer *, uint32_t);
+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
+void i915_gem_detach_phys_object(struct drm_device *,
+ struct drm_i915_gem_object *);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *, int, int);
+
+int i915_gem_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *);
+int i915_gem_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t);
+
+/* i915_dma.c */
+void i915_driver_lastclose(struct drm_device *);
+int intel_setup_mchbar(struct inteldrm_softc *,
+ struct pci_attach_args *);
+void intel_teardown_mchbar(struct inteldrm_softc *,
+ struct pci_attach_args *, int);
+int i915_getparam(struct inteldrm_softc *dev_priv, void *data);
+int i915_setparam(struct inteldrm_softc *dev_priv, void *data);
+void i915_kernel_lost_context(struct drm_device *);
+int i915_driver_open(struct drm_device *, struct drm_file *);
+void i915_driver_close(struct drm_device *, struct drm_file *);
+
+/* i915_drv.c */
+void inteldrm_wipe_mappings(struct drm_obj *);
+void inteldrm_set_max_obj_size(struct inteldrm_softc *);
+void inteldrm_purge_obj(struct drm_obj *);
+void i915_gem_chipset_flush(struct drm_device *);
+int intel_gpu_reset(struct drm_device *);
+int i915_reset(struct drm_device *);
+void inteldrm_timeout(void *);
+bool i915_semaphore_is_enabled(struct drm_device *);
+
+/* i915_gem_evict.c */
+int i915_gem_evict_everything(struct inteldrm_softc *);
+int i915_gem_evict_something(struct inteldrm_softc *, size_t);
+int i915_gem_evict_inactive(struct inteldrm_softc *);
+
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct inteldrm_softc *,
+ struct pci_attach_args *);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *);
+int i915_gem_swizzle_page(struct vm_page *page);
+bool i915_tiling_ok(struct drm_device *, int, int, int);
+bool i915_gem_object_fence_ok(struct drm_i915_gem_object *, int);
+
+/* i915_gem_debug.c */
+#define i915_verify_lists(dev) 0
+
+/* i915_suspend.c */
+extern void i915_save_display(struct drm_device *);
+extern void i915_restore_display(struct drm_device *);
+extern int i915_save_state(struct drm_device *);
+extern int i915_restore_state(struct drm_device *);
+
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct inteldrm_softc *);
+struct i2c_controller *intel_gmbus_get_adapter(drm_i915_private_t *, unsigned);
+static inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+ return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+/* i915_gem.c */
+int i915_gem_create(struct drm_file *, struct drm_device *, uint64_t,
+ uint32_t *);
+void init_ring_lists(struct intel_ring_buffer *);
+int i915_gem_fault(struct drm_obj *, struct uvm_faultinfo *, off_t,
+	    vaddr_t, vm_page_t *, int, int, vm_prot_t, int);
+struct drm_i915_gem_object *
+ i915_gem_alloc_object(struct drm_device *, size_t);
+int i915_gpu_idle(struct drm_device *);
+void i915_gem_object_move_to_flushing(struct drm_i915_gem_object *);
+void i915_gem_write_fence(struct drm_device *, int,
+ struct drm_i915_gem_object *);
+void i915_gem_reset_fences(struct drm_device *);
+int i915_gem_object_get_fence(struct drm_i915_gem_object *);
+int i915_gem_object_put_fence(struct drm_i915_gem_object *);
+void i915_gem_reset(struct drm_device *);
+void i915_gem_clflush_object(struct drm_i915_gem_object *);
+void i915_gem_release(struct drm_device *, struct drm_file *);
+void i915_gem_release_mmap(struct drm_i915_gem_object *);
+
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode);
+
+int i915_gem_object_sync(struct drm_i915_gem_object *,
+ struct intel_ring_buffer *);
+
+int i915_mutex_lock_interruptible(struct drm_device *dev);
+
+/* intel_opregion.c */
+int intel_opregion_setup(struct drm_device *dev);
+extern int intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void opregion_asle_intr(struct drm_device *dev);
+extern void opregion_enable_asle(struct drm_device *dev);
+
+/* i915_gem_gtt.c */
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level);
+
+/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_gem_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void intel_init_pch_refclk(struct drm_device *dev);
+extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void intel_detect_pch(struct inteldrm_softc *dev_priv);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+int i915_load_modeset_init(struct drm_device *dev);
+extern int intel_enable_rc6(const struct drm_device *dev);
+
+extern void __gen6_gt_force_wake_get(struct inteldrm_softc *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct inteldrm_softc *dev_priv);
+extern void __gen6_gt_force_wake_put(struct inteldrm_softc *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct inteldrm_softc *dev_priv);
+
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
+ struct drm_device *dev);
+#ifdef notyet
+extern void intel_overlay_print_error_state(struct sbuf *m,
+ struct intel_overlay_error_state *error);
+#endif
+extern struct intel_display_error_state *intel_display_capture_error_state(
+ struct drm_device *dev);
+#ifdef notyet
+extern void intel_display_print_error_state(struct sbuf *m,
+ struct drm_device *dev, struct intel_display_error_state *error);
+#endif
+
+/* On the SNB platform the forcewake bit must be set before reading ring
+ * registers, to prevent the GT core from powering down and returning
+ * stale values.
+ */
+void gen6_gt_force_wake_get(struct inteldrm_softc *dev_priv);
+void gen6_gt_force_wake_put(struct inteldrm_softc *dev_priv);
+int __gen6_gt_wait_for_fifo(struct inteldrm_softc *dev_priv);
+
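+/*
+ * Example (sketch, assuming dev_priv is the softc for a gen6 device and
+ * ACTHD_I965 from i915_reg.h is a representative ring register): bracket
+ * ring-register reads with forcewake so the GT core stays powered while
+ * the read completes, per the comment above.
+ */
+#if 0
+	u_int32_t acthd;
+
+	gen6_gt_force_wake_get(dev_priv);	/* hold GT power up */
+	acthd = I915_READ(ACTHD_I965);		/* read a ring register */
+	gen6_gt_force_wake_put(dev_priv);	/* allow power down again */
+#endif
+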
+int sandybridge_pcode_read(struct inteldrm_softc *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct inteldrm_softc *dev_priv, u8 mbox, u32 val);
+
+/* XXX need bus_space_write_8; a macro version would evaluate its
+ * arguments twice, hence the inline function. */
+static __inline void
+write64(struct inteldrm_softc *dev_priv, bus_size_t off, u_int64_t reg)
+{
+ bus_space_write_4(dev_priv->regs->bst, dev_priv->regs->bsh,
+ off, (u_int32_t)reg);
+ bus_space_write_4(dev_priv->regs->bst, dev_priv->regs->bsh,
+ off + 4, upper_32_bits(reg));
+}
+
+static __inline u_int64_t
+read64(struct inteldrm_softc *dev_priv, bus_size_t off)
+{
+ u_int32_t low, high;
+
+ low = bus_space_read_4(dev_priv->regs->bst,
+ dev_priv->regs->bsh, off);
+ high = bus_space_read_4(dev_priv->regs->bst,
+ dev_priv->regs->bsh, off + 4);
+
+ return ((u_int64_t)low | ((u_int64_t)high << 32));
+}
+
+#define I915_READ64(off) read64(dev_priv, off)
+
+#define I915_WRITE64(off, reg) write64(dev_priv, off, reg)
+
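+/*
+ * Example (sketch): gen6 fence registers are 64 bits wide, so
+ * sandybridge_write_fence_reg() programs them through I915_WRITE64
+ * rather than two open-coded 32-bit writes. FENCE_REG_SANDYBRIDGE_0
+ * comes from i915_reg.h; reg is the fence slot and val the computed
+ * fence word.
+ */
+#if 0
+	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
+#endif
+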
+#define I915_READ(reg) bus_space_read_4(dev_priv->regs->bst, \
+ dev_priv->regs->bsh, (reg))
+#define I915_READ_NOTRACE(reg) I915_READ(reg)
+#define I915_WRITE(reg,val) bus_space_write_4(dev_priv->regs->bst, \
+ dev_priv->regs->bsh, (reg), (val))
+#define I915_WRITE_NOTRACE(reg,val) I915_WRITE(reg, val)
+#define I915_READ16(reg) bus_space_read_2(dev_priv->regs->bst, \
+ dev_priv->regs->bsh, (reg))
+#define I915_WRITE16(reg,val) bus_space_write_2(dev_priv->regs->bst, \
+ dev_priv->regs->bsh, (reg), (val))
+#define I915_READ8(reg) bus_space_read_1(dev_priv->regs->bst, \
+ dev_priv->regs->bsh, (reg))
+#define I915_WRITE8(reg,val) bus_space_write_1(dev_priv->regs->bst, \
+ dev_priv->regs->bsh, (reg), (val))
+
+#define POSTING_READ(reg) (void)I915_READ(reg)
+#define POSTING_READ16(reg) (void)I915_READ16(reg)
+
+#define INTELDRM_VERBOSE 0
+#if INTELDRM_VERBOSE > 0
+#define INTELDRM_VPRINTF(fmt, args...) DRM_INFO(fmt, ##args)
+#else
+#define INTELDRM_VPRINTF(fmt, args...)
+#endif
+
+#define LP_RING(d) (&((struct inteldrm_softc *)(d))->rings[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), (x))
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
+/* MCH IFP BARs */
+#define I915_IFPADDR 0x60
+#define I965_IFPADDR 0x70
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) inteldrm_read_hws(dev_priv, reg)
+#define I915_GEM_HWS_INDEX 0x20
+
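+/*
+ * Example (sketch): the rings publish their last completed seqno into
+ * the driver-usable area of the status page, so a raw read of that slot
+ * would look like this.
+ */
+#if 0
+	u_int32_t seqno = READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+#endif
+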
+#define INTEL_INFO(dev) (((struct inteldrm_softc *) (dev)->dev_private)->info)
+
+/* Chipset type macros */
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
+ (dev)->pci_device == 0x0152 || \
+ (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
+#define IS_I9XX(dev) (INTEL_INFO(dev)->gen >= 3)
+#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->gen == 5)
+
+#define IS_SANDYBRIDGE(dev) (INTEL_INFO(dev)->gen == 6)
+#define IS_SANDYBRIDGE_D(dev) (IS_SANDYBRIDGE(dev) && \
+ (INTEL_INFO(dev)->is_mobile == 0))
+#define IS_SANDYBRIDGE_M(dev) (IS_SANDYBRIDGE(dev) && \
+ (INTEL_INFO(dev)->is_mobile == 1))
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
+#define IS_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0A00)
+
+/*
+ * The genX designation typically refers to the render engine, so render
+ * capability related checks should use IS_GEN, while display and other checks
+ * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
+ * chips, etc.).
+ */
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
+
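+/*
+ * Example (sketch, assuming dev is the drm_device): per the note above,
+ * use IS_GEN for render-engine checks and the display-side macros
+ * (defined below) for display paths.
+ */
+#if 0
+	if (IS_GEN6(dev))
+		; /* gen6 render workaround */
+	if (HAS_PCH_SPLIT(dev))
+		; /* ILK+ PCH display path */
+#endif
+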
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) \
+ (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
+/*
+ * With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changes the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && \
+ !(IS_I915G(dev) || IS_I915GM(dev)))
+
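+/* Example: 32 rows x 128 bytes = 4096 bytes, so a 945+ Y tile covers
+ * exactly one 4KB page.
+ */
+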
+#define HAS_RESET(dev) (INTEL_INFO(dev)->gen >= 4 && \
+ (!IS_GEN6(dev)) && (!IS_GEN7(dev)))
+
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
+#define INTEL_PCH_TYPE(dev) (((struct inteldrm_softc *) (dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define PRIMARY_RINGBUFFER_SIZE (128*1024)
+
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage. This
+ * stage is entered automatically when the GPU is idle and RC6 support is
+ * enabled, and the GPU wakes up automatically as soon as a new workload
+ * arises.
+ *
+ * There are different RC6 modes available in Intel GPUs, which differ in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
+ * RC6pp is the deepest RC6. Hardware support for them varies by GPU,
+ * BIOS, chipset and platform. RC6 is usually the safest one and the one
+ * which brings the most power savings; deeper states save more power, but
+ * require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE (1<<0)
+#define INTEL_RC6p_ENABLE (1<<1)
+#define INTEL_RC6pp_ENABLE (1<<2)
+
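+/*
+ * Example (sketch): a mode allowing plain and deep RC6 but not the
+ * deepest state, such as intel_enable_rc6() might return.
+ */
+#if 0
+	int rc6_mode = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
+
+	if (rc6_mode & INTEL_RC6pp_ENABLE)
+		; /* never taken with the mask above */
+#endif
+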
+extern unsigned int i915_lvds_downclock;
+extern int i915_lvds_channel_mode;
+extern int i915_panel_use_ssc;
+extern int i915_panel_ignore_lid;
+extern unsigned int i915_powersave;
+extern int i915_semaphores;
+extern int i915_vbt_sdvo_panel_type;
+extern int i915_enable_rc6;
+extern int i915_enable_fbc;
+
+/* Inlines */
+
+/**
+ * Returns true if seq1 is later than or equal to seq2, handling 32-bit
+ * seqno wraparound via signed subtraction.
+ */
+static __inline int
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return ((int32_t)(seq1 - seq2) >= 0);
+}
+
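+/*
+ * Wraparound example: i915_seqno_passed(0x00000001, 0xffffffff) is true,
+ * since (int32_t)(0x00000001 - 0xffffffff) == 2, which is >= 0.
+ */
+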
+static inline bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
+#if 0
+static __inline int
+i915_obj_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+ return (obj_priv->base.do_flags & I915_DONTNEED);
+}
+
+static __inline int
+i915_obj_purged(struct drm_i915_gem_object *obj_priv)
+{
+ return (obj_priv->base.do_flags & I915_PURGED);
+}
+#endif
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+ return obj->madv == I915_MADV_DONTNEED;
+}
+
+#endif
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
new file mode 100644
index 00000000000..89a02938512
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -0,0 +1,3229 @@
+/* $OpenBSD: i915_gem.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#include <machine/pmap.h>
+
+#include <sys/queue.h>
+#include <sys/workq.h>
+
+int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj);
+uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
+ int tiling_mode);
+uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
+ uint32_t size, int tiling_mode);
+void i915_gem_object_finish_gtt(struct drm_i915_gem_object *);
+void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *);
+int i915_gem_init_phys_object(struct drm_device *, int, int, int);
+int i915_gem_phys_pwrite(struct drm_device *, struct drm_i915_gem_object *,
+ struct drm_i915_gem_pwrite *, struct drm_file *);
+bool intel_enable_blt(struct drm_device *);
+int i915_gem_handle_seqno_wrap(struct drm_device *);
+void i915_gem_object_update_fence(struct drm_i915_gem_object *,
+ struct drm_i915_fence_reg *, bool);
+int i915_gem_object_flush_fence(struct drm_i915_gem_object *);
+struct drm_i915_fence_reg *i915_find_fence_reg(struct drm_device *);
+void i915_gem_reset_ring_lists(drm_i915_private_t *,
+ struct intel_ring_buffer *);
+void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *);
+void i915_gem_request_remove_from_client(struct drm_i915_gem_request *);
+int i915_gem_object_flush_active(struct drm_i915_gem_object *);
+int i915_gem_check_olr(struct intel_ring_buffer *, u32);
+void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment, bool map_and_fenceable);
+int i915_gem_wait_for_error(struct drm_device *);
+int __wait_seqno(struct intel_ring_buffer *, uint32_t, bool, struct timespec *);
+
+extern int ticks;
+
+static inline void
+i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+// i915_gem_info_add_obj
+// i915_gem_info_remove_obj
+
+int
+i915_gem_wait_for_error(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return (0);
+
+ /*
+ * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+ * userspace. If it takes that long something really bad is going on and
+ * we should simply try to bail out and fail as gracefully as possible.
+ */
+ mtx_enter(&dev_priv->error_completion_lock);
+ while (dev_priv->error_completion == 0) {
+ ret = -msleep(&dev_priv->error_completion,
+ &dev_priv->error_completion_lock, PCATCH, "915wco", 10*hz);
+ if (ret != 0) {
+ mtx_leave(&dev_priv->error_completion_lock);
+ return (ret);
+ }
+ }
+ mtx_leave(&dev_priv->error_completion_lock);
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ mtx_enter(&dev_priv->error_completion_lock);
+ dev_priv->error_completion++;
+ mtx_leave(&dev_priv->error_completion_lock);
+ }
+ return (0);
+}
+
+int
+i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ int ret;
+
+ ret = i915_gem_wait_for_error(dev);
+ if (ret)
+ return ret;
+
+ ret = rw_enter(&dev->dev_lock, RW_WRITE | RW_INTR);
+ if (ret)
+ return ret;
+
+ WARN_ON(i915_verify_lists(dev));
+ return 0;
+}
+
+static inline bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
+{
+ return obj->dmamap && !obj->active && obj->pin_count == 0;
+}
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_init *args = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return (ENODEV);
+
+ DRM_LOCK();
+
+ if (args->gtt_start >= args->gtt_end ||
+ args->gtt_end > dev->agp->info.ai_aperture_size ||
+ (args->gtt_start & PAGE_MASK) != 0 ||
+ (args->gtt_end & PAGE_MASK) != 0) {
+ DRM_UNLOCK();
+ return (EINVAL);
+ }
+ /*
+ * putting stuff in the last page of the aperture can cause nasty
+ * problems with prefetch going into unassigned memory. Since we put
+ * a scratch page on all unused aperture pages, just leave the last
+ * page as a spill to prevent gpu hangs.
+ */
+ if (args->gtt_end == dev->agp->info.ai_aperture_size)
+ args->gtt_end -= 4096;
+
+ if (agp_bus_dma_init((struct agp_softc *)dev->agp->agpdev,
+ dev->agp->base + args->gtt_start, dev->agp->base + args->gtt_end,
+ &dev_priv->agpdmat) != 0) {
+ DRM_UNLOCK();
+ return (ENOMEM);
+ }
+
+ dev->gtt_total = (uint32_t)(args->gtt_end - args->gtt_start);
+ inteldrm_set_max_obj_size(dev_priv);
+
+ DRM_UNLOCK();
+
+ return 0;
+}
+
+int
+i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_get_aperture *args = data;
+
+ /* we need a write lock here to make sure we get the right value */
+ DRM_LOCK();
+ args->aper_size = dev->gtt_total;
+ args->aper_available_size = (args->aper_size -
+ atomic_read(&dev->pin_memory));
+ DRM_UNLOCK();
+
+ return (0);
+}
+
+int
+i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
+ uint32_t *handle_p)
+{
+ struct drm_i915_gem_object *obj;
+ uint32_t handle;
+ int ret;
+
+ size = round_page(size);
+ if (size == 0)
+ return (-EINVAL);
+
+ obj = i915_gem_alloc_object(dev, size);
+ if (obj == NULL)
+ return (-ENOMEM);
+
+ handle = 0;
+ ret = drm_handle_create(file, &obj->base, &handle);
+ if (ret != 0) {
+ drm_unref(&obj->base.uobj);
+ return (-ret);
+ }
+
+ *handle_p = handle;
+ return (0);
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+
+ /* have to work out size/pitch and return them */
+ args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
+ return (i915_gem_create(file, dev, args->size, &args->handle));
+}
+
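+/*
+ * Example: a 1024x768 dumb buffer at 32bpp gets
+ * pitch = roundup2(1024 * 4, 64) = 4096 bytes and
+ * size = 4096 * 768 = 3145728 bytes (3MB).
+ */
+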
+int
+i915_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+
+ printf("%s stub\n", __func__);
+ return ENOSYS;
+// return (drm_gem_handle_delete(file, handle));
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_create *args = data;
+ struct drm_i915_gem_object *obj;
+ int handle, ret;
+
+ args->size = round_page(args->size);
+ /*
+ * XXX to avoid copying between 2 objs more than half the aperture size
+ * we don't allow allocations that are that big. This will be fixed
+ * eventually by intelligently falling back to cpu reads/writes in
+ * such cases. (linux allows this but does cpu maps in the ddx instead).
+ */
+ if (args->size > dev_priv->max_gem_obj_size)
+ return (EFBIG);
+
+ /* Allocate the new object */
+ obj = i915_gem_alloc_object(dev, args->size);
+ if (obj == NULL)
+ return (ENOMEM);
+
+ /* we give our reference to the handle */
+ ret = drm_handle_create(file_priv, &obj->base, &handle);
+
+ if (ret == 0)
+ args->handle = handle;
+ else
+ drm_unref(&obj->base.uobj);
+
+ return (ret);
+}
+
+int
+i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+ return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ obj->tiling_mode != I915_TILING_NONE;
+}
+
+// __copy_to_user_swizzled
+// __copy_from_user_swizzled
+// shmem_pread_fast
+// shmem_clflush_swizzled_range
+// shmem_pread_slow
+// i915_gem_shmem_pread
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_pread *args = data;
+ struct drm_i915_gem_object *obj;
+ char *vaddr;
+ bus_space_handle_t bsh;
+ bus_size_t bsize;
+ voff_t offset;
+ int ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (obj == NULL)
+ return (EBADF);
+ DRM_READLOCK();
+ drm_hold_object(&obj->base);
+
+ /*
+ * Bounds check source.
+ */
+ if (args->offset > obj->base.size || args->size > obj->base.size ||
+ args->offset + args->size > obj->base.size) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, 1);
+ if (ret) {
+ goto out;
+ }
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ goto unpin;
+
+ offset = obj->gtt_offset + args->offset;
+ bsize = round_page(offset + args->size) - trunc_page(offset);
+
+ if ((ret = agp_map_subregion(dev_priv->agph,
+ trunc_page(offset), bsize, &bsh)) != 0)
+ goto unpin;
+ vaddr = bus_space_vaddr(dev->bst, bsh);
+ if (vaddr == NULL) {
+ ret = EFAULT;
+ goto unmap;
+ }
+
+ ret = copyout(vaddr + (offset & PAGE_MASK),
+ (char *)(uintptr_t)args->data_ptr, args->size);
+
+unmap:
+ agp_unmap_subregion(dev_priv->agph, bsh, bsize);
+unpin:
+ i915_gem_object_unpin(obj);
+out:
+ drm_unhold_and_unref(&obj->base);
+ DRM_READUNLOCK();
+
+ return (ret);
+}
+
+// fast_user_write
+// i915_gem_gtt_pwrite_fast
+// shmem_pwrite_fast
+// shmem_pwrite_slow
+// i915_gem_shmem_pwrite
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_i915_gem_object *obj;
+ char *vaddr;
+ bus_space_handle_t bsh;
+ bus_size_t bsize;
+ off_t offset;
+ int ret = 0;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (obj == NULL)
+ return (EBADF);
+ DRM_READLOCK();
+ drm_hold_object(&obj->base);
+
+ /* Bounds check destination. */
+ if (args->offset > obj->base.size || args->size > obj->base.size ||
+ args->offset + args->size > obj->base.size) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (obj->phys_obj) {
+ ret = i915_gem_phys_pwrite(dev, obj, args, file);
+ goto out;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, 1);
+ if (ret) {
+ goto out;
+ }
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto unpin;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto unpin;
+
+ offset = obj->gtt_offset + args->offset;
+ bsize = round_page(offset + args->size) - trunc_page(offset);
+
+ if ((ret = agp_map_subregion(dev_priv->agph,
+ trunc_page(offset), bsize, &bsh)) != 0)
+ goto unpin;
+ vaddr = bus_space_vaddr(dev_priv->bst, bsh);
+ if (vaddr == NULL) {
+ ret = EFAULT;
+ goto unmap;
+ }
+
+ ret = copyin((char *)(uintptr_t)args->data_ptr,
+ vaddr + (offset & PAGE_MASK), args->size);
+
+unmap:
+ agp_unmap_subregion(dev_priv->agph, bsh, bsize);
+unpin:
+ i915_gem_object_unpin(obj);
+out:
+ drm_unhold_and_unref(&obj->base);
+ DRM_READUNLOCK();
+
+ return (ret);
+}
+
+int
+i915_gem_check_wedge(struct inteldrm_softc *dev_priv,
+ bool interruptible)
+{
+ if (atomic_read(&dev_priv->mm.wedged) != 0) {
+ bool recovery_complete;
+
+ /* Give the error handler a chance to run. */
+ mtx_enter(&dev_priv->error_completion_lock);
+		recovery_complete = dev_priv->error_completion > 0;
+ mtx_leave(&dev_priv->error_completion_lock);
+
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but still wedged means reset failure. */
+ if (recovery_complete)
+ return -EIO;
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret;
+
+// BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+ ret = 0;
+ if (seqno == ring->outstanding_lazy_request)
+ ret = i915_add_request(ring, NULL, NULL);
+
+ return ret;
+}
+
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number to wait for
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Else returns the
+ * errno with remaining time filled in timeout argument.
+ */
+int
+__wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno,
+ bool interruptible, struct timespec *timeout)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ mtx_enter(&dev_priv->irq_lock);
+ if (!i915_seqno_passed(ring->get_seqno(ring, true), seqno)) {
+ ring->irq_get(ring);
+ while (ret == 0) {
+ if (i915_seqno_passed(ring->get_seqno(ring, false),
+ seqno) || dev_priv->mm.wedged)
+ break;
+ ret = msleep(ring, &dev_priv->irq_lock,
+ PZERO | (interruptible ? PCATCH : 0),
+ "gemwt", 0);
+ }
+ ring->irq_put(ring);
+ }
+ mtx_leave(&dev_priv->irq_lock);
+ if (dev_priv->mm.wedged)
+ ret = EIO;
+
+ return (ret);
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool interruptible = dev_priv->mm.interruptible;
+ int ret;
+
+// BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ return (ret);
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ return __wait_seqno(ring, seqno, interruptible, NULL);
+}
+
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ return 0;
+}
+
+// i915_gem_object_wait_rendering__nonblocking
+
+/**
+ * Called when user space prepares to use an object with the CPU, either through
+ * the mmap ioctl's mapping or a GTT mapping.
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+	struct drm_i915_gem_set_domain *args = data;
+ struct drm_i915_gem_object *obj;
+ uint32_t read_domains = args->read_domains;
+ uint32_t write_domain = args->write_domain;
+ int ret;
+
+ /*
+ * Only handle setting domains to types we allow the cpu to see.
+ * while linux allows the CPU domain here, we only allow GTT since that
+ * is all that we let userland near.
+ * Also sanity check that having something in the write domain implies
+ * it's in the read domain, and only that read domain.
+ */
+ if ((write_domain | read_domains) & ~I915_GEM_DOMAIN_GTT ||
+ (write_domain != 0 && read_domains != write_domain))
+ return (EINVAL);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (obj == NULL) {
+ ret = EBADF;
+ goto unlock;
+ }
+
+ drm_hold_object(&obj->base);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ drm_unhold_and_unref(&obj->base);
+
+ /*
+ * Silently promote `you're not bound, there was nothing to do'
+ * to success, since the client was just asking us to make sure
+ * everything was done.
+ */
+ if (ret == EINVAL)
+ ret = 0;
+
+unlock:
+ DRM_UNLOCK();
+ return ret;
+}
+
+// i915_gem_sw_finish_ioctl
+// i915_gem_mmap_ioctl
+
+int
+i915_gem_fault(struct drm_obj *gem_obj, struct uvm_faultinfo *ufi,
+ off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages, int centeridx,
+ vm_prot_t access_type, int flags)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ paddr_t paddr;
+ int lcv, ret;
+ int write = !!(access_type & VM_PROT_WRITE);
+ vm_prot_t mapprot;
+ boolean_t locked = TRUE;
+
+	/* Are we about to suspend? If so, wait until we're done. */
+ if (dev_priv->sc_flags & INTELDRM_QUIET) {
+ /* we're about to sleep, unlock the map etc */
+ uvmfault_unlockall(ufi, NULL, &obj->base.uobj, NULL);
+ while (dev_priv->sc_flags & INTELDRM_QUIET)
+ tsleep(&dev_priv->flags, 0, "intelflt", 0);
+ dev_priv->entries++;
+ /*
+ * relock so we're in the same state we would be in if we
+ * were not quiesced before
+ */
+ locked = uvmfault_relock(ufi);
+ if (locked) {
+ drm_lock_obj(&obj->base);
+ } else {
+ dev_priv->entries--;
+ if (dev_priv->sc_flags & INTELDRM_QUIET)
+ wakeup(&dev_priv->entries);
+ return (VM_PAGER_REFAULT);
+ }
+ } else {
+ dev_priv->entries++;
+ }
+
+ if (rw_enter(&dev->dev_lock, RW_NOSLEEP | RW_READ) != 0) {
+ uvmfault_unlockall(ufi, NULL, &obj->base.uobj, NULL);
+ DRM_READLOCK();
+ locked = uvmfault_relock(ufi);
+ if (locked)
+ drm_lock_obj(&obj->base);
+ }
+ if (locked)
+ drm_hold_object_locked(&obj->base);
+ else { /* obj already unlocked */
+ dev_priv->entries--;
+ if (dev_priv->sc_flags & INTELDRM_QUIET)
+ wakeup(&dev_priv->entries);
+ return (VM_PAGER_REFAULT);
+ }
+
+ /* we have a hold set on the object now, we can unlock so that we can
+ * sleep in binding and flushing.
+ */
+ drm_unlock_obj(&obj->base);
+
+ /* Now bind into the GTT if needed */
+ if (!obj->map_and_fenceable) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ goto error;
+ }
+
+ if (obj->dmamap == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+ if (ret)
+ goto error;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto error;
+ }
+
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto error;
+
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ obj->fault_mappable = true;
+
+ mapprot = ufi->entry->protection;
+ /*
+ * if it's only a read fault, we only put ourselves into the gtt
+ * read domain, so make sure we fault again and set ourselves to write.
+ * this prevents us needing userland to do domain management and get
+ * it wrong, and makes us fully coherent with the gpu re mmap.
+ */
+ if (write == 0)
+ mapprot &= ~VM_PROT_WRITE;
+ /* XXX try and be more efficient when we do this */
+ for (lcv = 0 ; lcv < npages ; lcv++, offset += PAGE_SIZE,
+ vaddr += PAGE_SIZE) {
+ if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
+ continue;
+
+ if (pps[lcv] == PGO_DONTCARE)
+ continue;
+
+ paddr = dev->agp->base + obj->gtt_offset + offset;
+
+ if (pmap_enter(ufi->orig_map->pmap, vaddr, paddr,
+ mapprot, PMAP_CANFAIL | mapprot) != 0) {
+ drm_unhold_object(&obj->base);
+ uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
+ NULL, NULL);
+ DRM_READUNLOCK();
+ dev_priv->entries--;
+ if (dev_priv->sc_flags & INTELDRM_QUIET)
+ wakeup(&dev_priv->entries);
+ uvm_wait("intelflt");
+ return (VM_PAGER_REFAULT);
+ }
+ }
+error:
+ drm_unhold_object(&obj->base);
+ uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL, NULL);
+ DRM_READUNLOCK();
+ dev_priv->entries--;
+ if (dev_priv->sc_flags & INTELDRM_QUIET)
+ wakeup(&dev_priv->entries);
+ pmap_update(ufi->orig_map->pmap);
+ if (ret == EIO) {
+ /*
+ * EIO means we're wedged, so upon resetting the gpu we'll
+ * be alright and can refault. XXX only on resettable chips.
+ */
+ ret = VM_PAGER_REFAULT;
+ } else if (ret) {
+ ret = VM_PAGER_ERROR;
+ } else {
+ ret = VM_PAGER_OK;
+ }
+ return (ret);
+}
+
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct inteldrm_softc *dev_priv = obj->base.dev->dev_private;
+ struct vm_page *pg;
+
+ if (!obj->fault_mappable)
+ return;
+
+ for (pg = &dev_priv->pgs[atop(obj->gtt_offset)];
+ pg != &dev_priv->pgs[atop(obj->gtt_offset + obj->base.size)];
+ pg++)
+ pmap_page_protect(pg, VM_PROT_NONE);
+
+ obj->fault_mappable = false;
+}
+
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+{
+ uint32_t gtt_size;
+
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ tiling_mode == I915_TILING_NONE)
+ return size;
+
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (INTEL_INFO(dev)->gen == 3)
+ gtt_size = 1024*1024;
+ else
+ gtt_size = 512*1024;
+
+ while (gtt_size < size)
+ gtt_size <<= 1;
+
+ return gtt_size;
+}
+
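+/*
+ * Example: a 600KB tiled object needs a 1MB fence region on gen3 (the
+ * 1MB minimum already covers it) and also 1MB on gen2 (512KB doubled
+ * once to cover 600KB).
+ */
+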
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping.
+ */
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode)
+{
+ /*
+ * Minimum alignment is 4k (GTT page size), but might be greater
+ * if a fence register is needed for the object.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ tiling_mode == I915_TILING_NONE)
+ return 4096;
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
+}
+
+/**
+ * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
+ * unfenced object
+ * @dev: the device
+ * @size: size of the object
+ * @tiling_mode: tiling mode of the object
+ *
+ * Return the required GTT alignment for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode)
+{
+ /*
+ * Minimum alignment is 4k (GTT page size) for sane hw.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+ tiling_mode == I915_TILING_NONE)
+ return 4096;
+
+ /* Previous hardware however needs to be aligned to a power-of-two
+ * tile height. The simplest method for determining this is to reuse
+	 * the power-of-two fence region size.
+ */
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
+}
+
+// i915_gem_object_create_mmap_offset
+// i915_gem_object_free_mmap_offset
+
+int
+i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *mmap_offset)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_local_map *map;
+ voff_t offset;
+ vsize_t end, nsize;
+ int ret;
+
+ offset = (voff_t)*mmap_offset;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (obj == NULL)
+ return (EBADF);
+
+ /* Since we are doing purely uvm-related operations here we do
+ * not need to hold the object, a reference alone is sufficient
+ */
+
+ /* Check size. */
+ if (offset > obj->base.size) {
+ ret = EINVAL;
+ goto done;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+ ret = EINVAL;
+ goto done;
+ }
+
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+ if (ret) {
+ printf("%s: failed to bind\n", __func__);
+ goto done;
+ }
+ i915_gem_object_move_to_inactive(obj);
+
+ end = round_page(offset + obj->base.size);
+ offset = trunc_page(offset);
+ nsize = end - offset;
+
+ ret = drm_addmap(dev, offset + obj->gtt_offset, nsize, _DRM_AGP,
+ _DRM_WRITE_COMBINING, &map);
+
+done:
+ if (ret == 0)
+ *mmap_offset = map->ext;
+ else
+ drm_unref(&obj->base.uobj);
+
+ return (ret);
+}
+
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct inteldrm_softc *dev_priv;
+ struct drm_i915_gem_mmap_gtt *args;
+
+ dev_priv = dev->dev_private;
+ args = data;
+
+ return (i915_gem_mmap_gtt(file, dev, args->handle, &args->offset));
+}
+
+/* Immediately discard the backing storage */
+void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+ DRM_ASSERT_HELD(&obj->base);
+
+ simple_lock(&obj->base.uao->vmobjlock);
+ obj->base.uao->pgops->pgo_flush(obj->base.uao, 0, obj->base.size,
+ PGO_ALLPAGES | PGO_FREE);
+ simple_unlock(&obj->base.uao->vmobjlock);
+
+ obj->madv = __I915_MADV_PURGED;
+}
+
+// i915_gem_object_is_purgeable
+// i915_gem_object_put_pages
+// __i915_gem_shrink
+// i915_gem_purge
+// i915_gem_shrink_all
+// i915_gem_object_get_pages
+
+int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+#if 0
+ int page_count, i;
+ struct address_space *mapping;
+ struct inode *inode;
+ struct page *page;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->base.size / PAGE_SIZE;
+ BUG_ON(obj->pages != NULL);
+ obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+ if (obj->pages == NULL)
+ return -ENOMEM;
+
+ inode = obj->base.filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < page_count; i++) {
+ page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(page))
+ goto err_pages;
+
+ obj->pages[i] = page;
+ }
+#endif
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ return 0;
+
+#if 0
+err_pages:
+ while (i--)
+ page_cache_release(obj->pages[i]);
+
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return PTR_ERR(page);
+#endif
+}
+
+void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+#if 0
+ int page_count = obj->base.size / PAGE_SIZE;
+ int i;
+#endif
+
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_save_bit_17_swizzle(obj);
+
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
+
+#if 0
+ for (i = 0; i < page_count; i++) {
+ if (obj->dirty)
+ set_page_dirty(obj->pages[i]);
+
+ if (obj->madv == I915_MADV_WILLNEED)
+ mark_page_accessed(obj->pages[i]);
+
+ page_cache_release(obj->pages[i]);
+ }
+#endif
+ obj->dirty = 0;
+
+#if 0
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+#endif
+}
+
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u_int32_t seqno;
+
+ BUG_ON(ring == NULL);
+
+ seqno = intel_ring_get_seqno(ring);
+ obj->ring = ring;
+
+ /* Add a reference if we're newly entering the active list. */
+ if (!obj->active) {
+ drm_gem_object_reference(&obj->base);
+ obj->active = 1;
+ }
+
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_read_seqno = seqno;
+
+ if (obj->fenced_gpu_access) {
+ obj->last_fenced_seqno = seqno;
+
+ /* Bump MRU to take account of the delayed flush */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ }
+ }
+}
+
+/* called locked */
+void
+i915_gem_object_move_to_inactive_locked(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ DRM_OBJ_ASSERT_LOCKED(&obj->base);
+ inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
+ BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
+ BUG_ON(!obj->active);
+
+ if (obj->pin_count != 0)
+ list_del_init(&obj->mm_list);
+ else
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ list_del_init(&obj->ring_list);
+ obj->ring = NULL;
+
+ obj->last_read_seqno = 0;
+ obj->last_write_seqno = 0;
+ obj->base.write_domain = 0;
+
+ obj->last_fenced_seqno = 0;
+ obj->fenced_gpu_access = false;
+
+ obj->active = 0;
+ drm_gem_object_unreference(&obj->base);
+
+ inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
+}
+
+/* If you call this on an object that you have held, you must have your own
+ * reference, not just the reference from the active list.
+ */
+void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+ drm_lock_obj(&obj->base);
+ /* unlocks object lock */
+ i915_gem_object_move_to_inactive_locked(obj);
+}
+
+int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i, j;
+
+ /* The hardware uses various monotonic 32-bit counters, if we
+ * detect that they will wraparound we need to idle the GPU
+ * and reset those counters.
+ */
+ ret = 0;
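+ /* ret doubles as a flag here: non-zero means some inter-ring
+ * sync seqno is still recorded and must be reset before wrap.
+ */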
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < nitems(ring->sync_seqno); j++)
+ ret |= ring->sync_seqno[j] != 0;
+ }
+ if (ret == 0)
+ return ret;
+
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev_priv);
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < nitems(ring->sync_seqno); j++)
+ ring->sync_seqno[j] = 0;
+ }
+
+ return 0;
+}
+
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_handle_seqno_wrap(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = 1;
+ }
+
+ *seqno = dev_priv->next_seqno++;
+ return 0;
+}
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will fire if interrupts are currently
+ * enabled.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+int
+i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ u32 *out_seqno)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_request *request;
+ u32 request_ring_position;
+ int was_empty, ret;
+
+ /*
+ * Emit any outstanding flushes - execbuf can fail to emit the flush
+ * after having emitted the batchbuffer command. Hence we need to fix
+ * things up similar to emitting the lazy request. The difference here
+ * is that the flush _must_ happen before the next request, no matter
+ * what.
+ */
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
+
+ request = drm_calloc(1, sizeof(*request));
+ if (request == NULL) {
+ printf("%s: failed to allocate request\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Record the position of the start of the request so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the head.
+ */
+ request_ring_position = intel_ring_get_tail(ring);
+
+ ret = ring->add_request(ring);
+ if (ret) {
+ drm_free(request);
+ return ret;
+ }
+
+ request->seqno = intel_ring_get_seqno(ring);
+ request->ring = ring;
+ request->tail = request_ring_position;
+ request->emitted_ticks = ticks;
+ was_empty = list_empty(&ring->request_list);
+ list_add_tail(&request->list, &ring->request_list);
+
+ DRM_DEBUG("%d\n", request->seqno);
+
+ if (file) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ mtx_enter(&file_priv->mm.lock);
+ request->file_priv = file_priv;
+ list_add_tail(&request->client_list,
+ &file_priv->mm.request_list);
+ mtx_leave(&file_priv->mm.lock);
+ }
+
+ ring->outstanding_lazy_request = 0;
+
+ if (dev_priv->mm.suspended == 0) {
+ if (was_empty)
+ timeout_add_sec(&dev_priv->mm.retire_timer, 1);
+ /* XXX was_empty? */
+ timeout_add_msec(&dev_priv->hangcheck_timer,
+ DRM_I915_HANGCHECK_PERIOD);
+ }
+
+ if (out_seqno)
+ *out_seqno = request->seqno;
+ return 0;
+}
+
+void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_file_private *file_priv = request->file_priv;
+
+ if (!file_priv)
+ return;
+
+ mtx_enter(&file_priv->mm.lock);
+ if (request->file_priv) {
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ mtx_leave(&file_priv->mm.lock);
+}
+
+void
+i915_gem_reset_ring_lists(drm_i915_private_t *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ free(request, M_DRM);
+ }
+
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+}
+
+void
+i915_gem_reset_fences(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ i915_gem_write_fence(dev, i, NULL);
+
+ if (reg->obj)
+ i915_gem_object_fence_lost(reg->obj);
+
+ reg->pin_count = 0;
+ reg->obj = NULL;
+ INIT_LIST_HEAD(&reg->lru_list);
+ }
+
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+}
+
+void
+i915_gem_reset(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int i;
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->rings[i]);
+
+ /* Move everything out of the GPU domains to ensure we do any
+ * necessary invalidation upon reuse.
+ */
+ list_for_each_entry(obj,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ {
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ /* The fence registers are invalidated so clear them out */
+ i915_gem_reset_fences(dev);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+{
+ uint32_t seqno;
+
+ if (list_empty(&ring->request_list))
+ return;
+
+ seqno = ring->get_seqno(ring, true);
+
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ if (!i915_seqno_passed(seqno, request->seqno))
+ break;
+
+// trace_i915_gem_request_retire(ring, request->seqno);
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of the tail of the request to update the last known position
+ * of the GPU head.
+ */
+ ring->last_retired_head = request->tail;
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ drm_free(request);
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+ break;
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+}
+
+void
+i915_gem_retire_requests(struct inteldrm_softc *dev_priv)
+{
+ struct intel_ring_buffer *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_retire_requests_ring(ring);
+}
+
+void
+i915_gem_retire_work_handler(void *arg1, void *unused)
+{
+ struct inteldrm_softc *dev_priv = arg1;
+ struct intel_ring_buffer *ring;
+ bool idle;
+ int i;
+
+ i915_gem_retire_requests(dev_priv);
+ idle = true;
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->gpu_caches_dirty)
+ i915_add_request(ring, NULL, NULL);
+
+ idle &= list_empty(&ring->request_list);
+ }
+ if (!dev_priv->mm.suspended && !idle)
+ timeout_add_sec(&dev_priv->mm.retire_timer, 1);
+}
+
+/**
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring any
+ * completed requests.
+ */
+int
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->active) {
+ ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(obj->ring);
+ }
+
+ return 0;
+}
+
+// i915_gem_wait_ioctl
+
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj, false);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_read_seqno;
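+ /* Nothing to do if an earlier sync on this ring pair
+ * already covered this seqno.
+ */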
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ ret = i915_gem_check_olr(obj->ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = to->sync_to(to, from, seqno);
+ if (!ret)
+ /* We use last_read_seqno because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->sync_seqno[idx] = obj->last_read_seqno;
+
+ return ret;
+}
+
+void
+i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+ u32 old_write_domain, old_read_domains;
+
+ /* Act as a barrier for all accesses through the GTT */
+ DRM_MEMORYBARRIER();
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ return;
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+#if 0
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+#endif
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ *
+ * XXX track dirty and pass down to uvm (note, DONTNEED buffers are clean).
+ */
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ DRM_ASSERT_HELD(&obj->base);
+ /*
+ * if it's already unbound, or we've already done lastclose, just
+ * let it happen. XXX does this fail to unwire?
+ */
+ if (obj->dmamap == NULL || dev_priv->agpdmat == NULL)
+ return 0;
+
+ if (obj->pin_count != 0) {
+ DRM_ERROR("Attempting to unbind pinned buffer\n");
+ return (EINVAL);
+ }
+
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret == ERESTART || ret == EINTR)
+ return ret;
+ /* Continue on if we fail due to EIO, the GPU is hung so we
+ * should be safe and we need to cleanup or else we might
+ * cause memory corruption through use-after-free.
+ */
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* release the fence reg _after_ flushing */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret == ERESTART || ret == EINTR)
+ return ret;
+
+ i915_gem_object_put_pages_gtt(obj);
+
+ /*
+ * unload the map, then unwire the backing object.
+ */
+ bus_dmamap_unload(dev_priv->agpdmat, obj->dmamap);
+ uvm_objunwire(obj->base.uao, 0, obj->base.size);
+ /* XXX persistent dmamap worth the memory? */
+ bus_dmamap_destroy(dev_priv->agpdmat, obj->dmamap);
+ obj->dmamap = NULL;
+ free(obj->dma_segs, M_DRM);
+ obj->dma_segs = NULL;
+
+ list_del_init(&obj->gtt_list);
+ list_del_init(&obj->mm_list);
+ /* Avoid an unnecessary call to unbind on rebind. */
+ obj->map_and_fenceable = true;
+
+ obj->gtt_offset = 0;
+ atomic_dec(&dev->gtt_count);
+ atomic_sub(obj->base.size, &dev->gtt_memory);
+
+ if (i915_gem_object_is_purgeable(obj))
+ i915_gem_object_truncate(obj);
+
+ return ret;
+}
+
+int
+i915_gpu_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i;
+
+ /* Flush everything onto the inactive list. */
+ for_each_ring(ring, dev_priv, i) {
+#ifdef notyet
+ ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+ if (ret)
+ return ret;
+#endif
+
+ ret = intel_ring_idle(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+sandybridge_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint64_t val;
+
+ if (obj) {
+ u32 size = obj->dmamap->dm_segs[0].ds_len;
+
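+ /* 64-bit fence layout: end address in the high dword; start
+ * address, pitch and tiling bits in the low dword.
+ */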
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
+}
+
+void
+i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint64_t val;
+
+ if (obj) {
+ u32 size = obj->dmamap->dm_segs[0].ds_len;
+
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_965_0 + reg * 8);
+}
+
+void
+i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (obj) {
+ u32 size = obj->dmamap->dm_segs[0].ds_len;
+ int pitch_val;
+ int tile_width;
+
+ WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size);
+
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
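+ /* Fences 0-7 live in the original 830 register block; 945-class
+ * hardware added eight more at a separate offset.
+ */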
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+void
+i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ if (obj) {
+ u32 size = obj->dmamap->dm_segs[0].ds_len;
+ uint32_t pitch_val;
+
+ WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+void
+i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+ case 5:
+ case 4: i965_write_fence_reg(dev, reg, obj); break;
+ case 3: i915_write_fence_reg(dev, reg, obj); break;
+ case 2: i830_write_fence_reg(dev, reg, obj); break;
+ default: break;
+ }
+}
+
+static inline int
+fence_number(drm_i915_private_t *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
+}
+
+void
+i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
+}
+
+int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->last_fenced_seqno) {
+ int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ if (ret)
+ return ret;
+
+ obj->last_fenced_seqno = 0;
+ }
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+ DRM_WRITEMEMORYBARRIER();
+
+ obj->fenced_gpu_access = false;
+ return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ ret = i915_gem_object_flush_fence(obj);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
+
+ i915_gem_object_update_fence(obj,
+ &dev_priv->fence_regs[obj->fence_reg],
+ false);
+ i915_gem_object_fence_lost(obj);
+
+ return 0;
+}
+
+struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_fence_reg *reg, *avail;
+ int i;
+
+ /* First try to find a free reg */
+ avail = NULL;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return reg;
+
+ if (!reg->pin_count)
+ avail = reg;
+ }
+
+ if (avail == NULL)
+ return NULL;
+
+ /* None available, try to steal one or wait for a user to finish */
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->pin_count)
+ continue;
+
+ return reg;
+ }
+
+ return NULL;
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
+ struct drm_i915_fence_reg *reg;
+ int ret;
+
+ /* Have we updated the tiling parameters upon the object and so
+ * will need to serialise the write to the associated fence register?
+ */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_flush_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ return 0;
+ }
+ } else if (enable) {
+ reg = i915_find_fence_reg(dev);
+ if (reg == NULL)
+ return -EDEADLK;
+
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ ret = i915_gem_object_flush_fence(old);
+ if (ret)
+ return ret;
+
+ i915_gem_object_fence_lost(old);
+ }
+ } else
+ return 0;
+
+ i915_gem_object_update_fence(obj, reg, enable);
+ obj->fence_dirty = false;
+
+ return 0;
+}
+
+// i915_gem_valid_gtt_space
+// i915_gem_verify_gtt
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+int
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment, bool map_and_fenceable)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 size, fence_size, fence_alignment, unfenced_alignment;
+ bool mappable, fenceable;
+ int ret;
+ int flags;
+
+ DRM_ASSERT_HELD(&obj->base);
+
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to bind a purgeable object\n");
+ return EINVAL;
+ }
+
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode);
+
+ if (alignment == 0)
+ alignment = map_and_fenceable ? fence_alignment :
+ unfenced_alignment;
+ if (map_and_fenceable && alignment & (fence_alignment - 1)) {
+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+ return EINVAL;
+ }
+
+ size = map_and_fenceable ? fence_size : obj->base.size;
+
+#ifdef notyet
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->base.size >
+ (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ return -E2BIG;
+ }
+#endif
+
+ if ((ret = bus_dmamap_create(dev_priv->agpdmat, size, 1,
+ size, 0, BUS_DMA_WAITOK, &obj->dmamap)) != 0) {
+ DRM_ERROR("Failed to create dmamap\n");
+ return (ret);
+ }
+ agp_bus_dma_set_alignment(dev_priv->agpdmat, obj->dmamap,
+ alignment);
+
+ search_free:
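+ /* Translate the object's cache level into the matching GTT
+ * mapping flags for the dma load below.
+ */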
+ switch (obj->cache_level) {
+ case I915_CACHE_NONE:
+ flags = BUS_DMA_GTT_NOCACHE;
+ break;
+ case I915_CACHE_LLC:
+ flags = BUS_DMA_GTT_CACHE_LLC;
+ break;
+ case I915_CACHE_LLC_MLC:
+ flags = BUS_DMA_GTT_CACHE_LLC_MLC;
+ break;
+ default:
+ BUG();
+ }
+ /*
+ * the helper function wires the uao then binds it to the aperture for
+ * us, so all we have to do is set up the dmamap then load it.
+ */
+ ret = drm_gem_load_uao(dev_priv->agpdmat, obj->dmamap, obj->base.uao,
+ obj->base.size, BUS_DMA_WAITOK | obj->dma_flags | flags,
+ &obj->dma_segs);
+ /* XXX NOWAIT? */
+ if (ret != 0) {
+ /* If the gtt is empty and we're still having trouble
+ * fitting our object in, we're out of memory.
+ */
+ if (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.active_list)) {
+ DRM_ERROR("GTT full, but LRU list empty\n");
+ goto error;
+ }
+
+ ret = i915_gem_evict_something(dev_priv, obj->base.size);
+ if (ret != 0)
+ goto error;
+ goto search_free;
+ }
+
+ i915_gem_object_get_pages_gtt(obj);
+
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ obj->gtt_offset = obj->dmamap->dm_segs[0].ds_addr - dev->agp->base;
+
+ fenceable =
+ obj->dmamap->dm_segs[0].ds_len == fence_size &&
+ (obj->dmamap->dm_segs[0].ds_addr & (fence_alignment - 1)) == 0;
+
+#ifdef notyet
+ mappable =
+ obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+#else
+ mappable = true;
+#endif
+
+ obj->map_and_fenceable = mappable && fenceable;
+
+ atomic_inc(&dev->gtt_count);
+ atomic_add(obj->base.size, &dev->gtt_memory);
+
+ return (0);
+
+error:
+ bus_dmamap_destroy(dev_priv->agpdmat, obj->dmamap);
+ obj->dmamap = NULL;
+ obj->gtt_offset = 0;
+ return (ret);
+}
+
+void
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* If we don't have a page list set up, then we're not pinned
+ * to GPU, and we can ignore the cache flush because it'll happen
+ * again at bind time.
+ */
+ if (obj->dmamap == NULL)
+ return;
+
+ /* If the GPU is snooping the contents of the CPU cache,
+ * we do not need to manually clear the CPU cache lines. However,
+ * the caches are only snooped when the render cache is
+ * flushed/invalidated. As we always have to emit invalidations
+ * and flushes when moving into and out of the RENDER domain, correct
+ * snooping behaviour occurs naturally as the result of our domain
+ * tracking.
+ */
+ if (obj->cache_level != I915_CACHE_NONE)
+ return;
+
+ bus_dmamap_sync(dev_priv->agpdmat, obj->dmamap, 0,
+ obj->base.size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+void
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
+{
+ uint32_t old_write_domain;
+
+ if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
+ return;
+
+ /* No actual flushing is required for the GTT write domain. Writes
+ * to it immediately go to main memory as far as we know, so there's
+ * no chipset flush. It also doesn't land in render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
+ */
+ DRM_WRITEMEMORYBARRIER();
+
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
+
+#if 0
+ trace_i915_gem_object_change_domain(obj,
+ obj->base.read_domains,
+ old_write_domain);
+#endif
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+void
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+{
+ uint32_t old_write_domain;
+
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+ return;
+
+ i915_gem_clflush_object(obj);
+ i915_gem_chipset_flush(obj->base.dev);
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
+
+#if 0
+ trace_i915_gem_object_change_domain(obj,
+ obj->base.read_domains,
+ old_write_domain);
+#endif
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+// uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ DRM_ASSERT_HELD(&obj->base);
+
+ /* Not valid to be called on unbound objects. */
+ if (obj->dmamap == NULL)
+ return (EINVAL);
+
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
+ ret = i915_gem_object_wait_rendering(obj, !write);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+// old_write_domain = obj->base.write_domain;
+// old_read_domains = obj->base.read_domains;
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ WARN_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->dirty = 1;
+ }
+
+// trace_i915_gem_object_change_domain(obj,
+// old_read_domains,
+// old_write_domain);
+
+ /* And bump the LRU for this access */
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ return 0;
+}
+
+int
+i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+// struct drm_device *dev = obj->base.dev;
+// drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ if (obj->pin_count) {
+ DRM_DEBUG("can not change the cache level of pinned objects\n");
+ return -EBUSY;
+ }
+
+ if (obj->dmamap != NULL) {
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Before SandyBridge, you could not use tiling or fence
+ * registers with snooped memory, so relinquish any fences
+ * currently pointing to our region in the aperture.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ i915_gem_gtt_rebind_object(obj, cache_level);
+
+#ifdef notyet
+ if (obj->has_aliasing_ppgtt_mapping)
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, cache_level);
+#endif
+ }
+
+ if (cache_level == I915_CACHE_NONE) {
+ u32 old_read_domains, old_write_domain;
+
+ /* If we're coming from LLC cached, then we haven't
+ * actually been tracking whether the data is in the
+ * CPU cache or not, since we only allow one bit set
+ * in obj->write_domain and have been skipping the clflushes.
+ * Just set it to the CPU cache for now.
+ */
+ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+#if 0
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+#endif
+ }
+
+ obj->cache_level = cache_level;
+ return 0;
+}
+
+// i915_gem_get_caching_ioctl
+// i915_gem_set_caching_ioctl
+
+int
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ struct intel_ring_buffer *pipelined)
+{
+// u32 old_read_domains, old_write_domain;
+ int ret;
+
+ if (pipelined != obj->ring) {
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
+ return (ret);
+ }
+
+ /* The display engine is not coherent with the LLC cache on gen6. As
+ * a result, we make sure that the pinning that is about to occur is
+ * done with uncached PTEs. This is the lowest common denominator for all
+ * chipsets.
+ *
+ * However for gen6+, we could do better by using the GFDT bit instead
+ * of uncaching, which would allow us to flush all the LLC-cached data
+ * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+ */
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret)
+ return ret;
+
+ /* As the user may map the buffer once pinned in the display plane
+ * (e.g. libkms for the bootup splash), we have to ensure that we
+ * always use map_and_fenceable for all scanout buffers.
+ */
+ ret = i915_gem_object_pin(obj, alignment, true);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+// old_write_domain = obj->write_domain;
+// old_read_domains = obj->read_domains;
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ obj->base.write_domain = 0;
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+
+// trace_i915_gem_object_change_domain(obj,
+// old_read_domains,
+// old_write_domain);
+
+ return 0;
+}
+
+int
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
+ return 0;
+
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
+
+ /* Ensure that we invalidate the GPU's caches and TLBs. */
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
+{
+// uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ DRM_ASSERT_HELD(&obj->base);
+
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
+
+ ret = i915_gem_object_wait_rendering(obj, !write);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+// old_write_domain = obj->base.write_domain;
+// old_read_domains = obj->base.read_domains;
+
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj);
+
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+// trace_i915_gem_object_change_domain(obj,
+// old_read_domains,
+// old_write_domain);
+
+ return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ unsigned long recent_enough = ticks - msecs_to_jiffies(20);
+ struct drm_i915_gem_request *request;
+ struct intel_ring_buffer *ring = NULL;
+ u32 seqno = 0;
+ int ret;
+
+ if (atomic_read(&dev_priv->mm.wedged))
+ return EIO;
+
+ mtx_enter(&file_priv->mm.lock);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+ if (time_after_eq(request->emitted_ticks, recent_enough))
+ break;
+
+ ring = request->ring;
+ seqno = request->seqno;
+ }
+ mtx_leave(&file_priv->mm.lock);
+
+ if (seqno == 0)
+ return 0;
+
+ ret = __wait_seqno(ring, seqno, true, NULL);
+ if (ret == 0)
+ timeout_add_sec(&dev_priv->mm.retire_timer, 0);
+
+ return ret;
+}
+
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ DRM_ASSERT_HELD(&obj->base);
+ inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
+
+ if (obj->dmamap != NULL) {
+ if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ (map_and_fenceable && !obj->map_and_fenceable)) {
+ WARN(obj->pin_count,
+ "bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " obj->map_and_fenceable=%d\n",
+ obj->gtt_offset, alignment,
+ map_and_fenceable,
+ obj->map_and_fenceable);
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (obj->dmamap == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ map_and_fenceable);
+ if (ret)
+ return ret;
+ }
+
+ if (obj->pin_count++ == 0) {
+ atomic_inc(&dev->pin_count);
+ atomic_add(obj->base.size, &dev->pin_memory);
+ if (!obj->active)
+ list_del_init(&obj->mm_list);
+ }
+ inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
+
+ return (0);
+}
+
+void
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ DRM_ASSERT_HELD(&obj->base);
+ inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
+
+ BUG_ON(obj->pin_count == 0);
+ BUG_ON(obj->dmamap == NULL);
+
+ if (--obj->pin_count == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
+ &dev_priv->mm.inactive_list);
+ atomic_dec(&dev->pin_count);
+ atomic_sub(obj->base.size, &dev->pin_memory);
+ }
+ inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
+}
+
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_pin *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = EBADF;
+ goto unlock;
+ }
+
+ drm_hold_object(&obj->base);
+
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to pin a purgeable buffer\n");
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (++obj->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment, 1);
+ if (ret != 0)
+ goto out;
+ inteldrm_set_max_obj_size(dev_priv);
+ }
+
+ /* XXX - flush the CPU caches for pinned objects
+ * as the X server doesn't manage domains yet
+ */
+ i915_gem_object_set_to_gtt_domain(obj, true);
+ args->offset = obj->gtt_offset;
+
+out:
+ drm_unhold_and_unref(&obj->base);
+unlock:
+ DRM_UNLOCK();
+ return ret;
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_pin *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = EBADF;
+ goto unlock;
+ }
+
+ drm_hold_object(&obj->base);
+
+ if (obj->user_pin_count == 0) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ obj->user_pin_count--;
+ if (obj->user_pin_count == 0) {
+ i915_gem_object_unpin(obj);
+ inteldrm_set_max_obj_size(dev_priv);
+ }
+
+out:
+ drm_unhold_and_unref(&obj->base);
+unlock:
+ DRM_UNLOCK();
+ return ret;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_busy *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = EBADF;
+ goto unlock;
+ }
+
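+ /* Retire completed requests first so the reported busy
+ * state is current.
+ */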
+ ret = i915_gem_object_flush_active(obj);
+ args->busy = obj->active;
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK();
+ return ret;
+}
+
+// i915_gem_throttle_ioctl
+
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_madvise *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ switch (args->madv) {
+ case I915_MADV_DONTNEED:
+ case I915_MADV_WILLNEED:
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+ if (&obj->base == NULL) {
+ ret = EBADF;
+ goto unlock;
+ }
+
+ drm_hold_object(&obj->base);
+
+ /* invalid to madvise on a pinned BO */
+ if (obj->pin_count) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (obj->madv != __I915_MADV_PURGED)
+ obj->madv = args->madv;
+
+ /* if the object is no longer bound, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj) && obj->dmamap == NULL)
+ i915_gem_object_truncate(obj);
+
+ args->retained = obj->madv != __I915_MADV_PURGED;
+
+out:
+ drm_unhold_and_unref(&obj->base);
+unlock:
+ DRM_UNLOCK();
+ return ret;
+}
+
+struct drm_i915_gem_object *
+i915_gem_alloc_object(struct drm_device *dev, size_t size)
+{
+ struct drm_obj *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = drm_gem_object_alloc(dev, size);
+ if (obj == NULL)
+ return NULL;
+
+ obj_priv = to_intel_bo(obj);
+
+ return (obj_priv);
+}
+
+int
+i915_gem_init_object(struct drm_obj *obj)
+{
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->dev;
+
+ /*
+ * We've just allocated pages from the kernel,
+ * so they've just been written by the CPU with
+ * zeros. They'll need to be flushed before we
+ * use them with the GPU.
+ */
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+ if (HAS_LLC(dev)) {
+ /* On some devices, we can have the GPU use the LLC (the CPU
+ * cache) for about a 10% performance improvement
+ * compared to uncached. Graphics requests other than
+ * display scanout are coherent with the CPU in
+ * accessing this cache. This means in this mode we
+ * don't need to clflush on the CPU side, and on the
+ * GPU side we only need to flush internal caches to
+ * get data visible to the CPU.
+ *
+ * However, we maintain the display planes as UC, and so
+ * need to rebind when first used as such.
+ */
+ obj_priv->cache_level = I915_CACHE_LLC;
+ } else
+ obj_priv->cache_level = I915_CACHE_NONE;
+
+ /* normal objects don't need special treatment */
+ obj_priv->dma_flags = 0;
+ obj_priv->fence_reg = I915_FENCE_REG_NONE;
+ obj_priv->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj_priv->map_and_fenceable = true;
+
+ INIT_LIST_HEAD(&obj_priv->mm_list);
+ INIT_LIST_HEAD(&obj_priv->gtt_list);
+ INIT_LIST_HEAD(&obj_priv->ring_list);
+
+ return 0;
+}
+
+/*
+ * NOTE all object unreferences in this driver need to hold the DRM_LOCK(),
+ * because if they free they poke around in driver structures.
+ */
+void
+i915_gem_free_object(struct drm_obj *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = gem_obj->dev;
+
+ DRM_ASSERT_HELD(&obj->base);
+
+ if (obj->phys_obj)
+ i915_gem_detach_phys_object(dev, obj);
+
+ while (obj->pin_count > 0)
+ i915_gem_object_unpin(obj);
+
+ i915_gem_object_unbind(obj);
+ drm_free(obj->bit_17);
+ obj->bit_17 = NULL;
+ /* XXX dmatag went away? */
+}
+
+int
+i915_gem_idle(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int ret;
+
+ /* If drm attach failed */
+ if (dev == NULL)
+ return (0);
+
+ DRM_LOCK();
+
+ if (dev_priv->mm.suspended) {
+ DRM_UNLOCK();
+ return 0;
+ }
+
+ ret = i915_gpu_idle(dev);
+ if (ret) {
+ DRM_UNLOCK();
+ return (ret);
+ }
+ i915_gem_retire_requests(dev_priv);
+
+ /* Under UMS, be paranoid and evict. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_gem_evict_everything(dev_priv);
+
+ i915_gem_reset_fences(dev);
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ */
+ dev_priv->mm.suspended = 1;
+ /* if we hung then the timer already fired. */
+ timeout_del(&dev_priv->hangcheck_timer);
+
+ i915_kernel_lost_context(dev);
+ i915_gem_cleanup_ringbuffer(dev);
+ DRM_UNLOCK();
+
+ /* this should be idle now */
+ timeout_del(&dev_priv->mm.retire_timer);
+
+ return 0;
+}
+
+// i915_gem_l3_remap
+
+void
+i915_gem_init_swizzling(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
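+ /* Swizzling only needs enabling on gen5+ parts that
+ * actually swizzle.
+ */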
+ if (INTEL_INFO(dev)->gen < 5 ||
+ dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_TILE_SURFACE_SWIZZLING);
+
+ if (IS_GEN5(dev))
+ return;
+
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+ if (IS_GEN6(dev))
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+}
+
+bool
+intel_enable_blt(struct drm_device *dev)
+{
+ if (!HAS_BLT(dev))
+ return false;
+
+#ifdef notyet
+ /* The blitter was dysfunctional on early prototypes */
+ if (IS_GEN6(dev) && dev->pdev->revision < 8) {
+ DRM_INFO("BLT not supported on this pre-production hardware;"
+ " graphics performance will be degraded.\n");
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+int
+i915_gem_init_hw(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+#ifdef notyet
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+ I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
+ i915_gem_l3_remap(dev);
+
+#endif
+ i915_gem_init_swizzling(dev);
+
+ ret = intel_init_render_ring_buffer(dev);
+ if (ret)
+ return ret;
+
+ if (HAS_BSD(dev)) {
+ ret = intel_init_bsd_ring_buffer(dev);
+ if (ret)
+ goto cleanup_render_ring;
+ }
+
+ if (intel_enable_blt(dev)) {
+ ret = intel_init_blt_ring_buffer(dev);
+ if (ret)
+ goto cleanup_bsd_ring;
+ }
+
+ dev_priv->next_seqno = 1;
+
+ /*
+ * XXX: There was some w/a described somewhere suggesting loading
+ * contexts before PPGTT.
+ */
+#ifdef notyet
+ i915_gem_context_init(dev);
+ i915_gem_init_ppgtt(dev);
+#endif
+
+ return 0;
+
+cleanup_bsd_ring:
+ intel_cleanup_ring_buffer(&dev_priv->rings[VCS]);
+cleanup_render_ring:
+ intel_cleanup_ring_buffer(&dev_priv->rings[RCS]);
+ return ret;
+}
+
+// intel_enable_ppgtt
+
+int
+i915_gem_init(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint64_t gtt_start, gtt_end;
+ struct agp_softc *asc;
+ int ret;
+
+ DRM_LOCK();
+
+ asc = (struct agp_softc *)dev->agp->agpdev;
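+ /* GTT entries covering stolen memory sit at the start of the
+ * aperture, so begin our allocations after them.
+ */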
+ gtt_start = asc->sc_stolen_entries * 4096;
+
+ /*
+ * putting stuff in the last page of the aperture can cause nasty
+ * problems with prefetch going into unassigned memory. Since we put
+ * a scratch page on all unused aperture pages, just leave the last
+ * page as a spill to prevent gpu hangs.
+ */
+ gtt_end = dev->agp->info.ai_aperture_size - 4096;
+
+ if (agp_bus_dma_init(asc,
+ dev->agp->base + gtt_start, dev->agp->base + gtt_end,
+ &dev_priv->agpdmat) != 0) {
+ DRM_UNLOCK();
+ return (ENOMEM);
+ }
+
+ dev->gtt_total = (uint32_t)(gtt_end - gtt_start);
+ inteldrm_set_max_obj_size(dev_priv);
+
+ ret = i915_gem_init_hw(dev);
+ if (ret != 0) {
+ DRM_UNLOCK();
+ return (ret);
+ }
+
+ DRM_UNLOCK();
+
+ return 0;
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ intel_cleanup_ring_buffer(ring);
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return (0);
+
+ /* XXX until we have support for the rings on sandybridge */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ return (0);
+
+ if (dev_priv->mm.wedged) {
+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
+ dev_priv->mm.wedged = 0;
+ }
+
+ DRM_LOCK();
+ dev_priv->mm.suspended = 0;
+
+ ret = i915_gem_init_hw(dev);
+ if (ret != 0) {
+ DRM_UNLOCK();
+ return (ret);
+ }
+
+ /* gtt mapping means that the inactive list may not be empty */
+ KASSERT(list_empty(&dev_priv->mm.active_list));
+ for_each_ring(ring, dev_priv, i)
+ KASSERT(list_empty(&ring->request_list));
+ DRM_UNLOCK();
+
+ drm_irq_install(dev);
+
+ return (0);
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int ret;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+ /* don't uninstall if we fail; repeat calls on failure will screw us */
+ if ((ret = i915_gem_idle(dev_priv)) == 0)
+ drm_irq_uninstall(dev);
+ return (ret);
+}
+
+// i915_gem_lastclose
+
+void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+}
+
+// i915_gem_load
+
+int
+i915_gem_init_phys_object(struct drm_device *dev,
+ int id, int size, int align)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_phys_object *phys_obj;
+ int ret;
+
+ if (dev_priv->mm.phys_objs[id - 1] || !size)
+ return 0;
+
+ phys_obj = drm_alloc(sizeof(struct drm_i915_gem_phys_object));
+ if (!phys_obj)
+ return -ENOMEM;
+
+ phys_obj->id = id;
+
+ phys_obj->handle = drm_dmamem_alloc(dev->dmat, size, align, 1,
+ size, BUS_DMA_NOCACHE, 0);
+ if (!phys_obj->handle) {
+ ret = -ENOMEM;
+ goto kfree_obj;
+ }
+
+ dev_priv->mm.phys_objs[id - 1] = phys_obj;
+
+ return 0;
+kfree_obj:
+ drm_free(phys_obj);
+ return ret;
+}
+
+// i915_gem_free_phys_object
+// i915_gem_free_all_phys_object
+
+void
+i915_gem_detach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj)
+{
+ char *vaddr;
+ int i;
+ int page_count;
+
+ if (!obj->phys_obj)
+ return;
+ vaddr = obj->phys_obj->handle->kva;
+
+ page_count = obj->base.size / PAGE_SIZE;
+ for (i = 0; i < page_count; i++) {
+#ifdef notyet
+ struct page *page = shmem_read_mapping_page(mapping, i);
+ if (!IS_ERR(page)) {
+ char *dst = kmap_atomic(page);
+ memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ drm_clflush_pages(&page, 1);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
+#endif
+ }
+ i915_gem_chipset_flush(dev);
+
+ obj->phys_obj->cur_obj = NULL;
+ obj->phys_obj = NULL;
+}
+
+int
+i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj, int id, int align)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = 0;
+ int page_count;
+ int i;
+
+ if (id > I915_MAX_PHYS_OBJECT)
+ return -EINVAL;
+
+ if (obj->phys_obj) {
+ if (obj->phys_obj->id == id)
+ return 0;
+ i915_gem_detach_phys_object(dev, obj);
+ }
+
+ /* create a new object */
+ if (!dev_priv->mm.phys_objs[id - 1]) {
+ ret = i915_gem_init_phys_object(dev, id,
+ obj->base.size, align);
+ if (ret) {
+ DRM_ERROR("failed to init phys object %d size: %zu\n",
+ id, obj->base.size);
+ return (ret);
+ }
+ }
+
+ /* bind to the object */
+ obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+ obj->phys_obj->cur_obj = obj;
+
+ page_count = obj->base.size / PAGE_SIZE;
+
+ for (i = 0; i < page_count; i++) {
+#ifdef notyet
+ struct page *page;
+ char *dst, *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ src = kmap_atomic(page);
+ dst = obj->phys_obj->handle->kva + (i * PAGE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+#endif
+ }
+
+ return 0;
+}
+
+int
+i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ void *vaddr = obj->phys_obj->handle->kva + args->offset;
+ int ret;
+
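+ /* The phys object's backing is contiguous, CPU-mapped kernel
+ * memory, so copy the user data straight in, then flush.
+ */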
+ ret = copyin((char *)(uintptr_t)args->data_ptr,
+ vaddr, args->size);
+
+ i915_gem_chipset_flush(dev);
+
+ return ret;
+}
+
+void
+i915_gem_release(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ /* Clean up our request list when the client is going away, so that
+ * later retire_requests won't dereference our soon-to-be-gone
+ * file_priv.
+ */
+ mtx_enter(&file_priv->mm.lock);
+ while (!list_empty(&file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ mtx_leave(&file_priv->mm.lock);
+}
+
+// mutex_is_locked_by
+// i915_gem_inactive_shrink
diff --git a/sys/dev/pci/drm/i915/i915_gem_evict.c b/sys/dev/pci/drm/i915/i915_gem_evict.c
new file mode 100644
index 00000000000..5343d6e8771
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_gem_evict.c
@@ -0,0 +1,182 @@
+/* $OpenBSD: i915_gem_evict.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+
+#include <machine/pmap.h>
+
+#include <sys/queue.h>
+#include <sys/workq.h>
+
+int
+i915_gem_evict_something(struct inteldrm_softc *dev_priv, size_t min_size)
+{
+ struct drm_obj *obj;
+ struct drm_i915_gem_request *request;
+ struct drm_i915_gem_object *obj_priv;
+ struct intel_ring_buffer *ring;
+ u_int32_t seqno;
+ int ret = 0, i;
+ int found;
+
+ for (;;) {
+ i915_gem_retire_requests(dev_priv);
+
+ /* If there's an inactive buffer available now, grab it
+ * and be done.
+ */
+ obj = i915_gem_find_inactive_object(dev_priv, min_size);
+ if (obj != NULL) {
+ obj_priv = to_intel_bo(obj);
+ /* i915_gem_find_inactive_object() returns the object
+ * held and with a reference taken for us.
+ */
+ KASSERT(obj_priv->pin_count == 0);
+ KASSERT(!obj_priv->active);
+ DRM_ASSERT_HELD(obj);
+
+ /* Wait on the rendering and unbind the buffer. */
+ ret = i915_gem_object_unbind(obj_priv);
+ drm_unhold_and_unref(obj);
+ return (ret);
+ }
+
+ /* If we didn't get anything, but the ring is still processing
+ * things, wait for one of those things to finish and hopefully
+ * leave us a buffer to evict.
+ */
+ found = 0;
+ for_each_ring(ring, dev_priv, i) {
+ if (!list_empty(&ring->request_list)) {
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request, list);
+
+ seqno = request->seqno;
+
+ ret = i915_wait_seqno(request->ring, seqno);
+ if (ret)
+ return (ret);
+
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ continue;
+
+ /*
+ * If we didn't do any of the above, there's no single buffer
+ * large enough to swap out for the new one, so just evict
+ * everything and start again. (This should be rare.)
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ return (i915_gem_evict_inactive(dev_priv));
+ else
+ return (i915_gem_evict_everything(dev_priv));
+ }
+ /* NOTREACHED */
+}
+
+int
+i915_gem_evict_everything(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int ret;
+
+ if (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.active_list))
+ return (ENOSPC);
+
+ /* i915_gpu_idle() will flush everything in the write domain to the
+ * active list. Then we must move everything off the active list
+ * with retire requests.
+ */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev_priv);
+
+ i915_gem_evict_inactive(dev_priv);
+
+ /*
+ * All lists should be empty because we flushed the whole queue, then
+ * we evicted the whole shebang, only pinned objects are still bound.
+ */
+ KASSERT(list_empty(&dev_priv->mm.inactive_list));
+ KASSERT(list_empty(&dev_priv->mm.active_list));
+
+ return (0);
+}
+
+/* Clear out the inactive list and unbind everything in it. */
+int
+i915_gem_evict_inactive(struct inteldrm_softc *dev_priv)
+{
+ struct drm_i915_gem_object *obj_priv, *next;
+ int ret = 0;
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.inactive_list, mm_list) {
+ if (obj_priv->pin_count != 0) {
+ ret = EINVAL;
+ DRM_ERROR("Pinned object in unbind list\n");
+ break;
+ }
+ /* reference it so that we can frob it outside the lock */
+ drm_ref(&obj_priv->base.uobj);
+
+ drm_hold_object(&obj_priv->base);
+ ret = i915_gem_object_unbind(obj_priv);
+ drm_unhold_and_unref(&obj_priv->base);
+
+ if (ret)
+ break;
+ }
+
+ return (ret);
+}
diff --git a/sys/dev/pci/drm/i915/i915_gem_execbuffer.c b/sys/dev/pci/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 00000000000..6132dc13e15
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,517 @@
+/* $OpenBSD: i915_gem_execbuffer.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#include <machine/pmap.h>
+
+#include <sys/queue.h>
+#include <sys/workq.h>
+
+int i915_reset_gen7_sol_offsets(struct drm_device *,
+ struct intel_ring_buffer *);
+int i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *, u32);
+int i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *,
+ struct drm_obj **, int);
+void i915_gem_execbuffer_move_to_active(struct drm_obj **, int,
+ struct intel_ring_buffer *);
+void i915_gem_execbuffer_retire_commands(struct drm_device *,
+ struct drm_file *, struct intel_ring_buffer *);
+
+// struct eb_objects {
+// eb_create
+// eb_reset
+// eb_add_object
+// eb_get_object
+// eb_destroy
+// i915_gem_execbuffer_relocate_entry
+// i915_gem_execbuffer_relocate_object
+// i915_gem_execbuffer_relocate_object_slow
+// i915_gem_execbuffer_relocate
+// pin_and_fence_object
+// i915_gem_execbuffer_reserve
+// i915_gem_execbuffer_relocate_slow
+
+int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
+{
+ u32 plane, flip_mask;
+ int ret;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
+
+int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+ struct drm_obj **object_list, int buffer_count)
+{
+ struct drm_i915_gem_object *obj;
+ uint32_t flush_domains = 0;
+ uint32_t flips = 0;
+ int ret, i;
+
+ for (i = 0; i < buffer_count; i++) {
+ obj = to_intel_bo(object_list[i]);
+ ret = i915_gem_object_sync(obj, ring);
+ if (ret)
+ return ret;
+
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ if (obj->base.pending_write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+
+ flush_domains |= obj->base.write_domain;
+ }
+
+ if (flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
+ if (ret)
+ return ret;
+ }
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ i915_gem_chipset_flush(ring->dev);
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ DRM_WRITEMEMORYBARRIER();
+
+ /* Unconditionally invalidate gpu caches and ensure that we do flush
+ * any residual writes from the previous batch.
+ */
+ return intel_ring_invalidate_all_caches(ring);
+}
+
+// i915_gem_check_execbuffer
+// validate_exec_list
+
+void
+i915_gem_execbuffer_move_to_active(struct drm_obj **object_list,
+ int buffer_count, struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ int i;
+
+ for (i = 0; i < buffer_count; i++) {
+ obj = to_intel_bo(object_list[i]);
+#if 0
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
+#endif
+
+ obj->base.read_domains = obj->base.pending_read_domains;
+ obj->base.write_domain = obj->base.pending_write_domain;
+ obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+ i915_gem_object_move_to_active(obj, ring);
+ if (obj->base.write_domain) {
+ obj->dirty = 1;
+ obj->last_write_seqno = intel_ring_get_seqno(ring);
+ intel_mark_busy(ring->dev);
+ }
+
+// trace_i915_gem_object_change_domain(obj, old_read, old_write);
+ }
+}
+
+void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring)
+{
+ /* Unconditionally force add_request to emit a full flush. */
+ ring->gpu_caches_dirty = true;
+
+ /* Add a breadcrumb for the completion of the batch buffer */
+ i915_add_request(ring, file, NULL);
+}
+
+// i915_gem_fix_mi_batchbuffer_end
+
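+/*
+ * Zero the four gen7 streamout (SOL) write offset registers from the
+ * render ring; done on behalf of userspace when it passes the
+ * I915_EXEC_GEN7_SOL_RESET execbuffer flag.
+ */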
+int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret, i;
+
+ if (!IS_GEN7(dev) || ring != &dev_priv->rings[RCS])
+ return 0;
+
+ ret = intel_ring_begin(ring, 4 * 3);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(ring, 0);
+ }
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+// i915_gem_do_execbuffer
+// i915_gem_execbuffer
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec_list = NULL;
+ struct drm_i915_gem_relocation_entry *relocs = NULL;
+ struct drm_i915_gem_object *batch_obj_priv;
+ struct drm_obj **object_list = NULL;
+ struct drm_obj *batch_obj, *obj;
+ struct intel_ring_buffer *ring;
+ size_t oflow;
+ int ret, ret2, i;
+ int pinned = 0, pin_tries;
+ uint32_t reloc_index;
+ uint32_t flags;
+ uint32_t exec_start, exec_len;
+ uint32_t mask;
+ int mode;
+
+ /*
+ * Check for valid execbuffer offset. We can do this early because
+ * bound objects are always page aligned, so only the start offset
+ * matters. Also check for integer overflow in the batch offset and size.
+ */
+ if ((args->batch_start_offset | args->batch_len) & 0x7 ||
+ args->batch_start_offset + args->batch_len < args->batch_len ||
+ args->batch_start_offset + args->batch_len <
+ args->batch_start_offset)
+ return (EINVAL);
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return (EINVAL);
+ }
+
+ flags = 0;
+ if (args->flags & I915_EXEC_SECURE) {
+ if (!DRM_SUSER(curproc))
+ return (EPERM);
+
+ flags |= I915_DISPATCH_SECURE;
+ }
+ if (args->flags & I915_EXEC_IS_PINNED)
+ flags |= I915_DISPATCH_PINNED;
+
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->rings[RCS];
+ break;
+ case I915_EXEC_BSD:
+ ring = &dev_priv->rings[VCS];
+ break;
+ case I915_EXEC_BLT:
+ ring = &dev_priv->rings[BCS];
+ break;
+ default:
+ printf("unknown ring %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return (EINVAL);
+ }
+ if (!intel_ring_initialized(ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return (EINVAL);
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ mask = I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (ring == &dev_priv->rings[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4)
+ return EINVAL;
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+ return EINVAL;
+
+ /* The HW changed the meaning on this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
+ return EINVAL;
+ }
+
+ /* Copy in the exec list from userland, check for overflow */
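+ /*
+ * The allocation size is buffer_count * sizeof(elem); if SIZE_MAX
+ * divided by the count is smaller than either element size, that
+ * multiplication would wrap.
+ */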
+ oflow = SIZE_MAX / args->buffer_count;
+ if (oflow < sizeof(*exec_list) || oflow < sizeof(*object_list))
+ return (EINVAL);
+ exec_list = drm_alloc(sizeof(*exec_list) * args->buffer_count);
+ object_list = drm_alloc(sizeof(*object_list) * args->buffer_count);
+ if (exec_list == NULL || object_list == NULL) {
+ ret = ENOMEM;
+ goto pre_mutex_err;
+ }
+ ret = copyin((void *)(uintptr_t)args->buffers_ptr, exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0)
+ goto pre_mutex_err;
+
+ ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+ &relocs);
+ if (ret != 0)
+ goto pre_mutex_err;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_mutex_err;
+ inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
+
+ /* XXX check these before we copyin... but we do need the lock */
+ if (dev_priv->mm.wedged) {
+ ret = EIO;
+ goto unlock;
+ }
+
+ if (dev_priv->mm.suspended) {
+ ret = EBUSY;
+ goto unlock;
+ }
+
+ /* Look up object handles */
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i] = drm_gem_object_lookup(dev, file_priv,
+ exec_list[i].handle);
+ obj = object_list[i];
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec_list[i].handle, i);
+ ret = EBADF;
+ goto err;
+ }
+ if (obj->do_flags & I915_IN_EXEC) {
+ DRM_ERROR("Object %p appears more than once in object_list\n",
+ object_list[i]);
+ ret = EBADF;
+ goto err;
+ }
+ atomic_setbits_int(&obj->do_flags, I915_IN_EXEC);
+ }
+
+ /* Pin and relocate */
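+ /*
+ * First pass pins and relocates every buffer; if the GTT fills up
+ * (ENOSPC) we unpin the lot, evict everything, and retry exactly
+ * once before giving up.
+ */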
+ for (pin_tries = 0; ; pin_tries++) {
+ ret = pinned = 0;
+ reloc_index = 0;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ to_intel_bo(object_list[i])->pending_fenced_gpu_access = false;
+ drm_hold_object(object_list[i]);
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ file_priv, &exec_list[i], &relocs[reloc_index]);
+ if (ret) {
+ drm_unhold_object(object_list[i]);
+ break;
+ }
+ pinned++;
+ reloc_index += exec_list[i].relocation_count;
+ }
+ /* success */
+ if (ret == 0)
+ break;
+
+ /* error other than GTT full, or we've already tried again */
+ if (ret != ENOSPC || pin_tries >= 1)
+ goto err;
+
+ /*
+ * unpin and unhold all of our buffers so they can be unbound,
+ * then try to refit everything in the aperture.
+ */
+ for (i = 0; i < pinned; i++) {
+ if (object_list[i]->do_flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(to_intel_bo(object_list[i]));
+ object_list[i]->do_flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ }
+ i915_gem_object_unpin(to_intel_bo(object_list[i]));
+ drm_unhold_object(object_list[i]);
+ }
+ pinned = 0;
+ /* evict everyone we can from the aperture */
+ ret = i915_gem_evict_everything(dev_priv);
+ if (ret)
+ goto err;
+ }
+
+ /* If we get here all involved objects are referenced, pinned, relocated
+ * and held. Now we can finish off the exec processing.
+ *
+ * First, set the pending read domains for the batch buffer to
+ * command.
+ */
+ batch_obj = object_list[args->buffer_count - 1];
+ batch_obj_priv = to_intel_bo(batch_obj);
+ if (args->batch_start_offset + args->batch_len > batch_obj->size ||
+ batch_obj->pending_write_domain) {
+ ret = EINVAL;
+ goto err;
+ }
+ batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, object_list,
+ args->buffer_count);
+ if (ret)
+ goto err;
+
+ if (ring == &dev_priv->rings[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, mask << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto err;
+ }
+
+ /* Exec the batchbuffer */
+ /*
+ * XXX make sure that this may never fail by preallocating the request.
+ */
+
+ exec_start = batch_obj_priv->gtt_offset + args->batch_start_offset;
+ exec_len = args->batch_len;
+
+ ret = ring->dispatch_execbuffer(ring, exec_start, exec_len, flags);
+ if (ret)
+ goto err;
+
+ i915_gem_execbuffer_move_to_active(object_list, args->buffer_count, ring);
+ i915_gem_execbuffer_retire_commands(dev, file_priv, ring);
+
+ ret = copyout(exec_list, (void *)(uintptr_t)args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+
+err:
+ for (i = 0; i < args->buffer_count; i++) {
+ if (object_list[i] == NULL)
+ break;
+
+ if (object_list[i]->do_flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(to_intel_bo(object_list[i]));
+ object_list[i]->do_flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ }
+
+ atomic_clearbits_int(&object_list[i]->do_flags, I915_IN_EXEC |
+ I915_EXEC_NEEDS_FENCE);
+ if (i < pinned) {
+ i915_gem_object_unpin(to_intel_bo(object_list[i]));
+ drm_unhold_and_unref(object_list[i]);
+ } else {
+ drm_unref(&object_list[i]->uobj);
+ }
+ }
+
+unlock:
+ DRM_UNLOCK();
+
+pre_mutex_err:
+ /* update userland's reloc state. */
+ ret2 = i915_gem_put_relocs_to_user(exec_list,
+ args->buffer_count, relocs);
+ if (ret2 != 0 && ret == 0)
+ ret = ret2;
+
+ drm_free(object_list);
+ drm_free(exec_list);
+
+ return ret;
+}
diff --git a/sys/dev/pci/drm/i915/i915_gem_gtt.c b/sys/dev/pci/drm/i915/i915_gem_gtt.c
new file mode 100644
index 00000000000..79c125b4b30
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,74 @@
+/* $OpenBSD: i915_gem_gtt.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+void
+i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+ printf("%s stub\n", __func__);
+}
+
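+/*
+ * Re-establish the GTT mapping of every bound object, e.g. after resume,
+ * flushing CPU caches first so the rebound pages are seen coherently.
+ */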
+void
+i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+ i915_gem_clflush_object(obj);
+ i915_gem_gtt_rebind_object(obj, obj->cache_level);
+ }
+
+ i915_gem_chipset_flush(dev);
+}
+
+void
+i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int flags = obj->dma_flags;
+
+ switch (cache_level) {
+ case I915_CACHE_NONE:
+ flags |= BUS_DMA_GTT_NOCACHE;
+ break;
+ case I915_CACHE_LLC:
+ flags |= BUS_DMA_GTT_CACHE_LLC;
+ break;
+ case I915_CACHE_LLC_MLC:
+ flags |= BUS_DMA_GTT_CACHE_LLC_MLC;
+ break;
+ default:
+ BUG();
+ }
+
+ agp_bus_dma_rebind(dev_priv->agpdmat, obj->dmamap, flags);
+}
diff --git a/sys/dev/pci/drm/i915/i915_gem_tiling.c b/sys/dev/pci/drm/i915/i915_gem_tiling.c
new file mode 100644
index 00000000000..bf5eb3586f4
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_gem_tiling.c
@@ -0,0 +1,574 @@
+/* $OpenBSD: i915_gem_tiling.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+
+#include <machine/pmap.h>
+
+#include <sys/queue.h>
+#include <sys/workq.h>
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvement from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
+
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct inteldrm_softc *dev_priv,
+ struct pci_attach_args *bpa)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ int need_disable;
+
+ if (IS_VALLEYVIEW(dev)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated with
+ * identically sized dimms. We don't need to check the 3rd
+ * channel because no cpu with gpu attached ships in that
+ * configuration. Also, swizzling only makes sense for 2
+ * channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ } else if (IS_GEN5(dev)) {
+ /*
+ * On Ironlake, whatever the DRAM config, the GPU always does
+ * the same swizzling setup.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_GEN2(dev)) {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ uint32_t dcc;
+
+ /* try to enable MCHBAR, a lot of BIOSes disable it */
+ need_disable = intel_setup_mchbar(dev_priv, bpa);
+
+ /* On 915-945 and GM965, channel interleave by the CPU is
+ * determined by DCC. The CPU will alternate based on bit 6
+ * in interleaved mode, and the GPU will then also alternate
+ * on bit 6, 9, and 10 for X, but the CPU may also optionally
+ * alternate based on bit 17 (XOR not disabled and XOR
+ * bit == 17).
+ */
+ dcc = I915_READ(DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /* This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Bit 11 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+ }
+ break;
+ }
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+
+ intel_teardown_mchbar(dev_priv, bpa, need_disable);
+ } else {
+ /* The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on G965:
+ *
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ */
+ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ }
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/* Check pitch constraints for all chips & tiling formats */
+bool
+i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+{
+ int tile_width;
+
+ /* Linear is always fine */
+ if (tiling_mode == I915_TILING_NONE)
+ return true;
+
+ if (IS_GEN2(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* check maximum stride & object size */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* i965 stores the end address of the gtt mapping in the fence
+ * reg, so don't bother to check the size */
+ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+ return false;
+ } else {
+ if (stride > 8192)
+ return false;
+
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
+ }
+
+ /* 965+ just needs multiples of tile width */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (stride & (tile_width - 1))
+ return false;
+ return true;
+ }
+
+ /* Pre-965 needs power of two tile widths */
+ if (stride < tile_width)
+ return false;
+
+ if (stride & (stride - 1))
+ return false;
+
+ return true;
+}
+
+bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+ u32 size;
+
+ if (tiling_mode == I915_TILING_NONE)
+ return true;
+
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
+ return true;
+
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
+ } else {
+ if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+ return false;
+ }
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
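+ /* e.g. a 600kB object rounds up to a 1MB fence region below, so its
+ * GTT offset must be 1MB aligned for a fence to cover it.
+ */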
+ if (INTEL_INFO(obj->base.dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
+
+#if 0
+ if (obj->gtt_space->size != size)
+ return false;
+#endif
+
+ if (obj->gtt_offset & (size - 1))
+ return false;
+
+ return true;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_set_tiling *args = data;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_obj *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return (EBADF);
+ obj_priv = to_intel_bo(obj);
+ drm_hold_object(obj);
+
+ if (obj_priv->pin_count != 0) {
+ ret = EBUSY;
+ goto out;
+ }
+ if (i915_tiling_ok(dev, args->stride, obj->size,
+ args->tiling_mode) == 0) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (args->tiling_mode == I915_TILING_NONE) {
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ args->stride = 0;
+ } else {
+ if (args->tiling_mode == I915_TILING_X)
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ else
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ /* If we can't handle the swizzling, make it untiled. */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ args->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ args->stride = 0;
+ }
+ }
+
+ if (args->tiling_mode != obj_priv->tiling_mode ||
+ args->stride != obj_priv->stride) {
+ /*
+ * We need to rebind the object if its current allocation no
+ * longer meets the alignment restrictions for its new tiling
+ * mode. Otherwise we can leave it alone, but must clear any
+ * fence register.
+ */
+ /* fence may no longer be correct, wipe it */
+
+ obj_priv->map_and_fenceable =
+ obj_priv->dmamap == NULL ||
+#ifdef notyet
+ (obj_priv->gtt_offset + obj->size <=
+ dev_priv->mm.gtt_mappable_end &&
+#else
+ (
+#endif
+ i915_gem_object_fence_ok(obj_priv, args->tiling_mode));
+
+ /* Rebind if we need a change of alignment */
+ if (!obj_priv->map_and_fenceable) {
+ u32 unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj_priv->base.size,
+ args->tiling_mode);
+ if (obj_priv->gtt_offset & (unfenced_alignment - 1))
+ ret = i915_gem_object_unbind(obj_priv);
+ }
+
+ if (ret == 0) {
+ obj_priv->fence_dirty =
+ obj_priv->fenced_gpu_access ||
+ obj_priv->fence_reg != I915_FENCE_REG_NONE;
+
+ obj_priv->tiling_mode = args->tiling_mode;
+ obj_priv->stride = args->stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj_priv);
+ }
+
+ }
+ /* we have to maintain this existing ABI... */
+ args->stride = obj_priv->stride;
+ args->tiling_mode = obj_priv->tiling_mode;
+out:
+ drm_unhold_and_unref(obj);
+
+ return (ret);
+}
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_get_tiling *args = data;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_obj *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return (EBADF);
+ drm_hold_object(obj);
+ obj_priv = to_intel_bo(obj);
+
+ args->tiling_mode = obj_priv->tiling_mode;
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_X:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ break;
+ case I915_TILING_NONE:
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ default:
+ DRM_ERROR("unknown tiling mode\n");
+ }
+
+ drm_unhold_and_unref(obj);
+
+ return 0;
+}
+
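+/*
+ * Swap the two 64-byte halves of every 128-byte chunk in a page. This is
+ * equivalent to flipping bit 6 of each byte address, which compensates for
+ * a bit 17 induced change in the swizzle when a page moves.
+ */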
+int
+i915_gem_swizzle_page(struct vm_page *pg)
+{
+ vaddr_t va;
+ int i;
+ u_int8_t temp[64], *vaddr;
+
+#if defined (__HAVE_PMAP_DIRECT)
+ va = pmap_map_direct(pg);
+#else
+ va = uvm_km_valloc(kernel_map, PAGE_SIZE);
+ if (va == 0)
+ return (ENOMEM);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
+ pmap_update(pmap_kernel());
+#endif
+ vaddr = (u_int8_t *)va;
+
+ for (i = 0; i < PAGE_SIZE; i += 128) {
+ memcpy(temp, &vaddr[i], 64);
+ memcpy(&vaddr[i], &vaddr[i + 64], 64);
+ memcpy(&vaddr[i + 64], temp, 64);
+ }
+
+#if defined (__HAVE_PMAP_DIRECT)
+ pmap_unmap_direct(va);
+#else
+ pmap_kremove(va, PAGE_SIZE);
+ pmap_update(pmap_kernel());
+ uvm_km_free(kernel_map, va, PAGE_SIZE);
+#endif
+ return (0);
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct vm_page *pg;
+ bus_dma_segment_t *segp;
+ int page_count = obj->base.size >> PAGE_SHIFT;
+ int i, n, ret;
+
+ if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17 ||
+ obj->bit_17 == NULL)
+ return;
+
+ segp = &obj->dma_segs[0];
+ n = 0;
+ for (i = 0; i < page_count; i++) {
+ /* compare bit 17 with previous one (in case we swapped).
+ * if they don't match we'll have to swizzle the page
+ */
+ if ((((segp->ds_addr + n) >> 17) & 0x1) !=
+ test_bit(i, obj->bit_17)) {
+ /* XXX move this to somewhere where we already have pg */
+ pg = PHYS_TO_VM_PAGE(segp->ds_addr + n);
+ KASSERT(pg != NULL);
+ ret = i915_gem_swizzle_page(pg);
+ if (ret)
+ return;
+ atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
+ }
+
+ n += PAGE_SIZE;
+ if (n >= segp->ds_len) {
+ n = 0;
+ segp++;
+ }
+ }
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ bus_dma_segment_t *segp;
+ int page_count = obj->base.size >> PAGE_SHIFT;
+ int i, n;
+
+ if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+ return;
+
+ if (obj->bit_17 == NULL) {
+ /* round up number of pages to a multiple of 32 so we know what
+ * size to make the bitmask. XXX this is wasteful with malloc
+ * and should be done a better way.
+ */
+ size_t nb17 = ((page_count + 31) & ~31)/32;
+ obj->bit_17 = drm_alloc(nb17 * sizeof(u_int32_t));
+ if (obj->bit_17 == NULL) {
+ return;
+ }
+ }
+
+ segp = &obj->dma_segs[0];
+ n = 0;
+ for (i = 0; i < page_count; i++) {
+ if ((segp->ds_addr + n) & (1 << 17))
+ set_bit(i, obj->bit_17);
+ else
+ clear_bit(i, obj->bit_17);
+
+ n += PAGE_SIZE;
+ if (n >= segp->ds_len) {
+ n = 0;
+ segp++;
+ }
+ }
+}
diff --git a/sys/dev/pci/drm/i915/i915_irq.c b/sys/dev/pci/drm/i915/i915_irq.c
new file mode 100644
index 00000000000..20f89d06f7b
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_irq.c
@@ -0,0 +1,2923 @@
+/* $OpenBSD: i915_irq.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+void ironlake_enable_display_irq(drm_i915_private_t *, u32);
+void intel_enable_asle(struct drm_device *);
+int i915_pipeconf_enabled(struct drm_device *, int);
+u32 gm45_get_vblank_counter(struct drm_device *, int);
+int i915_get_crtc_scanoutpos(struct drm_device *, int, int *, int *);
+int i915_get_vblank_timestamp(struct drm_device *, int, int *,
+ struct timeval *, unsigned);
+void ironlake_handle_rps_change(struct drm_device *);
+void notify_ring(struct drm_device *, struct intel_ring_buffer *);
+void ivybridge_handle_parity_error(struct drm_device *);
+void snb_gt_irq_handler(struct drm_device *, struct inteldrm_softc *, u32);
+void gen6_queue_rps_work(struct inteldrm_softc *, u32);
+int valleyview_intr(void *);
+void ibx_irq_handler(struct drm_device *, u32);
+void cpt_irq_handler(struct drm_device *, u32);
+int ivybridge_intr(void *);
+void ilk_gt_irq_handler(struct drm_device *, struct inteldrm_softc *, u32);
+int ironlake_intr(void *);
+void i915_get_extra_instdone(struct drm_device *, uint32_t *);
+void i915_report_and_clear_eir(struct drm_device *);
+void i915_handle_error(struct drm_device *, bool);
+void i915_pageflip_stall_check(struct drm_device *, int);
+int ironlake_enable_vblank(struct drm_device *, int);
+int ivybridge_enable_vblank(struct drm_device *, int);
+int valleyview_enable_vblank(struct drm_device *, int);
+void ironlake_disable_vblank(struct drm_device *, int);
+void ivybridge_disable_vblank(struct drm_device *, int);
+void valleyview_disable_vblank(struct drm_device *, int);
+u32 ring_last_seqno(struct intel_ring_buffer *);
+bool i915_hangcheck_ring_idle(struct intel_ring_buffer *, bool *);
+bool kick_ring(struct intel_ring_buffer *);
+bool i915_hangcheck_hung(struct drm_device *);
+void ironlake_irq_preinstall(struct drm_device *);
+void valleyview_irq_preinstall(struct drm_device *);
+void ironlake_enable_pch_hotplug(struct drm_device *);
+int ironlake_irq_postinstall(struct drm_device *);
+int ivybridge_irq_postinstall(struct drm_device *);
+int valleyview_irq_postinstall(struct drm_device *);
+void valleyview_irq_uninstall(struct drm_device *);
+void ironlake_irq_uninstall(struct drm_device *);
+void i8xx_irq_preinstall(struct drm_device *);
+int i8xx_irq_postinstall(struct drm_device *);
+int i8xx_intr(void *);
+void i8xx_irq_uninstall(struct drm_device *);
+void i915_irq_preinstall(struct drm_device *);
+int i915_irq_postinstall(struct drm_device *);
+int i915_intr(void *);
+void i915_irq_uninstall(struct drm_device *);
+void i965_irq_preinstall(struct drm_device *);
+int i965_irq_postinstall(struct drm_device *);
+int i965_intr(void *);
+void i965_irq_uninstall(struct drm_device *);
+void intel_irq_init(struct drm_device *);
+void i915_hotplug_work_func(void *, void *);
+void i915_error_work_func(void *, void *);
+void ivybridge_parity_work(void *, void *);
+void gen6_pm_rps_work(void *, void *);
+
+/* For display hotplug interrupt */
+void
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
+ }
+}
+
+static inline void
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
+ }
+}
+
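+/*
+ * PIPESTAT holds enable bits in its high 16 bits and the matching status
+ * bits 16 positions lower; status bits are write-one-to-clear, so writing
+ * (mask >> 16) along with the enables clears any stale pending status.
+ */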
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
+ u32 reg = PIPESTAT(pipe);
+
+ dev_priv->pipestat[pipe] |= mask;
+ /* Enable the interrupt, clear any pending status */
+ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
+ POSTING_READ(reg);
+ }
+}
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
+ u32 reg = PIPESTAT(pipe);
+
+ dev_priv->pipestat[pipe] &= ~mask;
+ I915_WRITE(reg, dev_priv->pipestat[pipe]);
+ POSTING_READ(reg);
+ }
+}
+
+/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void
+intel_enable_asle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* FIXME: opregion/asle for VLV */
+ if (IS_VALLEYVIEW(dev))
+ return;
+
+ mtx_enter(&dev_priv->irq_lock);
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_enable_display_irq(dev_priv, DE_GSE);
+ else {
+ i915_enable_pipestat(dev_priv, 1,
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
+ if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
+ }
+
+ mtx_leave(&dev_priv->irq_lock);
+}
+
+/**
+ * i915_pipeconf_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+int
+i915_pipeconf_enabled(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32
+i915_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long high_frame;
+ unsigned long low_frame;
+ u32 high1, high2, low;
+
+ if (!i915_pipeconf_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ high_frame = PIPEFRAME(pipe);
+ low_frame = PIPEFRAMEPIXEL(pipe);
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+ high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+ high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ } while (high1 != high2);
+
+ high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ low >>= PIPE_FRAME_LOW_SHIFT;
+ return (high1 << 8) | low;
+}
+
+u32
+gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int reg = PIPE_FRMCOUNT_GM45(pipe);
+
+ if (!i915_pipeconf_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ return I915_READ(reg);
+}
+
+int
+i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vbl = 0, position = 0;
+ int vbl_start, vbl_end, htotal, vtotal;
+ bool in_vbl = true;
+ int ret = 0;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ if (!i915_pipeconf_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ /* Get vtotal. */
+ vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = I915_READ(PIPEDSL(pipe));
+
+ /* Decode into vertical scanout position. Don't have
+ * horizontal scanout position.
+ */
+ *vpos = position & 0x1fff;
+ *hpos = 0;
+ } else {
+ /* Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ /* Query vblank area. */
+ vbl = I915_READ(VBLANK(cpu_transcoder));
+
+ /* Test position against vblank region. */
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+
+ if ((*vpos < vbl_start) || (*vpos > vbl_end))
+ in_vbl = false;
+
+ /* Inside "upper part" of vblank area? Apply corrective offset: */
+ if (in_vbl && (*vpos >= vbl_start))
+ *vpos = *vpos - vtotal;
+
+ /* Readouts valid? */
+ if (vbl > 0)
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
+
+int
+i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ /* Get drm_crtc to timestamp: */
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc == NULL) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ if (!crtc->enabled) {
+ DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ return -EBUSY;
+ }
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+ vblank_time, flags,
+ crtc);
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+void
+i915_hotplug_work_func(void *arg1, void *arg2)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *)arg1;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ /* HPD irq before everything is fully set up. */
+ if (!dev_priv->enable_hotplug_processing)
+ return;
+
+ rw_enter_write(&mode_config->rwl);
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->hot_plug)
+ encoder->hot_plug(encoder);
+
+ rw_exit_write(&mode_config->rwl);
+
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
+
+/* defined in intel_pm.c */
+extern struct mutex mchdev_lock;
+
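+/*
+ * Note the inverted sense of the Ironlake delay values: a smaller delay
+ * means a higher GPU frequency, so ips.max_delay is the numerically
+ * smaller bound and ips.min_delay the larger one.
+ */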
+void
+ironlake_handle_rps_change(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_delay;
+
+ mtx_enter(&mchdev_lock);
+
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+ new_delay = dev_priv->ips.cur_delay;
+
+ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = I915_READ(RCPREVBSYTUPAVG);
+ busy_down = I915_READ(RCPREVBSYTDNAVG);
+ max_avg = I915_READ(RCBMAXAVG);
+ min_avg = I915_READ(RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ if (busy_up > max_avg) {
+ if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.cur_delay - 1;
+ if (new_delay < dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.max_delay;
+ } else if (busy_down < min_avg) {
+ if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.cur_delay + 1;
+ if (new_delay > dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.min_delay;
+ }
+
+ if (ironlake_set_drps(dev, new_delay))
+ dev_priv->ips.cur_delay = new_delay;
+
+ mtx_leave(&mchdev_lock);
+
+ return;
+}
+
+void
+notify_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (ring->obj == NULL)
+ return;
+
+// trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+
+ wakeup(ring);
+ dev_priv->hangcheck_count = 0;
+ timeout_add_msec(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
+
+#ifdef notyet
+ wakeup(&ring->irq_queue);
+ if (i915_enable_hangcheck) {
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+ }
+#endif
+}
+
+void
+gen6_pm_rps_work(void *arg1, void *arg2)
+{
+ drm_i915_private_t *dev_priv = arg1;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 pm_iir, pm_imr;
+ u8 new_delay;
+
+ mtx_enter(&dev_priv->rps.lock);
+ pm_iir = dev_priv->rps.pm_iir;
+ dev_priv->rps.pm_iir = 0;
+ pm_imr = I915_READ(GEN6_PMIMR);
+ I915_WRITE(GEN6_PMIMR, 0);
+ mtx_leave(&dev_priv->rps.lock);
+
+ if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
+ return;
+
+ rw_enter_write(&dev_priv->rps.hw_lock);
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+ new_delay = dev_priv->rps.cur_delay + 1;
+ else
+ new_delay = dev_priv->rps.cur_delay - 1;
+
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ if (!(new_delay > dev_priv->rps.max_delay ||
+ new_delay < dev_priv->rps.min_delay)) {
+ gen6_set_rps(dev, new_delay);
+ }
+
+ rw_exit_write(&dev_priv->rps.hw_lock);
+}
+
+/**
+ * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * occurred.
+ * @work: workqueue struct
+ *
+ * Doesn't actually do anything except notify userspace. As a consequence of
+ * this event, userspace should try to remap the bad rows, since
+ * statistically the same row is more likely to go bad again.
+ */
+void
+ivybridge_parity_work(void *arg1, void *arg2)
+{
+ drm_i915_private_t *dev_priv = arg1;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 error_status, row, bank, subbank;
+// char *parity_event[5];
+ uint32_t misccpctl;
+
+ /* We must turn off DOP level clock gating to access the L3 registers.
+ * In order to prevent a get/put style interface, acquire struct mutex
+ * any time we access those registers.
+ */
+ DRM_LOCK();
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ error_status = I915_READ(GEN7_L3CDERRST1);
+ row = GEN7_PARITY_ERROR_ROW(error_status);
+ bank = GEN7_PARITY_ERROR_BANK(error_status);
+ subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+ I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
+ GEN7_L3CDERRST1_ENABLE);
+ POSTING_READ(GEN7_L3CDERRST1);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+ mtx_enter(&dev_priv->irq_lock);
+ dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ mtx_leave(&dev_priv->irq_lock);
+
+ DRM_UNLOCK();
+
+#if 0
+ parity_event[0] = "L3_PARITY_ERROR=1";
+ parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+ parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+ parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+ parity_event[4] = NULL;
+#endif
+
+#ifdef notyet
+ kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
+ KOBJ_CHANGE, parity_event);
+#endif
+
+ DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
+ row, bank, subbank);
+
+#if 0
+ free(parity_event[3], M_DRM);
+ free(parity_event[2], M_DRM);
+ free(parity_event[1], M_DRM);
+#endif
+}
+
+void
+ivybridge_handle_parity_error(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!HAS_L3_GPU_CACHE(dev))
+ return;
+
+ mtx_enter(&dev_priv->irq_lock);
+ dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ mtx_leave(&dev_priv->irq_lock);
+
+ workq_queue_task(NULL, &dev_priv->l3_parity.error_task, 0,
+ ivybridge_parity_work, dev_priv, NULL);
+}
+
+void
+snb_gt_irq_handler(struct drm_device *dev,
+ struct inteldrm_softc *dev_priv,
+ u32 gt_iir)
+{
+
+ if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
+ GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->rings[RCS]);
+ if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[VCS]);
+ if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[BCS]);
+
+ if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_ERROR_INTERRUPT)) {
+ DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
+ i915_handle_error(dev, false);
+ }
+
+ if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+ ivybridge_handle_parity_error(dev);
+}
+
+void
+gen6_queue_rps_work(struct inteldrm_softc *dev_priv,
+ u32 pm_iir)
+{
+ /*
+ * IIR bits should never already be set because IMR should
+ * prevent an interrupt from being shown in IIR. The warning
+ * displays a case where we've unsafely cleared
+ * dev_priv->rps.pm_iir. Although missing an interrupt of the same
+ * type is not a problem, it indicates a problem in the logic.
+ *
+ * The mask bit in IMR is cleared by dev_priv->rps.work.
+ */
+
+ mtx_enter(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+ POSTING_READ(GEN6_PMIMR);
+ mtx_leave(&dev_priv->rps.lock);
+
+ workq_queue_task(NULL, &dev_priv->rps.task, 0, gen6_pm_rps_work,
+ dev_priv, NULL);
+}
+
+int
+valleyview_intr(void *arg)
+{
+ drm_i915_private_t *dev_priv = arg;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 iir, gt_iir, pm_iir;
+ int ret = 0;
+ int pipe;
+ u32 pipe_stats[I915_MAX_PIPES];
+ bool blc_event;
+
+// atomic_inc(&dev_priv->irq_received);
+
+ while (true) {
+ iir = I915_READ(VLV_IIR);
+ gt_iir = I915_READ(GTIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+ goto out;
+
+ ret = 1;
+
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+ mtx_enter(&dev_priv->irq_lock);
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ mtx_leave(&dev_priv->irq_lock);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+ }
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ workq_queue_task(NULL, &dev_priv->hotplug_task,
+ 0, i915_hotplug_work_func, dev_priv, NULL);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ I915_WRITE(VLV_IIR, iir);
+ }
+
+out:
+ return ret;
+}
+
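+/*
+ * Decode south display engine (Ibex Peak PCH) interrupts: hotplug
+ * events are handed off to the hotplug task, everything else is
+ * only logged.
+ */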
+void
+ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (pch_iir & SDE_HOTPLUG_MASK)
+ workq_queue_task(NULL, &dev_priv->hotplug_task, 0,
+ i915_hotplug_work_func, dev_priv, NULL);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK)
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK) >>
+ SDE_AUDIO_POWER_SHIFT);
+
+ if (pch_iir & SDE_GMBUS)
+ DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_HDCP_MASK)
+ DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_TRANS_MASK)
+ DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+ if (pch_iir & SDE_POISON)
+ DRM_ERROR("PCH poison interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+ DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+ if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+ DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+ if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+ DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+ if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+ DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
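+/*
+ * CougarPoint (CPT) variant of the PCH interrupt decode; the bit
+ * layout differs from Ibex Peak, hence the separate _CPT masks.
+ */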
+void
+cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ workq_queue_task(NULL, &dev_priv->hotplug_task, 0,
+ i915_hotplug_work_func, dev_priv, NULL);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+ SDE_AUDIO_POWER_SHIFT_CPT);
+
+ if (pch_iir & SDE_AUX_MASK_CPT)
+ DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+
+ if (pch_iir & SDE_GMBUS_CPT)
+ DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+ DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+ DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK_CPT)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+}
+
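+/*
+ * Top level interrupt handler for IvyBridge (also used by Haswell):
+ * the master interrupt is disabled in DEIER while the GT, display
+ * and PM interrupt identity registers are processed and cleared.
+ */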
+int
+ivybridge_intr(void *arg)
+{
+ drm_i915_private_t *dev_priv = arg;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 de_iir, gt_iir, de_ier, pm_iir;
+ int ret = 0;
+ int i;
+
+// atomic_inc(&dev_priv->irq_received);
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+
+ gt_iir = I915_READ(GTIIR);
+ if (gt_iir) {
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ ret = 1;
+ }
+
+ de_iir = I915_READ(DEIIR);
+ if (de_iir) {
+#ifdef notyet
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_gse_intr(dev);
+#endif
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ }
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ cpt_irq_handler(dev, pch_iir);
+
+ /* clear PCH hotplug event before clearing CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+
+ I915_WRITE(DEIIR, de_iir);
+ ret = 1;
+ }
+
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = 1;
+ }
+
+ I915_WRITE(DEIER, de_ier);
+ POSTING_READ(DEIER);
+
+ return ret;
+}
+
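+/*
+ * Gen5 (Ironlake) GT interrupt dispatch; only the render and BSD
+ * rings exist at this point.
+ */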
+void
+ilk_gt_irq_handler(struct drm_device *dev,
+ struct inteldrm_softc *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->rings[RCS]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[VCS]);
+}
+
+int
+ironlake_intr(void *arg)
+{
+ drm_i915_private_t *dev_priv = arg;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int ret = 0;
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+
+// atomic_inc(&dev_priv->irq_received);
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(DEIER);
+
+ de_iir = I915_READ(DEIIR);
+ gt_iir = I915_READ(GTIIR);
+ pch_iir = I915_READ(SDEIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+ (!IS_GEN6(dev) || pm_iir == 0))
+ goto done;
+
+ ret = 1;
+
+ if (IS_GEN5(dev))
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+#ifdef notyet
+ if (de_iir & DE_GSE)
+ intel_opregion_gse_intr(dev);
+#endif
+
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
+ }
+
+ if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
+ ironlake_handle_rps_change(dev);
+
+ if (IS_GEN6(dev) && (pm_iir & GEN6_PM_DEFERRED_EVENTS))
+ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(DEIIR, de_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+done:
+ I915_WRITE(DEIER, de_ier);
+ POSTING_READ(DEIER);
+
+ return ret;
+}
+
+/**
+ * i915_error_work_func - do process context error handling work
+ * @arg1: drm_i915_private pointer (workq task argument)
+ * @arg2: unused
+ *
+ * Fire an error uevent so userspace can see that a hang or error
+ * was detected.
+ */
+void
+i915_error_work_func(void *arg1, void *arg2)
+{
+ drm_i915_private_t *dev_priv = arg1;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+#if 0
+ char *error_event[] = { "ERROR=1", NULL };
+ char *reset_event[] = { "RESET=1", NULL };
+ char *reset_done_event[] = { "ERROR=0", NULL };
+
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+#endif
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ DRM_DEBUG_DRIVER("resetting chip\n");
+// kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+ if (!i915_reset(dev)) {
+ atomic_set(&dev_priv->mm.wedged, 0);
+// kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ }
+ mtx_enter(&dev_priv->error_completion_lock);
+ dev_priv->error_completion++;
+ wakeup(&dev_priv->error_completion);
+ mtx_leave(&dev_priv->error_completion_lock);
+ }
+}
+
+/* NB: please notice the memset */
+void
+i915_get_extra_instdone(struct drm_device *dev,
+ uint32_t *instdone)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch(INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+// WARN_ONCE(1, "Unsupported platform\n");
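+ /* FALLTHROUGH */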
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct drm_i915_error_object *
+i915_error_object_create(struct inteldrm_softc *dev_priv,
+ struct drm_i915_gem_object *src)
+{
+ struct drm_i915_error_object *dst;
+ int i, count;
+ u32 reloc_offset;
+
+ if (src == NULL || src->pages == NULL)
+ return NULL;
+
+ count = src->base.size / PAGE_SIZE;
+
+ dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ reloc_offset = src->gtt_offset;
+ for (i = 0; i < count; i++) {
+ unsigned long flags;
+ void *d;
+
+ d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (d == NULL)
+ goto unwind;
+
+ local_irq_save(flags);
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+ src->has_global_gtt_mapping) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else {
+ struct page *page;
+ void *s;
+
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap_atomic(page);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&page, 1);
+ }
+ local_irq_restore(flags);
+
+ dst->pages[i] = d;
+
+ reloc_offset += PAGE_SIZE;
+ }
+ dst->page_count = count;
+ dst->gtt_offset = src->gtt_offset;
+
+ return dst;
+
+unwind:
+ while (i--)
+ kfree(dst->pages[i]);
+ kfree(dst);
+ return NULL;
+}
+
+void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+void
+i915_error_state_free(struct kref *error_ref)
+{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ kfree(error->ring[i].requests);
+ }
+
+ kfree(error->active_bo);
+ kfree(error->overlay);
+ kfree(error);
+}
+
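+/*
+ * Snapshot the state of a single GEM object into an error buffer
+ * record: size, seqnos, domains, fence, tiling and pin state.
+ */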
+void
+capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
+
+u32
+capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, mm_list) {
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+u32
+capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, gtt_list) {
+ if (obj->pin_count == 0)
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+void
+i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
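+ /* FALLTHROUGH */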
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+}
+
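+/*
+ * Locate the batchbuffer the GPU was executing when the hang was
+ * detected: either the scratch object that ACTHD points into (for
+ * the broken CS TLB workaround) or the first unretired command
+ * buffer on the active list.
+ */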
+struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct inteldrm_softc *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ u32 seqno;
+
+ if (!ring->get_seqno)
+ return NULL;
+
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ seqno = ring->get_seqno(ring, false);
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->ring != ring)
+ continue;
+
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
+ continue;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+ continue;
+
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ return NULL;
+}
+
+void
+i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ error->semaphore_mboxes[ring->id][0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ error->semaphore_mboxes[ring->id][1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS)
+ error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
+
+ error->cpu_ring_head[ring->id] = ring->head;
+ error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+void
+i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for_each_ring(ring, dev_priv, i) {
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
+ i915_error_first_batchbuffer(dev_priv, ring);
+
+ error->ring[i].ringbuffer =
+ i915_error_object_create(dev_priv, ring->obj);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests =
+ kmalloc(count*sizeof(struct drm_i915_error_request),
+ GFP_ATOMIC);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void
+i915_capture_error_state(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+ int i, pipe;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ error = dev_priv->first_error;
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ if (error)
+ return;
+
+ /* Account for pipe specific data like PIPE*STAT */
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ if (!error) {
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
+ }
+
+ DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+ dev->primary->index);
+
+ kref_init(&error->ref);
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
+
+ /* Record buffers on the active and pinned lists. */
+ error->active_bo = NULL;
+ error->pinned_bo = NULL;
+
+ i = 0;
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+ i++;
+ error->active_bo_count = i;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ if (obj->pin_count)
+ i++;
+ error->pinned_bo_count = i - error->active_bo_count;
+
+ if (i) {
+ error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
+ GFP_ATOMIC);
+ if (error->active_bo)
+ error->pinned_bo =
+ error->active_bo + error->active_bo_count;
+ }
+
+ if (error->active_bo)
+ error->active_bo_count =
+ capture_active_bo(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
+
+ if (error->pinned_bo)
+ error->pinned_bo_count =
+ capture_pinned_bo(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.bound_list);
+
+ do_gettimeofday(&error->time);
+
+ error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ if (dev_priv->first_error == NULL) {
+ dev_priv->first_error = error;
+ error = NULL;
+ }
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+ if (error)
+ i915_error_state_free(&error->ref);
+}
+
+void
+i915_destroy_error_state(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ error = dev_priv->first_error;
+ dev_priv->first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+ if (error)
+ kref_put(&error->ref, i915_error_state_free);
+}
+#else
+#define i915_capture_error_state(x)
+#endif
+
+void
+i915_report_and_clear_eir(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t instdone[I915_NUM_INSTDONE_REG];
+ u32 eir = I915_READ(EIR);
+ int pipe, i;
+
+ if (!eir)
+ return;
+
+ printf("render error detected, EIR: 0x%08x\n", eir);
+
+ i915_get_extra_instdone(dev, instdone);
+
+ if (IS_G4X(dev)) {
+ if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ printf(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ printf(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ printf(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
+ printf(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ printf(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ POSTING_READ(IPEIR_I965);
+ }
+ if (eir & GM45_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ printf("page table error\n");
+ printf(" PGTBL_ER: 0x%08x\n", pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ POSTING_READ(PGTBL_ER);
+ }
+ }
+
+ if (!IS_GEN2(dev)) {
+ if (eir & I915_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ printf("page table error\n");
+ printf(" PGTBL_ER: 0x%08x\n", pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ POSTING_READ(PGTBL_ER);
+ }
+ }
+
+ if (eir & I915_ERROR_MEMORY_REFRESH) {
+ printf("memory refresh error:\n");
+ for_each_pipe(pipe)
+ printf("pipe %c stat: 0x%08x\n",
+ pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
+ /* pipestat has already been acked */
+ }
+ if (eir & I915_ERROR_INSTRUCTION) {
+ printf("instruction error\n");
+ printf(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ printf(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
+ if (INTEL_INFO(dev)->gen < 4) {
+ u32 ipeir = I915_READ(IPEIR);
+
+ printf(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
+ printf(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
+ printf(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
+ I915_WRITE(IPEIR, ipeir);
+ POSTING_READ(IPEIR);
+ } else {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ printf(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ printf(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ printf(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ printf(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ POSTING_READ(IPEIR_I965);
+ }
+ }
+
+ I915_WRITE(EIR, eir);
+ POSTING_READ(EIR);
+ eir = I915_READ(EIR);
+ if (eir) {
+ /*
+ * some errors might have become stuck,
+ * mask them.
+ */
+ DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ I915_WRITE(EMR, I915_READ(EMR) | eir);
+ I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ }
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void
+i915_handle_error(struct drm_device *dev, bool wedged)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ i915_capture_error_state(dev);
+ i915_report_and_clear_eir(dev);
+
+ if (wedged) {
+// INIT_COMPLETION(dev_priv->error_completion);
+ atomic_set(&dev_priv->mm.wedged, 1);
+
+ /*
+ * Wakeup waiting processes so they don't hang
+ */
+ for_each_ring(ring, dev_priv, i)
+ wakeup(ring);
+ }
+
+ workq_queue_task(NULL, &dev_priv->error_task, 0,
+ i915_error_work_func, dev_priv, NULL);
+}
+
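+/*
+ * Detect a page flip that completed without delivering its flip
+ * pending interrupt by comparing the current scanout base address
+ * against the object of the pending flip.
+ */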
+void
+i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj;
+ struct intel_unpin_work *work;
+ bool stall_detected;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ mtx_enter(&dev->event_lock);
+ work = intel_crtc->unpin_work;
+
+ if (work == NULL ||
+ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+ !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ mtx_leave(&dev->event_lock);
+ return;
+ }
+
+ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ obj = work->pending_flip_obj;
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int dspsurf = DSPSURF(intel_crtc->plane);
+ stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+ obj->gtt_offset;
+ } else {
+ int dspaddr = DSPADDR(intel_crtc->plane);
+ stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+ crtc->y * crtc->fb->pitches[0] +
+ crtc->x * crtc->fb->bits_per_pixel/8);
+ }
+
+ mtx_leave(&dev->event_lock);
+
+ if (stall_detected) {
+ DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+ intel_prepare_page_flip(dev, intel_crtc->plane);
+ }
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+int
+i915_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!i915_pipeconf_enabled(dev, pipe))
+ return -EINVAL;
+
+ mtx_enter(&dev_priv->irq_lock);
+ if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ else
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ /* maintain vblank delivery even in deep C-states */
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
+ mtx_leave(&dev_priv->irq_lock);
+
+ return 0;
+}
+
+int
+ironlake_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!i915_pipeconf_enabled(dev, pipe))
+ return -EINVAL;
+
+ mtx_enter(&dev_priv->irq_lock);
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+ mtx_leave(&dev_priv->irq_lock);
+
+ return 0;
+}
+
+int
+ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!i915_pipeconf_enabled(dev, pipe))
+ return -EINVAL;
+
+ mtx_enter(&dev_priv->irq_lock);
+ ironlake_enable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ mtx_leave(&dev_priv->irq_lock);
+
+ return 0;
+}
+
+int
+valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 imr;
+
+ if (!i915_pipeconf_enabled(dev, pipe))
+ return -EINVAL;
+
+ mtx_enter(&dev_priv->irq_lock);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0)
+ imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ else
+ imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ I915_WRITE(VLV_IMR, imr);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ mtx_leave(&dev_priv->irq_lock);
+
+ return 0;
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+void
+i915_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ mtx_enter(&dev_priv->irq_lock);
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
+
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ mtx_leave(&dev_priv->irq_lock);
+}
+
+void
+ironlake_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ mtx_enter(&dev_priv->irq_lock);
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+ mtx_leave(&dev_priv->irq_lock);
+}
+
+void
+ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ mtx_enter(&dev_priv->irq_lock);
+ ironlake_disable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (pipe * 5));
+ mtx_leave(&dev_priv->irq_lock);
+}
+
+void
+valleyview_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 imr;
+
+ mtx_enter(&dev_priv->irq_lock);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0)
+ imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ else
+ imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ I915_WRITE(VLV_IMR, imr);
+ mtx_leave(&dev_priv->irq_lock);
+}
+
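+/* Seqno of the most recently submitted request on the ring. */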
+u32
+ring_last_seqno(struct intel_ring_buffer *ring)
+{
+ return list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list)->seqno;
+}
+
+bool
+i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+ if (list_empty(&ring->request_list) ||
+ i915_seqno_passed(ring->get_seqno(ring, false),
+ ring_last_seqno(ring))) {
+ /* Issue a wake-up to catch stuck h/w. */
+#ifdef notyet
+ if (wakeup(ring) > 0) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
+ *err = true;
+ }
+#else
+ wakeup(ring);
+#endif
+ return true;
+ }
+ return false;
+}
+
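+/*
+ * If the ring is stuck on a WAIT_FOR_EVENT, rewriting the ring
+ * control register clears the wait condition and lets the ring
+ * advance again.
+ */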
+bool
+kick_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT) {
+ DRM_ERROR("Kicking stuck wait on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ return false;
+}
+
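+/*
+ * Declare the GPU hung once hangcheck has fired repeatedly without
+ * progress; on everything but gen2, first try to unstick any ring
+ * that is waiting on an event.
+ */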
+bool
+i915_hangcheck_hung(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->hangcheck_count++ > 1) {
+ bool hung = true;
+
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+
+ if (!IS_GEN2(dev)) {
+ struct intel_ring_buffer *ring;
+ int i;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ for_each_ring(ring, dev_priv, i)
+ hung &= !kick_ring(ring);
+ }
+
+ return hung;
+ }
+
+ return false;
+}
+
+/**
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. The first time this is called we simply record
+ * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
+ * again, we assume the chip is wedged and try to fix it.
+ */
+void
+i915_hangcheck_elapsed(void *arg)
+{
+ struct inteldrm_softc *dev_priv = arg;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
+ struct intel_ring_buffer *ring;
+ bool err = false, idle;
+ int i;
+
+#ifdef notyet
+ if (!i915_enable_hangcheck)
+ return;
+#endif
+
+ memset(acthd, 0, sizeof(acthd));
+ idle = true;
+ for_each_ring(ring, dev_priv, i) {
+ idle &= i915_hangcheck_ring_idle(ring, &err);
+ acthd[i] = intel_ring_get_active_head(ring);
+ }
+
+ /* If all work is done then ACTHD clearly hasn't advanced. */
+ if (idle) {
+ if (err) {
+ if (i915_hangcheck_hung(dev))
+ return;
+
+ goto repeat;
+ }
+
+ dev_priv->hangcheck_count = 0;
+ return;
+ }
+
+ i915_get_extra_instdone(dev, instdone);
+ if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
+ memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
+ if (i915_hangcheck_hung(dev))
+ return;
+ } else {
+ dev_priv->hangcheck_count = 0;
+
+ memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
+ memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
+ }
+
+repeat:
+ /* Reset timer in case the chip hangs without another request being added */
+ timeout_add_msec(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
+}
+
+/* drm_dma.h hooks */
+void
+ironlake_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+// atomic_set(&dev_priv->irq_received, 0);
+
+ I915_WRITE(HWSTAM, 0xeffe);
+
+ /* XXX hotplug from PCH */
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ POSTING_READ(DEIER);
+
+ /* and GT */
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ /* south display irq */
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ POSTING_READ(SDEIER);
+}
+
+void
+valleyview_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+// atomic_set(&dev_priv->irq_received, 0);
+
+ /* VLV magic */
+ I915_WRITE(VLV_IMR, 0);
+ I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+ I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+ I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+ /* and GT */
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ I915_WRITE(DPINVGTT, 0xff);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
+/*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ *
+ * This register is the same on all known PCH chips.
+ */
+void
+ironlake_enable_pch_hotplug(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug;
+
+ hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+ hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+ hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+ hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+ I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
+int
+ironlake_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ /* enable the kinds of interrupts that are always enabled */
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+ u32 render_irqs;
+ u32 hotplug_mask;
+
+ dev_priv->irq_mask = ~display_mask;
+
+ /* should always be able to generate an irq */
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+ POSTING_READ(DEIER);
+
+ dev_priv->gt_irq_mask = ~0;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ if (IS_GEN6(dev))
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
+ else
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_BSD_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ if (HAS_PCH_CPT(dev)) {
+ hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+ SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
+ } else {
+ hotplug_mask = (SDE_CRT_HOTPLUG |
+ SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG |
+ SDE_PORTD_HOTPLUG |
+ SDE_AUX_MASK);
+ }
+
+ dev_priv->pch_irq_mask = ~hotplug_mask;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+ I915_WRITE(SDEIER, hotplug_mask);
+ POSTING_READ(SDEIER);
+
+ ironlake_enable_pch_hotplug(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ /* Clear & enable PCU event interrupts */
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ }
+
+ return 0;
+}
+
+int
+ivybridge_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ /* enable the kinds of interrupts that are always enabled */
+ u32 display_mask =
+ DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+ DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB;
+ u32 render_irqs;
+ u32 hotplug_mask;
+
+ dev_priv->irq_mask = ~display_mask;
+
+ /* should always be able to generate an irq */
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER,
+ display_mask |
+ DE_PIPEC_VBLANK_IVB |
+ DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
+ POSTING_READ(DEIER);
+
+ dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+ SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
+ dev_priv->pch_irq_mask = ~hotplug_mask;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+ I915_WRITE(SDEIER, hotplug_mask);
+ POSTING_READ(SDEIER);
+
+ ironlake_enable_pch_hotplug(dev);
+
+ return 0;
+}
+
+int
+valleyview_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask;
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ u32 render_irqs;
+ u16 msid;
+
+ enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ /*
+ * Leave vblank interrupts masked initially; enable/disable will
+ * toggle them based on usage.
+ */
+ dev_priv->irq_mask = (~enable_mask) |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ /* Hack for broken MSIs on VLV */
+ pci_conf_write(dev_priv->pc, dev_priv->tag, 0x94, 0xfee00000);
+ msid = pci_conf_read(dev_priv->pc, dev_priv->tag, 0x98);
+ msid &= 0xff; /* mask out delivery bits */
+ msid |= (1<<14);
+ pci_conf_write(dev_priv->pc, dev_priv->tag, 0x98, msid);
+
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(PIPESTAT(0), 0xffff);
+ I915_WRITE(PIPESTAT(1), 0xffff);
+ POSTING_READ(VLV_IER);
+
+ i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+ i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ /* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+ return 0;
+}
+
+void
+valleyview_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
+void
+ironlake_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+}
+
+void
+i8xx_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+// atomic_set(&dev_priv->irq_received, 0);
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ POSTING_READ16(IER);
+}
+
+int
+i8xx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE16(EMR,
+ ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+ return 0;
+}
+
+int
+i8xx_intr(void *arg)
+{
+ drm_i915_private_t *dev_priv = arg;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u16 iir, new_iir;
+ u32 pipe_stats[2];
+ int pipe;
+ u16 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+// atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ return 0;
+
+ while (iir & ~flip_mask) {
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ mtx_enter(&dev_priv->irq_lock);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ mtx_leave(&dev_priv->irq_lock);
+
+ I915_WRITE16(IIR, iir & ~flip_mask);
+ new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+// i915_update_dri1_breadcrumb(dev);
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[RCS]);
+
+ if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 0)) {
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 1)) {
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ iir = new_iir;
+ }
+
+ return 1;
+}
+
+void
+i8xx_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+void
+i915_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+// atomic_set(&dev_priv->irq_received, 0);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+int
+i915_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+#ifdef notyet
+ ~(I915_ASLE_INTERRUPT |
+#else
+ ~(
+#endif
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask =
+#ifdef notyet
+ I915_ASLE_INTERRUPT |
+#endif
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ /* Enable in IER... */
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+ /* and unmask in IMR */
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ }
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ /* Ignore TV since it's buggy */
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+
+// intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+int
+i915_intr(void *arg)
+{
+ drm_i915_private_t *dev_priv = arg;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ u32 flip[2] = {
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
+ };
+ int pipe, ret = 0;
+
+// atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+ do {
+ bool irq_received = (iir & ~flip_mask) != 0;
+ bool blc_event = false;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ mtx_enter(&dev_priv->irq_lock);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /* Clear the PIPE*STAT regs before the IIR */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = true;
+ }
+ }
+ mtx_leave(&dev_priv->irq_lock);
+
+ if (!irq_received)
+ break;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ workq_queue_task(NULL, &dev_priv->hotplug_task,
+ 0, i915_hotplug_work_func, dev_priv, NULL);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ POSTING_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[RCS]);
+
+ for_each_pipe(pipe) {
+ int plane = pipe;
+ if (IS_MOBILE(dev))
+ plane = !plane;
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ if (iir & flip[plane]) {
+ intel_prepare_page_flip(dev, plane);
+ intel_finish_page_flip(dev, pipe);
+ flip_mask &= ~flip[plane];
+ }
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+#ifdef notyet
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+#endif
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ ret = 1;
+ iir = new_iir;
+ } while (iir & ~flip_mask);
+
+// i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
+void
+i915_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xffff);
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+void
+i965_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+// atomic_set(&dev_priv->irq_received, 0);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ I915_WRITE(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+int
+i965_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
+ u32 enable_mask;
+ u32 error_mask;
+
+ /* Unmask the interrupts that we always want on. */
+#ifdef notyet
+ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+#else
+ dev_priv->irq_mask = ~(
+#endif
+ I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask = ~dev_priv->irq_mask;
+ enable_mask |= I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ /*
+ * Enable some error detection, note the instruction error mask
+ * bit is reserved, so we leave it masked.
+ */
+ if (IS_G4X(dev)) {
+ error_mask = ~(GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH);
+ } else {
+ error_mask = ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+ }
+ I915_WRITE(EMR, error_mask);
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ /* Note HDMI and DP share hotplug bits */
+ hotplug_en = 0;
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (IS_G4X(dev)) {
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ } else {
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ }
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+
+ /*
+ * Programming the CRT detection parameters tends to generate a
+ * spurious hotplug event about three seconds later, so just do
+ * it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ /* Ignore TV since it's buggy */
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+#ifdef notyet
+ intel_opregion_enable_asle(dev);
+#endif
+
+ return 0;
+}
+
+int
+i965_intr(void *arg)
+{
+ drm_i915_private_t *dev_priv = arg;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 iir, new_iir;
+ u32 pipe_stats[I915_MAX_PIPES];
+ int irq_received;
+ int ret = 0, pipe;
+
+// atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+
+ for (;;) {
+ bool blc_event = false;
+
+ irq_received = iir != 0;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ mtx_enter(&dev_priv->irq_lock);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ mtx_leave(&dev_priv->irq_lock);
+
+ if (!irq_received)
+ break;
+
+ ret = 1;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ workq_queue_task(NULL, &dev_priv->hotplug_task,
+ 0, i915_hotplug_work_func, dev_priv, NULL);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[VCS]);
+
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 0);
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 1);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+#ifdef notyet
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+#endif
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
+ }
+
+// i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
+void
+i965_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe),
+ I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+void
+intel_irq_init(struct drm_device *dev)
+{
+// struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ else
+ dev->driver->get_vblank_timestamp = NULL;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev->driver->irq_handler = valleyview_intr;
+ dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_postinstall = valleyview_irq_postinstall;
+ dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* Share pre & uninstall handlers with ILK/SNB */
+ dev->driver->irq_handler = ivybridge_intr;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (IS_HASWELL(dev)) {
+ /* Share interrupts handling with IVB */
+ dev->driver->irq_handler = ivybridge_intr;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev->driver->irq_handler = ironlake_intr;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ironlake_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ironlake_enable_vblank;
+ dev->driver->disable_vblank = ironlake_disable_vblank;
+ } else {
+ if (INTEL_INFO(dev)->gen == 2) {
+ dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_postinstall = i8xx_irq_postinstall;
+ dev->driver->irq_handler = i8xx_intr;
+ dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ } else if (INTEL_INFO(dev)->gen == 3) {
+ dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_handler = i915_intr;
+ } else {
+ dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_postinstall = i965_irq_postinstall;
+ dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_handler = i965_intr;
+ }
+ dev->driver->enable_vblank = i915_enable_vblank;
+ dev->driver->disable_vblank = i915_disable_vblank;
+ }
+}
diff --git a/sys/dev/pci/drm/i915_reg.h b/sys/dev/pci/drm/i915/i915_reg.h
index bfc1846f3c0..25ac61b9b2b 100644
--- a/sys/dev/pci/drm/i915_reg.h
+++ b/sys/dev/pci/drm/i915/i915_reg.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: i915_reg.h,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
@@ -26,13 +27,29 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
+
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
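
[Editor's note: the `_PIPE`-style helpers above interpolate a register address from its pipe/transcoder/port index, so only the A and B instances need tabulating; the `_MASKED_BIT_*` pair targets registers whose top 16 bits are per-bit write enables. A standalone sketch of how they expand — the printf harness is illustrative only; the addresses are the PIPESTAT A/B offsets defined later in this header:

#include <stdio.h>

#define _PIPE(pipe, a, b)	((a) + (pipe)*((b)-(a)))
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)

int
main(void)
{
	/* Linear interpolation: pipe 0 yields a, pipe 1 yields b. */
	printf("PIPESTAT(1) = %#x\n", _PIPE(1, 0x70024, 0x71024));

	/* Masked-bit registers: bits 31:16 enable writes to bits 15:0,
	 * so bits with a 0 enable keep their current value. */
	printf("enable  bit 9: %#010x\n", _MASKED_BIT_ENABLE(1 << 9));
	printf("disable bit 9: %#010x\n", _MASKED_BIT_DISABLE(1 << 9));
	return 0;
}
]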
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define IVB_GMCH_GMS_SHIFT 4
+#define IVB_GMCH_GMS_MASK 0xf
+
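[Editor's note: a hedged sketch of decoding the new SNB_GMCH_CTRL fields from the 16-bit config word. The unit scaling (1 MB per GGMS step, 32 MB per GMS step) mirrors how Linux's gen6 GTT code consumes these fields and is an assumption, not something this patch states:

#include <stdint.h>

/* Hypothetical helpers; gmch_ctrl is the SNB_GMCH_CTRL config word. */
static unsigned int
snb_gtt_size_mb(uint16_t gmch_ctrl)
{
	/* GGMS: GTT graphics memory size, assumed 1 MB units. */
	return (gmch_ctrl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
}

static unsigned int
snb_stolen_size_mb(uint16_t gmch_ctrl)
{
	/* GMS: graphics mode select, assumed 32 MB units. */
	return ((gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK) * 32;
}
]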
/* PCI config space */
@@ -75,6 +92,7 @@
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GRDOM_RESET_ENABLE (1<<0)
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
@@ -97,22 +115,6 @@
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID (1 << 0)
-#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_CACHE_BITS (3 << 1)
-#define GEN6_PTE_GFDT (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -123,6 +125,13 @@
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+#define GAC_ECO_BITS 0x14090
+#define ECOBITS_PPGTT_CACHE64B (3<<8)
+#define ECOBITS_PPGTT_CACHE4B (0<<8)
+
+#define GAB_CTL 0x24000
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -195,6 +204,17 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
+
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -214,12 +234,20 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
-#define MI_INVALIDATE_TLB (1<<18)
-#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
-#define MI_BATCH_NON_SECURE (1)
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -274,7 +302,9 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
@@ -299,6 +329,65 @@
#define DEBUG_RESET_RENDER (1<<8)
#define DEBUG_RESET_DISPLAY (1<<9)
+/*
+ * DPIO - a special bus for various display related registers to hide behind:
+ * 0x800c: m1, m2, n, p1, p2, k dividers
+ * 0x8014: REF and SFR select
+ * 0x8014: N divider, VCO select
+ * 0x801c/3c: core clock bits
+ * 0x8048/68: low pass filter coefficients
+ * 0x8100: fast clock controls
+ */
+#define DPIO_PKT 0x2100
+#define DPIO_RID (0<<24)
+#define DPIO_OP_WRITE (1<<16)
+#define DPIO_OP_READ (0<<16)
+#define DPIO_PORTID (0x12<<8)
+#define DPIO_BYTE (0xf<<4)
+#define DPIO_BUSY (1<<0) /* status only */
+#define DPIO_DATA 0x2104
+#define DPIO_REG 0x2108
+#define DPIO_CTL 0x2110
+#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1<<1)
+#define DPIO_RESET (1<<0)
+
+#define _DPIO_DIV_A 0x800c
+#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
+#define DPIO_K_SHIFT (24) /* 4 bits */
+#define DPIO_P1_SHIFT (21) /* 3 bits */
+#define DPIO_P2_SHIFT (16) /* 5 bits */
+#define DPIO_N_SHIFT (12) /* 4 bits */
+#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
+#define DPIO_M2DIV_MASK 0xff
+#define _DPIO_DIV_B 0x802c
+#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+
+#define _DPIO_REFSFR_A 0x8014
+#define DPIO_REFSEL_OVERRIDE 27
+#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
+#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_PLL_REFCLK_SEL_MASK 3
+#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
+#define _DPIO_REFSFR_B 0x8034
+#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+
+#define _DPIO_CORE_CLK_A 0x801c
+#define _DPIO_CORE_CLK_B 0x803c
+#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+
+#define _DPIO_LFP_COEFF_A 0x8048
+#define _DPIO_LFP_COEFF_B 0x8068
+#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
+
+#define DPIO_FASTCLK_DISABLE 0x8100
+
+#define DPIO_DATA_CHANNEL1 0x8220
+#define DPIO_DATA_CHANNEL2 0x8420
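
[Editor's note: a sketch of a sideband read through the DPIO mailbox described above: program the target register, issue a read packet, poll DPIO_BUSY, fetch the data. The handshake is inferred from the DPIO_BUSY status comment; the driver's real sequence adds locking and a timeout:

static uint32_t
dpio_read(struct drm_device *dev, int reg)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
	    DPIO_BYTE);
	while (I915_READ(DPIO_PKT) & DPIO_BUSY)
		;	/* a real implementation would time out here */
	return I915_READ(DPIO_DATA);
}
]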
/*
* Fence registers
@@ -358,8 +447,6 @@
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
-#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
-#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
@@ -368,6 +455,7 @@
#define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
+#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -396,6 +484,11 @@
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
+#define GEN7_INSTDONE_1 0x0206c
+#define GEN7_SC_INSTDONE 0x07100
+#define GEN7_SAMPLER_INSTDONE 0x0e160
+#define GEN7_ROW_INSTDONE 0x0e164
+#define I915_NUM_INSTDONE_REG 4
#define RING_IPEIR(base) ((base)+0x64)
#define RING_IPEHR(base) ((base)+0x68)
#define RING_INSTDONE(base) ((base)+0x6c)
@@ -415,14 +508,20 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
+#define GEN7_ERR_INT 0x44040
+#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+
+#define DERRMR 0x44050
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
*/
#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 0x0208c
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
@@ -430,10 +529,17 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
+
+#define GEN6_GT_MODE 0x20d0
+#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
@@ -445,14 +551,20 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
-#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
-#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
+#define VLV_DISPLAY_BASE 0x180000
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
+#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define GCFG_DIS (1<<8)
+#define VLV_IIR_RW 0x182084
+#define VLV_IER 0x1820a0
+#define VLV_IIR 0x1820a4
+#define VLV_IMR 0x1820a8
+#define VLV_ISR 0x1820ac
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -498,7 +610,6 @@
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
-#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
/* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
@@ -563,7 +674,7 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
-#define CM0_MASK_SHIFT 16
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -573,11 +684,18 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL_EN (1<<0)
#define ECOSKPD 0x021d0
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
-/* GEN6 interrupt control */
+#define CACHE_MODE_1 0x7004 /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+
+/* GEN6 interrupt control
+ * Note that the per-ring interrupt bits do alias with the global interrupt bits
+ * in GTIMR. */
#define GEN6_RENDER_HWSTAM 0x2098
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@@ -602,10 +720,10 @@
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
+#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
+#define GEN6_BSD_GO_INDICATOR (1 << 4)
#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
@@ -613,6 +731,21 @@
#define GEN6_BSD_RNCID 0x12198
+#define GEN7_FF_THREAD_MODE 0x20a0
+#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
+#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0<<12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+
/*
* Framebuffer compression (915+ only)
*/
@@ -741,9 +874,9 @@
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
- /* 6 reserved */
-#define GMBUS_PORT_DPD 7 /* HDMID */
-#define GMBUS_NUM_PORTS 8
+#define GMBUS_PORT_DPD 6 /* HDMID */
+#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
+#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
@@ -795,7 +928,9 @@
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -807,6 +942,8 @@
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1<<15)
+#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@@ -902,6 +1039,7 @@
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
@@ -1042,6 +1180,9 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
+#define FW_BLC_SELF_VLV 0x6500
+#define FW_CSPWRDWNEN (1<<15)
+
/*
* Palette regs
*/
@@ -1347,6 +1488,10 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
+#define GEN6_GT_THREAD_STATUS_REG 0x13805c
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
+
#define GEN6_GT_PERF_STATUS 0x145948
#define GEN6_RP_STATE_LIMITS 0x145994
#define GEN6_RP_STATE_CAP 0x145998
@@ -1356,6 +1501,39 @@
*/
#define CCID 0x2180
#define CCID_EN (1<<0)
+#define CXT_SIZE 0x21a0
+#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
+ GEN6_CXT_RING_SIZE(cxt_reg) + \
+ GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+ GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE 0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
+ GEN7_CXT_RING_SIZE(ctx_reg) + \
+ GEN7_CXT_RENDER_SIZE(ctx_reg) + \
+ GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+ GEN7_CXT_GT1_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
+#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
+#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
+#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
+ HSW_CXT_RING_SIZE(ctx_reg) + \
+ HSW_CXT_RENDER_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
+
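[Editor's note: a sketch of turning the CXT_SIZE fields into a byte count. The 64-byte cacheline unit is an assumption carried over from how Linux sizes gen6 hardware contexts; this patch only defines the field extractors:

static uint32_t
gen6_context_bytes(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t reg = I915_READ(CXT_SIZE);

	/* Each field counts cachelines; 64 bytes each is assumed. */
	return GEN6_CXT_TOTAL_SIZE(reg) * 64;
}
]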
/*
* Overlay regs
*/
@@ -1397,23 +1575,46 @@
#define _VSYNCSHIFT_B 0x61028
-#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
+#define PCH_ADPA 0xe1100
+#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
+
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bits 25-16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
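
[Editor's note: the new ADPA hotplug bits support force-triggered CRT detection: set ADPA_CRT_HOTPLUG_FORCE_TRIGGER, wait for the hardware to clear it, then read the monitor bits. A hedged sketch against PCH_ADPA; real code adds timeouts plus the period/voltage setup:

static int
crt_detect_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t adpa = I915_READ(PCH_ADPA);

	I915_WRITE(PCH_ADPA, adpa | ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
	while (I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
		;	/* hardware clears the bit when sampling is done */

	return (I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_MONITOR_MASK) !=
	    ADPA_CRT_HOTPLUG_MONITOR_NONE;
}
]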
@@ -1460,20 +1661,34 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT 0x61114
-#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
-#define DPB_HOTPLUG_INT_STATUS (1 << 29)
-#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
-#define DPC_HOTPLUG_INT_STATUS (1 << 28)
-#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
-#define DPD_HOTPLUG_INT_STATUS (1 << 27)
+/* HDMI/DP bits are gen4+ */
+#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define DPD_HOTPLUG_INT_STATUS (3 << 21)
+#define DPC_HOTPLUG_INT_STATUS (3 << 19)
+#define DPB_HOTPLUG_INT_STATUS (3 << 17)
+/* HDMI bits are shared with the DP bits */
+#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
+#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
+#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
+#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
+/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
-#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
/* SDVO port control */
#define SDVOB 0x61140
@@ -1598,12 +1813,21 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_CTL 0x61170
+/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_PORT_D (3 << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
@@ -1612,6 +1836,14 @@
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
#define PP_STATUS 0x61200
@@ -1682,18 +1914,35 @@
#define PFIT_AUTO_RATIOS 0x61238
/* Backlight control */
-#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
+#define BLM_PWM_ENABLE (1 << 31)
+#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL 0x61254
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
*
* The actual value is this field multiplied by two.
*/
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
@@ -1703,9 +1952,24 @@
*/
#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
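
[Editor's note: putting the two fields together, a sketch of clamping and programming an absolute backlight level; per the comment above, the modulation-frequency field must be doubled to get the maximum duty-cycle value. Combination-mode and PNV polarity quirks are ignored here:

static void
backlight_set(struct drm_device *dev, uint32_t level)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t ctl = I915_READ(BLC_PWM_CTL);
	uint32_t max = ((ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
	    BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;

	if (level > max)
		level = max;
	I915_WRITE(BLC_PWM_CTL,
	    (ctl & ~BACKLIGHT_DUTY_CYCLE_MASK) | level);
}
]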
#define BLC_HIST_CTL 0x61260
+/* New registers for PCH-split platforms. Safe where new bits show up, the
+ * register layout matches gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 0x48250
+#define BLC_PWM_CPU_CTL 0x48254
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 0xc8250
+#define BLM_PCH_PWM_ENABLE (1 << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 0xc8254
+
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
@@ -2378,7 +2642,8 @@
/* Pipe A */
#define _PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
+#define DSL_LINEMASK_GEN2 0x00000fff
+#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
@@ -2392,6 +2657,7 @@
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
+#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2420,23 +2686,30 @@
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -2455,12 +2728,46 @@
#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define VLV_DPFLIPSTAT 0x70028
+#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
+#define PIPEB_HLINE_INT_EN (1<<28)
+#define PIPEB_VBLANK_INT_EN (1<<27)
+#define SPRITED_FLIPDONE_INT_EN (1<<26)
+#define SPRITEC_FLIPDONE_INT_EN (1<<25)
+#define PLANEB_FLIPDONE_INT_EN (1<<24)
+#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
+#define PIPEA_HLINE_INT_EN (1<<20)
+#define PIPEA_VBLANK_INT_EN (1<<19)
+#define SPRITEB_FLIPDONE_INT_EN (1<<18)
+#define SPRITEA_FLIPDONE_INT_EN (1<<17)
+#define PLANEA_FLIPDONE_INT_EN (1<<16)
+
+#define DPINVGTT 0x7002c /* VLV only */
+#define CURSORB_INVALID_GTT_INT_EN (1<<23)
+#define CURSORA_INVALID_GTT_INT_EN (1<<22)
+#define SPRITED_INVALID_GTT_INT_EN (1<<21)
+#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
+#define PLANEB_INVALID_GTT_INT_EN (1<<19)
+#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
+#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
+#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define DPINVGTT_EN_MASK 0xff0000
+#define CURSORB_INVALID_GTT_STATUS (1<<7)
+#define CURSORA_INVALID_GTT_STATUS (1<<6)
+#define SPRITED_INVALID_GTT_STATUS (1<<5)
+#define SPRITEC_INVALID_GTT_STATUS (1<<4)
+#define PLANEB_INVALID_GTT_STATUS (1<<3)
+#define SPRITEB_INVALID_GTT_STATUS (1<<2)
+#define SPRITEA_INVALID_GTT_STATUS (1<<1)
+#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define DPINVGTT_STATUS_MASK 0xff
+
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@@ -2490,11 +2797,28 @@
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
+/* drain latency register values */
+#define DRAIN_LATENCY_PRECISION_32 32
+#define DRAIN_LATENCY_PRECISION_16 16
+#define VLV_DDL1 0x70050
+#define DDL_CURSORA_PRECISION_32 (1<<31)
+#define DDL_CURSORA_PRECISION_16 (0<<31)
+#define DDL_CURSORA_SHIFT 24
+#define DDL_PLANEA_PRECISION_32 (1<<7)
+#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define VLV_DDL2 0x70054
+#define DDL_CURSORB_PRECISION_32 (1<<31)
+#define DDL_CURSORB_PRECISION_16 (0<<31)
+#define DDL_CURSORB_SHIFT 24
+#define DDL_PLANEB_PRECISION_32 (1<<7)
+#define DDL_PLANEB_PRECISION_16 (0<<7)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
+#define VALLEYVIEW_FIFO_SIZE 255
#define G4X_FIFO_SIZE 127
#define I965_FIFO_SIZE 512
#define I945_FIFO_SIZE 127
@@ -2502,6 +2826,7 @@
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
+#define VALLEYVIEW_MAX_WM 0xff
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
@@ -2516,6 +2841,7 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+#define VALLEYVIEW_CURSOR_MAX_WM 64
#define I965_CURSOR_FIFO 64
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
@@ -2689,12 +3015,19 @@
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_YUV422 (0x0<<26)
#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
+#define DISPPLANE_BGRA555 (0x3<<26)
+#define DISPPLANE_BGRX555 (0x4<<26)
+#define DISPPLANE_BGRX565 (0x5<<26)
+#define DISPPLANE_BGRX888 (0x6<<26)
+#define DISPPLANE_BGRA888 (0x7<<26)
+#define DISPPLANE_RGBX101010 (0x8<<26)
+#define DISPPLANE_RGBA101010 (0x9<<26)
+#define DISPPLANE_BGRX101010 (0xa<<26)
+#define DISPPLANE_RGBX161616 (0xc<<26)
+#define DISPPLANE_RGBX888 (0xe<<26)
+#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -2715,6 +3048,8 @@
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -2723,6 +3058,16 @@
#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+ (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
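
[Editor's note: I915_MODIFY_DISPBASE latches a new page-aligned base while preserving the low (non-base) bits already in the register. A hypothetical use, where gtt_offset and plane are stand-ins:

/* Only the page-aligned bits of the new address are taken; the
 * register's existing low bits are kept by the macro. */
I915_MODIFY_DISPBASE(DSPSURF(plane), gtt_offset & DISP_BASEADDR_MASK);
]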
/* VBIOS flags */
#define SWF00 0x71410
@@ -2761,6 +3106,8 @@
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
+#define _DSPBOFFSET 0x711A4
+#define _DSPBSURFLIVE 0x711AC
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -2826,6 +3173,7 @@
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
@@ -2860,6 +3208,8 @@
#define _SPRA_SURF 0x7029c
#define _SPRA_KEYMAX 0x702a0
#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
#define SPRITE_SCALE_ENABLE (1<<31)
#define SPRITE_FILTER_MASK (3<<29)
@@ -2880,6 +3230,8 @@
#define _SPRB_SURF 0x7129c
#define _SPRB_KEYMAX 0x712a0
#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
@@ -2893,8 +3245,10 @@
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -2929,12 +3283,6 @@
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
-#define PCH_DSPCLK_GATE_D 0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
-
#define PCH_3DCGDIS0 0x46020
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -2984,20 +3332,22 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
@@ -3056,26 +3406,38 @@
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
+#define DE_PIPEC_VBLANK_IVB (1<<10)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEB_VBLANK_IVB (1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
+#define MASTER_INTERRUPT_ENABLE (1<<31)
+
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
-/* GT interrupt */
-#define GT_PIPE_NOTIFY (1 << 4)
-#define GT_MASTER_ERROR (1 << 3)
-#define GT_SYNC_STATUS (1 << 2)
-#define GT_USER_INTERRUPT (1 << 0)
-#define GT_BSD_USER_INTERRUPT (1 << 5)
-#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
-#define GT_BLT_USER_INTERRUPT (1 << 22)
+/* GT interrupt.
+ * Note that for gen6+ the ring-specific interrupt bits do alias with the
+ * corresponding bits in the per-ring interrupt control registers. */
+#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
+#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
+#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
+#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
+#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
+#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
+#define GT_PIPE_NOTIFY (1 << 4)
+#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
+#define GT_SYNC_STATUS (1 << 2)
+#define GT_USER_INTERRUPT (1 << 0)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -3094,15 +3456,13 @@
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
-#define ILK_DSPCLK_GATE 0x42020
-#define IVB_VRHUNIT_CLK_GATE (1<<28)
-#define ILK_DPARB_CLK_GATE (1<<5)
-#define ILK_DPFD_CLK_GATE (1<<7)
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define ILK_CLK_FBC (1<<7)
-#define ILK_DPFC_DIS1 (1<<8)
-#define ILK_DPFC_DIS2 (1<<9)
+#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
#define IVB_CHICKEN3 0x4200c
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@@ -3118,17 +3478,24 @@
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+#define GEN7_L3SQCREG4 0xb034
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define HSW_FUSE_STRAP 0x42014
+#define HSW_CDCLK_LIMIT (1 << 24)
+
/* PCH */
-/* south display engine interrupt */
+/* south display engine interrupt: IBX */
#define SDE_AUDIO_POWER_D (1 << 27)
#define SDE_AUDIO_POWER_C (1 << 26)
#define SDE_AUDIO_POWER_B (1 << 25)
@@ -3164,15 +3531,44 @@
#define SDE_TRANSA_CRC_ERR (1 << 1)
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
-/* CPT */
-#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -3225,15 +3621,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -3328,6 +3724,57 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define VLV_VIDEO_DIP_CTL_A 0x60200
+#define VLV_VIDEO_DIP_DATA_A 0x60208
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+
+#define VLV_VIDEO_DIP_CTL_B 0x61170
+#define VLV_VIDEO_DIP_DATA_B 0x61174
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A 0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define HSW_VIDEO_DIP_GCP_A 0x60210
+
+#define HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+#define HSW_VIDEO_DIP_GCP_B 0x61210
+
+#define HSW_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@@ -3386,18 +3833,26 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
+
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SOUTH_CHICKEN2 0xc2004
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
@@ -3407,6 +3862,7 @@
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
@@ -3468,11 +3924,12 @@
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
#define FDI_12BPC (3<<16)
-#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
#define FDI_RX_PLL_ENABLE (1<<13)
#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
@@ -3488,14 +3945,25 @@
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+/* LPT */
+#define FDI_PORT_WIDTH_2X_LPT (1<<19)
+#define FDI_PORT_WIDTH_1X_LPT (0<<19)
+
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
+#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
-#define _FDI_RXA_MISC 0xf0010
-#define _FDI_RXB_MISC 0xf1010
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -3522,31 +3990,6 @@
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
-/* CRT */
-#define PCH_ADPA 0xe1100
-#define ADPA_TRANS_SELECT_MASK (1<<30)
-#define ADPA_TRANS_A_SELECT 0
-#define ADPA_TRANS_B_SELECT (1<<30)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
@@ -3578,20 +4021,18 @@
#define PCH_LVDS 0xe1180
#define LVDS_DETECTED (1 << 1)
-#define BLC_PWM_CPU_CTL2 0x48250
-#define PWM_ENABLE (1 << 31)
-#define PWM_PIPE_A (0 << 29)
-#define PWM_PIPE_B (1 << 29)
-#define BLC_PWM_CPU_CTL 0x48254
+/* vlv has 2 sets of panel control regs. */
+#define PIPEA_PP_STATUS 0x61200
+#define PIPEA_PP_CONTROL 0x61204
+#define PIPEA_PP_ON_DELAYS 0x61208
+#define PIPEA_PP_OFF_DELAYS 0x6120c
+#define PIPEA_PP_DIVISOR 0x61210
-#define BLC_PWM_PCH_CTL1 0xc8250
-#define PWM_PCH_ENABLE (1 << 31)
-#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
-#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
-#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
-#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
-
-#define BLC_PWM_PCH_CTL2 0xc8254
+#define PIPEB_PP_STATUS 0x61300
+#define PIPEB_PP_CONTROL 0x61304
+#define PIPEB_PP_ON_DELAYS 0x61308
+#define PIPEB_PP_OFF_DELAYS 0x6130c
+#define PIPEB_PP_DIVISOR 0x61310
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
@@ -3615,6 +4056,11 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
+#define PANEL_POWER_PORT_LVDS (0 << 30)
+#define PANEL_POWER_PORT_DP_A (1 << 30)
+#define PANEL_POWER_PORT_DP_C (2 << 30)
+#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -3656,11 +4102,13 @@
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
+#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
+#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -3713,8 +4161,13 @@
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
#define FORCEWAKE 0xA18C
+#define FORCEWAKE_VLV 0x1300b0
+#define FORCEWAKE_ACK_VLV 0x1300b4
+#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_KERNEL 0x1
+#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
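
[Editor's note: a sketch of the gen6 forcewake handshake using the registers above: ask the GT to stay awake, wait for the ack, do MMIO, then release. Real code reference-counts nested users and uses timeouts:

static void
gen6_gt_force_wake_get(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	I915_WRITE(FORCEWAKE, 1);
	while ((I915_READ(FORCEWAKE_ACK) & 1) == 0)
		;	/* a real implementation would time out */
}

static void
gen6_gt_force_wake_put(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	I915_WRITE(FORCEWAKE, 0);
}
]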
@@ -3730,12 +4183,18 @@
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
+# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
+# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+#define GEN7_UCGCTL4 0x940c
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -3754,7 +4213,9 @@
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
+#define HSW_CAGF_SHIFT 7
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
@@ -3767,6 +4228,7 @@
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -3810,11 +4272,20 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define GEN6_GT_GFX_RC6 0x138108
+#define GEN6_GT_GFX_RC6p 0x13810C
+#define GEN6_GT_GFX_RC6pp 0x138110
+
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
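
[Editor's note: the RC6 VID encode/decode pair added above is a linear mapping with a 245 mV base and 5 mV steps; a worked round trip:

/* GEN6_ENCODE_RC6_VID(450) == (450 - 245) / 5 == 41 */
/* GEN6_DECODE_RC6_VID(41)  ==  41 * 5 + 245  == 450 */
]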
@@ -3826,6 +4297,35 @@
#define GEN6_RC6 3
#define GEN6_RC7 4
+#define GEN7_MISCCPCTL (0x9424)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+
+/* IVYBRIDGE DPF */
+#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
+#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
+#define GEN7_PARITY_ERROR_VALID (1<<13)
+#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
+#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define GEN7_PARITY_ERROR_ROW(reg) \
+ ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+#define GEN7_PARITY_ERROR_BANK(reg) \
+ ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+#define GEN7_PARITY_ERROR_SUBBANK(reg) \
+ ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define GEN7_L3CDERRST1_ENABLE (1<<7)
+
+#define GEN7_L3LOG_BASE 0xB070
+#define GEN7_L3LOG_SIZE 0x80
+
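[Editor's note: a sketch of consuming the new DPF registers: decode one L3 parity error and re-arm detection. The write-to-clear handling of the valid bit is an assumption; interrupt plumbing and locking are omitted:

static void
ivb_parity_error_decode(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t reg = I915_READ(GEN7_L3CDERRST1);

	if (reg & GEN7_PARITY_ERROR_VALID)
		DRM_DEBUG_DRIVER("L3 parity error: row %d, bank %d, subbank %d\n",
		    GEN7_PARITY_ERROR_ROW(reg),
		    GEN7_PARITY_ERROR_BANK(reg),
		    GEN7_PARITY_ERROR_SUBBANK(reg));
	/* Assumed: clear the valid bit and re-enable detection. */
	I915_WRITE(GEN7_L3CDERRST1,
	    GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
}
]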
+#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
+#define GEN7_MAX_PS_THREAD_DEP (8<<12)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN7_ROW_CHICKEN2 0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+#define DOP_CLOCK_GATING_DISABLE (1<<0)
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -3839,7 +4339,15 @@
#define G4X_HDMIW_HDMIEDID 0x6210C
#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ IBX_HDMIW_HDMIEDID_A, \
+ IBX_HDMIW_HDMIEDID_B)
#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ IBX_AUD_CNTL_ST_A, \
+ IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
#define IBX_ELD_ADDRESS (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
@@ -3848,7 +4356,15 @@
#define IBX_CP_READYB (1 << 1)
#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ CPT_HDMIW_HDMIEDID_A, \
+ CPT_HDMIW_HDMIEDID_B)
#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ CPT_AUD_CNTL_ST_A, \
+ CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
/* These are the 4 32-bit write offset registers for each stream
@@ -3858,7 +4374,15 @@
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
#define IBX_AUD_CONFIG_A 0xe2000
+#define IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+ IBX_AUD_CONFIG_A, \
+ IBX_AUD_CONFIG_B)
#define CPT_AUD_CONFIG_A 0xe5000
+#define CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+ CPT_AUD_CONFIG_A, \
+ CPT_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -3869,4 +4393,273 @@
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+/* HSW Audio */
+#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
+#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ HSW_AUD_CONFIG_A, \
+ HSW_AUD_CONFIG_B)
+
+#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
+#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_MISC_CTRL_A, \
+ HSW_AUD_MISC_CTRL_B)
+
+#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
+#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ HSW_AUD_DIG_CNVT_1, \
+ HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define HSW_AUD_EDID_DATA_A 0x65050
+#define HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ HSW_AUD_EDID_DATA_A, \
+ HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
+#define AUDIO_INACTIVE_C (1<<11)
+#define AUDIO_INACTIVE_B (1<<7)
+#define AUDIO_INACTIVE_A (1<<3)
+#define AUDIO_OUTPUT_ENABLE_A (1<<2)
+#define AUDIO_OUTPUT_ENABLE_B (1<<6)
+#define AUDIO_OUTPUT_ENABLE_C (1<<10)
+#define AUDIO_ELD_VALID_A (1<<0)
+#define AUDIO_ELD_VALID_B (1<<4)
+#define AUDIO_ELD_VALID_C (1<<8)
+#define AUDIO_CP_READY_A (1<<1)
+#define AUDIO_CP_READY_B (1<<5)
+#define AUDIO_CP_READY_C (1<<9)
+
+/* HSW Power Wells */
+#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_ENABLE (1<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
+
+/* Per-pipe DDI Function Control */
+#define TRANS_DDI_FUNC_CTL_A 0x60400
+#define TRANS_DDI_FUNC_CTL_B 0x61400
+#define TRANS_DDI_FUNC_CTL_C 0x62400
+#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+ TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_ENABLE (1<<31)
+/* These bits are ignored by the eDP pipe, since it can only connect to DDI A */
+#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
+#define TRANS_DDI_PORT_NONE (0<<28)
+#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
+#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
+#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
+#define TRANS_DDI_BPC_MASK (7<<20)
+#define TRANS_DDI_BPC_8 (0<<20)
+#define TRANS_DDI_BPC_10 (1<<20)
+#define TRANS_DDI_BPC_6 (2<<20)
+#define TRANS_DDI_BPC_12 (3<<20)
+#define TRANS_DDI_PVSYNC (1<<17)
+#define TRANS_DDI_PHSYNC (1<<16)
+#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
+#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
+#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
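+/*
+ * Illustrative composition (a sketch; PORT_B refers to the driver's enum
+ * port value): driving HDMI at 8 bpc out of port B would program roughly
+ * TRANS_DDI_FUNC_ENABLE | TRANS_DDI_SELECT_PORT(PORT_B) |
+ * TRANS_DDI_MODE_SELECT_HDMI | TRANS_DDI_BPC_8 into TRANS_DDI_FUNC_CTL(tran).
+ */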
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A 0x64040
+#define DP_TP_CTL_B 0x64140
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A 0x64044
+#define DP_TP_STATUS_B 0x64144
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A 0x64000
+#define DDI_BUF_CTL_B 0x64100
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_PORT_REVERSAL (1<<16)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
+#define DDI_PORT_WIDTH_X1 (0<<1)
+#define DDI_PORT_WIDTH_X2 (1<<1)
+#define DDI_PORT_WIDTH_X4 (3<<1)
+#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A 0x64E00
+#define DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
+
+/* The Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which contains the register offset, and SBI_DATA,
+ * which contains the payload */
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
+#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
+#define SBI_CTL_OP_CRRD (0x6<<8)
+#define SBI_CTL_OP_CRWR (0x7<<8)
+#define SBI_RESPONSE_FAIL (0x1<<1)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
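+/*
+ * Illustrative read sequence (a sketch of what the driver's helper does):
+ * write (offset << 16) to SBI_ADDR, then SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD
+ * | SBI_BUSY to SBI_CTL_STAT, poll SBI_CTL_STAT until SBI_BUSY clears,
+ * check that SBI_RESPONSE_FAIL is not set, and read the value from SBI_DATA.
+ */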
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
+#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
+
+/* SPLL */
+#define SPLL_CTL 0x46020
+#define SPLL_PLL_ENABLE (1<<31)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
+#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A 0x46100
+#define PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
+#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
+#define PORT_CLK_SEL_LCPLL_810 (2<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL1 (4<<29)
+#define PORT_CLK_SEL_WRPLL2 (5<<29)
+#define PORT_CLK_SEL_NONE (7<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A 0x46140
+#define TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0<<29)
+#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
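+/* port x maps to selector x+1 because selector 0 means "clock disabled" */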
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+ _TRANSB_MSA_MISC)
+#define TRANS_MSA_SYNC_CLK (1<<0)
+#define TRANS_MSA_6_BPC (0<<5)
+#define TRANS_MSA_8_BPC (1<<5)
+#define TRANS_MSA_10_BPC (2<<5)
+#define TRANS_MSA_12_BPC (3<<5)
+#define TRANS_MSA_16_BPC (4<<5)
+
+/* LCPLL Control */
+#define LCPLL_CTL 0x130040
+#define LCPLL_PLL_DISABLE (1<<31)
+#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CLK_FREQ_MASK (3<<26)
+#define LCPLL_CLK_FREQ_450 (0<<26)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_CD_SOURCE_FCLK (1<<21)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A 0x45270
+#define PIPE_WM_LINETIME_B 0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
+#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
+#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+
+#define WM_DBG 0x45280
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
+#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
+#define WM_DBG_DISALLOW_SPRITE (1<<2)
+
#endif /* _I915_REG_H_ */
diff --git a/sys/dev/pci/drm/i915/i915_suspend.c b/sys/dev/pci/drm/i915/i915_suspend.c
new file mode 100644
index 00000000000..6ac4ea0e397
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_suspend.c
@@ -0,0 +1,939 @@
+/* $OpenBSD: i915_suspend.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+bool i915_pipe_enabled(struct drm_device *, enum pipe);
+void i915_save_palette(struct drm_device *, enum pipe);
+void i915_restore_palette(struct drm_device *, enum pipe);
+u_int8_t i915_read_indexed(struct drm_device *, u_int16_t,
+ u_int16_t, u_int8_t);
+void i915_write_indexed(struct drm_device *, u_int16_t,
+ u_int16_t, u_int8_t, u_int8_t);
+void i915_write_ar(struct drm_device *, u_int16_t, u_int8_t,
+ u_int8_t, u_int16_t);
+u_int8_t i915_read_ar(struct drm_device *, u_int16_t,
+ u_int8_t, u_int16_t);
+void i915_save_vga(struct drm_device *);
+void i915_restore_vga(struct drm_device *);
+void i915_save_modeset_reg(struct drm_device *);
+void i915_restore_modeset_reg(struct drm_device *);
+
+bool
+i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 dpll_reg;
+
+ /* On IVB, 3rd pipe shares PLL with another one */
+ if (pipe > 1)
+ return false;
+
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = _PCH_DPLL(pipe);
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
+}
+
+void
+i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
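+ /* each of the 256 palette entries is a 32-bit register, hence i << 2 */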
+ for (i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+void
+i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+u_int8_t
+i915_read_indexed(struct drm_device *dev, u_int16_t index_port,
+ u_int16_t data_port, u_int8_t reg)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ I915_WRITE8(index_port, reg);
+ return I915_READ8(data_port);
+}
+
+u_int8_t
+i915_read_ar(struct drm_device *dev, u_int16_t st01,
+ u_int8_t reg, u_int16_t palette_enable)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
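+ /* reading ST01 resets the attribute controller index/data flip-flop */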
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+ return I915_READ8(VGA_AR_DATA_READ);
+}
+
+void
+i915_write_ar(struct drm_device *dev, u_int16_t st01, u_int8_t reg,
+ u_int8_t val, u_int16_t palette_enable)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+ I915_WRITE8(VGA_AR_DATA_WRITE, val);
+}
+
+void
+i915_write_indexed(struct drm_device *dev, u_int16_t index_port,
+ u_int16_t data_port, u_int8_t reg, u_int8_t val)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ I915_WRITE8(index_port, reg);
+ I915_WRITE8(data_port, val);
+}
+
+void
+i915_save_vga(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* VGA color palette registers */
+ dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
+
+ /* MSR bits */
+ dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* CRT controller regs */
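+ /* clearing bit 7 of CR11 unlocks CRTC registers 0-7 for writing */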
+ i915_write_indexed(dev, cr_index, cr_data, 0x11,
+ i915_read_indexed(dev, cr_index, cr_data, 0x11) &
+ (~0x80));
+ for (i = 0; i <= 0x24; i++)
+ dev_priv->regfile.saveCR[i] =
+ i915_read_indexed(dev, cr_index, cr_data, i);
+ /* Make sure we don't turn off CR group 0 writes */
+ dev_priv->regfile.saveCR[0x11] &= ~0x80;
+
+ /* Attribute controller registers */
+ I915_READ8(st01);
+ dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ for (i = 0; i <= 0x14; i++)
+ dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
+ I915_READ8(st01);
+
+ /* Graphics controller registers */
+ for (i = 0; i < 9; i++)
+ dev_priv->regfile.saveGR[i] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
+
+ dev_priv->regfile.saveGR[0x10] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+ dev_priv->regfile.saveGR[0x11] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+ dev_priv->regfile.saveGR[0x18] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+ /* Sequencer registers */
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveSR[i] =
+ i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+void
+i915_restore_vga(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* MSR bits */
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* Sequencer registers, don't write SR07 */
+ for (i = 0; i < 7; i++)
+ i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
+ dev_priv->regfile.saveSR[i]);
+
+ /* CRT controller regs */
+ /* Enable CR group 0 writes */
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
+ for (i = 0; i <= 0x24; i++)
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
+
+ /* Graphics controller regs */
+ for (i = 0; i < 9; i++)
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
+ dev_priv->regfile.saveGR[i]);
+
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+ dev_priv->regfile.saveGR[0x10]);
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+ dev_priv->regfile.saveGR[0x11]);
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+ dev_priv->regfile.saveGR[0x18]);
+
+ /* Attribute controller registers */
+ I915_READ8(st01); /* switch back to index mode */
+ for (i = 0; i <= 0x14; i++)
+ i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
+ I915_READ8(st01); /* switch back to index mode */
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
+ I915_READ8(st01);
+
+ /* VGA color palette registers */
+ I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
+}
+
+void
+i915_save_modeset_reg(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ /* Cursor state */
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ } else {
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
+
+ /* Pipe & plane B info */
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ } else {
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
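+ /* FALLTHROUGH: gen3 also saves the eight gen2-style fence registers */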
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
+ return;
+}
+
+void
+i915_restore_modeset_reg(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
+ break;
+ }
+
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
+ } else {
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_a_reg);
+ DRM_UDELAY(150);
+ }
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
+ POSTING_READ(dpll_a_reg);
+ DRM_UDELAY(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
+ }
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
+
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
+
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
+
+ I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
+ }
+
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_b_reg);
+ DRM_UDELAY(150);
+ }
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
+ POSTING_READ(dpll_b_reg);
+ DRM_UDELAY(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
+ }
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
+
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
+
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
+
+ I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
+ }
+
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
+
+ /* Cursor state */
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
+
+ return;
+}
+
+void
+i915_save_display(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Display arbitration control */
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+
+ /* This is only meaningful in non-KMS mode; don't save these in KMS mode */
+ i915_save_modeset_reg(dev);
+
+ /* LVDS state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ } else {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
+ }
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ } else {
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+ }
+
+ /* Only save FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
+ }
+
+ /* VGA state */
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ else
+ dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
+
+ i915_save_vga(dev);
+}
+
+void
+i915_restore_display(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Display arbitration */
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
+ }
+
+ /* This is only meaningful in non-KMS mode; don't restore these in KMS mode */
+ i915_restore_modeset_reg(dev);
+
+ /* LVDS state */
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
+ } else if (IS_MOBILE(dev) && !IS_I830(dev))
+ I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+ * otherwise we get blank eDP screen after S3 on some machines
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ I915_WRITE(RSTDBYCTL,
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
+ }
+
+ /* Only restore FBC state on platforms that support FBC */
+ intel_disable_fbc(dev);
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
+ } else {
+ I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
+ }
+ }
+ /* VGA state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
+ else
+ I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
+
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+ POSTING_READ(VGA_PD);
+ DRM_UDELAY(150);
+
+ i915_restore_vga(dev);
+}
+
+int
+i915_save_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->regfile.saveLBB = pci_conf_read(dev_priv->pc, dev_priv->tag, LBB);
+
+ DRM_LOCK();
+
+ i915_save_display(dev);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+ dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+ dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+ dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+ dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->regfile.saveIER = I915_READ(IER);
+ dev_priv->regfile.saveIMR = I915_READ(IMR);
+ }
+ }
+
+ intel_disable_gt_powersave(dev);
+
+ /* Cache mode state */
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+ /* Memory Arbitration state */
+ dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+ dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+ dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+ DRM_UNLOCK();
+
+ return 0;
+}
+
+int
+i915_restore_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ pci_conf_write(dev_priv->pc, dev_priv->tag, LBB, dev_priv->regfile.saveLBB);
+
+ DRM_LOCK();
+
+ i915_restore_display(dev);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->regfile.saveIER);
+ I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+ }
+ }
+
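+ /*
+ * CACHE_MODE_0 and MI_ARB_STATE are masked registers: the upper 16 bits
+ * are per-bit write enables, so OR in 0xffff0000 to make the restored
+ * low bits stick.
+ */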
+ /* Cache mode state */
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+
+ /* Memory arbitration state */
+ I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
+
+ for (i = 0; i < 16; i++) {
+ I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
+
+ DRM_UNLOCK();
+
+#ifdef notyet
+ intel_i2c_reset(dev);
+#endif
+
+ return 0;
+}
diff --git a/sys/dev/pci/drm/i915/intel_bios.c b/sys/dev/pci/drm/i915/intel_bios.c
new file mode 100644
index 00000000000..316fcdf592e
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_bios.c
@@ -0,0 +1,852 @@
+/* $OpenBSD: intel_bios.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_dp_helper.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_bios.h"
+#include <dev/isa/isareg.h>
+#include <dev/isa/isavar.h>
+
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
+
+void *find_section(struct bdb_header *, int);
+u16 get_blocksize(void *);
+void fill_detail_timing_data(struct drm_display_mode *,
+ const struct lvds_dvo_timing *);
+bool lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *,
+ const struct lvds_dvo_timing *);
+const struct lvds_dvo_timing *
+ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *,
+ const struct bdb_lvds_lfp_data_ptrs *, int);
+void parse_lfp_panel_data(struct inteldrm_softc *, struct bdb_header *);
+void parse_sdvo_panel_data(struct inteldrm_softc *, struct bdb_header *);
+int intel_bios_ssc_frequency(struct drm_device *, bool);
+void parse_general_features(struct inteldrm_softc *, struct bdb_header *);
+void parse_general_definitions(struct inteldrm_softc *,
+ struct bdb_header *);
+void parse_sdvo_device_mapping(struct inteldrm_softc *,
+ struct bdb_header *);
+void parse_driver_features(struct inteldrm_softc *, struct bdb_header *);
+void parse_edp(struct inteldrm_softc *, struct bdb_header *);
+void parse_device_mapping(struct inteldrm_softc *, struct bdb_header *);
+void init_vbt_defaults(struct inteldrm_softc *);
+int intel_no_opregion_vbt_callback(const struct dmi_system_id *);
+const struct lvds_fp_timing *
+ get_lvds_fp_timing(const struct bdb_header *,
+ const struct bdb_lvds_lfp_data *,
+ const struct bdb_lvds_lfp_data_ptrs *, int);
+bool dmi_found(const struct dmi_system_id *);
+
+static int panel_type;
+
+void *
+find_section(struct bdb_header *bdb, int section_id)
+{
+ u8 *base = (u8 *)bdb;
+ int index = 0;
+ u16 total, current_size;
+ u8 current_id;
+
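+ /* each BDB section is a 1-byte id, a 2-byte size, then the payload */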
+ /* skip to first section */
+ index += bdb->header_size;
+ total = bdb->bdb_size;
+
+ /* walk the sections looking for section_id */
+ while (index < total) {
+ current_id = *(base + index);
+ index++;
+ current_size = *((u16 *)(base + index));
+ index += 2;
+ if (current_id == section_id)
+ return base + index;
+ index += current_size;
+ }
+
+ return NULL;
+}
+
+u16
+get_blocksize(void *p)
+{
+ u16 *block_ptr, block_size;
+
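+ /* find_section() returns a pointer just past the 2-byte size field */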
+ block_ptr = (u16 *)((char *)p - 2);
+ block_size = *block_ptr;
+ return block_size;
+}
+
+void
+fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+ const struct lvds_dvo_timing *dvo_timing)
+{
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ dvo_timing->hsync_pulse_width;
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ dvo_timing->vsync_off;
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ dvo_timing->vsync_pulse_width;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
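+ /* the VBT stores the pixel clock in 10 kHz units; mode->clock is in kHz */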
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ /* Some VBTs have bogus h/vtotal values */
+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+ drm_mode_set_name(panel_fixed_mode);
+}
+
+bool
+lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
+ const struct lvds_dvo_timing *b)
+{
+ if (a->hactive_hi != b->hactive_hi ||
+ a->hactive_lo != b->hactive_lo)
+ return false;
+
+ if (a->hsync_off_hi != b->hsync_off_hi ||
+ a->hsync_off_lo != b->hsync_off_lo)
+ return false;
+
+ if (a->hsync_pulse_width != b->hsync_pulse_width)
+ return false;
+
+ if (a->hblank_hi != b->hblank_hi ||
+ a->hblank_lo != b->hblank_lo)
+ return false;
+
+ if (a->vactive_hi != b->vactive_hi ||
+ a->vactive_lo != b->vactive_lo)
+ return false;
+
+ if (a->vsync_off != b->vsync_off)
+ return false;
+
+ if (a->vsync_pulse_width != b->vsync_pulse_width)
+ return false;
+
+ if (a->vblank_hi != b->vblank_hi ||
+ a->vblank_lo != b->vblank_lo)
+ return false;
+
+ return true;
+}
+
+const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+ int index)
+{
+ /*
+ * The size of fp_timing varies between platforms, so compute the DVO
+ * timing offset relative to the start of the LVDS data entry to locate
+ * the DVO timing entry.
+ */
+
+ int lfp_data_size =
+ lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ int dvo_timing_offset =
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+ char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+ return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
+/* get lvds_fp_timing entry
+ * this function may return NULL if the corresponding entry is invalid
+ */
+const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+ const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+ size_t ofs;
+
+ if (index >= nitems(ptrs->ptr))
+ return NULL;
+ ofs = ptrs->ptr[index].fp_timing_offset;
+ if (ofs < data_ofs ||
+ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+ return NULL;
+ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
+/* Try to find integrated panel data */
+void
+parse_lfp_panel_data(struct inteldrm_softc *dev_priv,
+ struct bdb_header *bdb)
+{
+ const struct bdb_lvds_options *lvds_options;
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int i, downclock;
+
+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+
+ dev_priv->lvds_dither = lvds_options->pixel_dither;
+ if (lvds_options->panel_type == 0xff)
+ return;
+
+ panel_type = lvds_options->panel_type;
+
+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!lvds_lfp_data)
+ return;
+
+ lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+ if (!lvds_lfp_data_ptrs)
+ return;
+
+ dev_priv->lvds_vbt = 1;
+
+ panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+
+ panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+
+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+
+ /*
+ * Iterate over the LVDS panel timing info to find the lowest clock
+ * for the native resolution.
+ */
+ downclock = panel_dvo_timing->clock;
+ for (i = 0; i < 16; i++) {
+ const struct lvds_dvo_timing *dvo_timing;
+
+ dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ i);
+ if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
+ dvo_timing->clock < downclock)
+ downclock = dvo_timing->clock;
+ }
+
+ if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = downclock * 10;
+ DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ panel_fixed_mode->clock, 10*downclock);
+ }
+
+ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+ if (fp_timing) {
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
+ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+ dev_priv->bios_lvds_val);
+ }
+ }
+}
+
+/* Try to find sdvo panel data */
+void
+parse_sdvo_panel_data(struct inteldrm_softc *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int index;
+
+ index = i915_vbt_sdvo_panel_type;
+ if (index == -2) {
+ DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ return;
+ }
+
+ if (index == -1) {
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ index = sdvo_lvds_options->panel_type;
+ }
+
+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dvo_timing)
+ return;
+
+ panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
+
+ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+ DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+int
+intel_bios_ssc_frequency(struct drm_device *dev,
+ bool alternate)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ return alternate ? 66 : 48;
+ case 3:
+ case 4:
+ return alternate ? 100 : 96;
+ default:
+ return alternate ? 100 : 120;
+ }
+}
+
+void
+parse_general_features(struct inteldrm_softc *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ struct bdb_general_features *general;
+
+ general = find_section(bdb, BDB_GENERAL_FEATURES);
+ if (general) {
+ dev_priv->int_tv_support = general->int_tv_support;
+ dev_priv->int_crt_support = general->int_crt_support;
+ dev_priv->lvds_use_ssc = general->enable_ssc;
+ dev_priv->lvds_ssc_freq =
+ intel_bios_ssc_frequency(dev, general->ssc_freq);
+ dev_priv->display_clock_mode = general->display_clock_mode;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+ dev_priv->int_tv_support,
+ dev_priv->int_crt_support,
+ dev_priv->lvds_use_ssc,
+ dev_priv->lvds_ssc_freq,
+ dev_priv->display_clock_mode);
+ }
+}
+
+void
+parse_general_definitions(struct inteldrm_softc *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *general;
+
+ general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (general) {
+ u16 block_size = get_blocksize(general);
+ if (block_size >= sizeof(*general)) {
+ int bus_pin = general->crt_ddc_gmbus_pin;
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ if (intel_gmbus_is_port_valid(bus_pin))
+ dev_priv->crt_ddc_pin = bus_pin;
+ } else {
+ DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
+ block_size);
+ }
+ }
+}
+
+void
+parse_sdvo_device_mapping(struct inteldrm_softc *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ return;
+ }
+	/* Check whether the size of the child device entries meets our
+	 * expectations.  If the child device size obtained from the general
+	 * definition block differs from sizeof(struct child_device_config),
+	 * skip the parsing of the SDVO device info.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* different child device size; ignore it */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+	/* get the number of child devices */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ if (p_child->slave_addr != SLAVE_ADDR1 &&
+ p_child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+			 * it is not an SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ p_child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ p_child->slave_addr,
+ (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ if (!p_mapping->initialized) {
+ p_mapping->dvo_port = p_child->dvo_port;
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin);
+ } else {
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
+ }
+ if (p_child->slave2_addr) {
+			/* Maybe this is an SDVO device with multiple inputs,
+			 * and the mapping info is not added. */
+ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
+ }
+ count++;
+ }
+
+ if (!count) {
+ /* No SDVO device info is found */
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ }
+ return;
+}
+
+void
+parse_driver_features(struct inteldrm_softc *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ struct bdb_driver_features *driver;
+
+ driver = find_section(bdb, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ if (SUPPORTS_EDP(dev) &&
+ driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
+
+ if (driver->dual_frequency)
+ dev_priv->render_reclock_avail = true;
+}
+
+void
+parse_edp(struct inteldrm_softc *dev_priv, struct bdb_header *bdb)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+ if (SUPPORTS_EDP(dev) && dev_priv->edp.support)
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ return;
+ }
+
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->edp.bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->edp.bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->edp.bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->edp.pps = *edp_pps;
+
+ dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+ DP_LINK_BW_1_62;
+ switch (edp_link_params->lanes) {
+ case 0:
+ dev_priv->edp.lanes = 1;
+ break;
+ case 1:
+ dev_priv->edp.lanes = 2;
+ break;
+ case 3:
+ default:
+ dev_priv->edp.lanes = 4;
+ break;
+ }
+ switch (edp_link_params->preemphasis) {
+ case 0:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case 1:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case 2:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case 3:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ }
+ switch (edp_link_params->vswing) {
+ case 0:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case 1:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case 2:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case 3:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ break;
+ }
+}
+
+void
+parse_device_mapping(struct inteldrm_softc *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ return;
+ }
+	/* Check whether the size of the child device entries meets our
+	 * expectations.  If the child device size obtained from the general
+	 * definition block differs from sizeof(struct child_device_config),
+	 * skip parsing the device info.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* different child device size; ignore it */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+	/* get the number of child devices */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+	    sizeof(*p_child);
+	count = 0;
+	/* count the child devices that are present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ return;
+ }
+ dev_priv->child_dev = malloc(sizeof(*p_child) * count, M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child device\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
+
+void
+init_vbt_defaults(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+ /* LFP panel data */
+ dev_priv->lvds_dither = 1;
+ dev_priv->lvds_vbt = 0;
+
+ /* SDVO panel data */
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ /* general features */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+
+ /* Default to using SSC */
+ dev_priv->lvds_use_ssc = 1;
+ dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+}
+
+int
+intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+ "VBIOS ROM for %s\n",
+ id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+ {
+ .callback = intel_no_opregion_vbt_callback,
+ .ident = "ThinkCentre A57",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+ },
+ },
+ { }
+};
+
+extern char *hw_vendor, *hw_prod;
+
+bool
+dmi_found(const struct dmi_system_id *dsi)
+{
+ int i, slot;
+
+ for (i = 0; i < nitems(dsi->matches); i++) {
+ slot = dsi->matches[i].slot;
+ switch (slot) {
+ case DMI_NONE:
+ break;
+ case DMI_SYS_VENDOR:
+ case DMI_BOARD_VENDOR:
+ if (hw_vendor != NULL &&
+ !strcmp(hw_vendor, dsi->matches[i].substr))
+ break;
+ else
+ return false;
+ case DMI_PRODUCT_NAME:
+ case DMI_BOARD_NAME:
+ if (hw_prod != NULL &&
+ !strcmp(hw_prod, dsi->matches[i].substr))
+ break;
+ else
+ return false;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int
+dmi_check_system(const struct dmi_system_id *sysid)
+{
+ const struct dmi_system_id *dsi;
+ int num = 0;
+
+ for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
+ if (dmi_found(dsi)) {
+ num++;
+ if (dsi->callback && dsi->callback(dsi))
+ break;
+ }
+ }
+ return (num);
+}
+
+#define VGA_BIOS_ADDR 0xc0000
+#define VGA_BIOS_LEN 0x10000
+
+/**
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+ * to appropriate values.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+int
+intel_parse_bios(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct bdb_header *bdb = NULL;
+ u8 *bios = NULL;
+
+ init_vbt_defaults(dev_priv);
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
+ }
+
+#if defined(__amd64__) || defined(__i386__)
+ if (bdb == NULL) {
+ struct vbt_header *vbt = NULL;
+ size_t size;
+ int i;
+
+ bios = (u8 *)ISA_HOLE_VADDR(VGA_BIOS_ADDR);
+ size = VGA_BIOS_LEN;
+
+ /* Scour memory looking for the VBT signature */
+		for (i = 0; i + 4 <= size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ DRM_DEBUG_DRIVER("VBT signature missing\n");
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ }
+#endif /* defined(__amd64__) || defined(__i386__) */
+
+ /* Grab useful general definitions */
+ parse_general_features(dev_priv, bdb);
+ parse_general_definitions(dev_priv, bdb);
+ parse_lfp_panel_data(dev_priv, bdb);
+ parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
+ parse_driver_features(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
+
+ return 0;
+}
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void
+intel_setup_bios(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ /* Set the Panel Power On/Off timings if uninitialized. */
+ if (!HAS_PCH_SPLIT(dev) &&
+ I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
+ /* Set T2 to 40ms and T5 to 200ms */
+ I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+ /* Set T3 to 35ms and Tx to 200ms */
+ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+ }
+}
diff --git a/sys/dev/pci/drm/i915/intel_bios.h b/sys/dev/pci/drm/i915/intel_bios.h
new file mode 100644
index 00000000000..548cb4393fc
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_bios.h
@@ -0,0 +1,620 @@
+/* $OpenBSD: intel_bios.h,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include <dev/pci/drm/drmP.h>
+
+struct vbt_header {
+	u8 signature[20];		/**< Always starts with '$VBT' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 vbt_size; /**< in bytes */
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset; /**< from beginning of VBT */
+ u32 aim_offset[4]; /**< from beginning of VBT */
+} __attribute__((packed));
+
+struct bdb_header {
+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 bdb_size; /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs); each block carries
+ * its ID and size in the first 3 bytes (ID in the first byte, size in the
+ * next two).  Known types are listed below; a sketch of walking these
+ * blocks follows the list.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
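+
+/*
+ * Illustrative only (not part of this diff): a minimal sketch of how a
+ * find_section()-style walker can traverse these blocks, assuming the
+ * layout described above: the block list starts header_size bytes into
+ * the BDB, and each block is a 1-byte ID followed by a 2-byte
+ * little-endian size and then the payload.  The real find_section()
+ * lives earlier in intel_bios.c; the name below is hypothetical.
+ */
+#if 0
+static void *
+find_bdb_block(struct bdb_header *bdb, int section_id)
+{
+	u8 *base = (u8 *)bdb;
+	u16 total = bdb->bdb_size;
+	u16 current_size;
+	u8 current_id;
+	int index = bdb->header_size;
+
+	while (index + 3 <= total) {
+		current_id = base[index];
+		/* byte-wise read avoids an unaligned u16 access */
+		current_size = base[index + 1] | (base[index + 2] << 8);
+		index += 3;
+
+		if (index + current_size > total)
+			return NULL;	/* truncated block, bail out */
+		if (current_id == section_id)
+			return base + index;
+		index += current_size;
+	}
+	return NULL;
+}
+#endif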
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd7:1;
+ u8 display_clock_mode:1;
+ u8 rsvd8:1; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:6; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
+} __attribute__((packed));
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+struct child_device_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+	u8 dvo_port;	/* See DEVICE_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+	u8 dvo_wiring;	/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+	/*
+	 * Device info:
+	 * If TV is present, it'll be at devices[0].
+	 * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it can be as
+	 * few as 4 if both TV and LVDS are missing.
+	 * The device count depends on the size of the general definition
+	 * block and is obtained with the following formula:
+	 * number = (block_size - sizeof(bdb_general_definitions)) /
+	 *	    sizeof(child_device_config);
+	 */
+ struct child_device_config devices[0];
+} __attribute__((packed));
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+	u16 clock;		/**< In 10 kHz units */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 h_image;
+ u8 v_image;
+ u8 max_hv;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __attribute__((packed));
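+
+/*
+ * Illustrative only (not part of this diff): the split hi/lo fields above
+ * are reassembled before use, as fill_detail_timing_data() earlier in
+ * intel_bios.c is expected to do.  A sketch, with hypothetical helper
+ * names:
+ */
+#if 0
+static inline int
+dvo_timing_hdisplay(const struct lvds_dvo_timing *t)
+{
+	/* 12-bit horizontal active: high nibble plus low byte */
+	return (t->hactive_hi << 8) | t->hactive_lo;
+}
+
+static inline int
+dvo_timing_clock_khz(const struct lvds_dvo_timing *t)
+{
+	/* the clock field is stored in 10 kHz units */
+	return t->clock * 10;
+}
+#endif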
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_EDP 3
+
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+} __attribute__((packed));
+
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+	u32 color_depth;	/* 2 bits per panel, see EDP_*BPP above */
+ struct edp_link_params link_params[16];
+ u32 sdrrs_msa_timing_delay;
+
+ /* ith bit indicates enabled/disabled for (i+1)th panel */
+ u16 edp_s3d_feature;
+ u16 edp_t3_optimization;
+} __attribute__ ((packed));
+
+void intel_setup_bios(struct drm_device *dev);
+int intel_parse_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
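+
+/*
+ * Illustrative only (not part of this driver): decoding a GR18 hotkey
+ * event with the masks above.  A display-switch request would then be
+ * resolved against SWF14 15:0, as the definitions further below note.
+ */
+#if 0
+static int
+gr18_is_display_switch(u8 gr18)
+{
+	if ((gr18 & GR18_DRIVER_SWITCH_EN) == 0)
+		return 0;	/* VBIOS still owns the event */
+	return (gr18 & GR18_HOTKEY_MASK) == GR18_HK_DISP_SWITCH;
+}
+#endif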
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
+
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
+#endif /* _I830_BIOS_H_ */
diff --git a/sys/dev/pci/drm/i915/intel_crt.c b/sys/dev/pci/drm/i915/intel_crt.c
new file mode 100644
index 00000000000..e12568c5657
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_crt.c
@@ -0,0 +1,887 @@
+/* $OpenBSD: intel_crt.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_crtc_helper.h>
+#include <dev/pci/drm/drm_edid.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+ ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+ ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+ ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+ ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+ struct intel_encoder base;
+ bool force_hotplug_required;
+ u32 adpa_reg;
+};
+
+struct intel_crt *intel_attached_crt(struct drm_connector *);
+void intel_crt_dpms(struct drm_connector *, int);
+int intel_crt_mode_valid(struct drm_connector *,
+ struct drm_display_mode *);
+bool intel_crt_mode_fixup(struct drm_encoder *, const struct drm_display_mode *,
+ struct drm_display_mode *);
+void intel_crt_mode_set(struct drm_encoder *, struct drm_display_mode *,
+ struct drm_display_mode *);
+bool intel_ironlake_crt_detect_hotplug(struct drm_connector *);
+bool intel_crt_detect_hotplug(struct drm_connector *);
+bool intel_crt_detect_ddc(struct drm_connector *);
+enum drm_connector_status intel_crt_load_detect(struct intel_crt *);
+enum drm_connector_status intel_crt_detect(struct drm_connector *, bool);
+void intel_crt_destroy(struct drm_connector *);
+int intel_crt_get_modes(struct drm_connector *);
+int intel_crt_set_property(struct drm_connector *, struct drm_property *,
+ uint64_t);
+void intel_crt_reset(struct drm_connector *);
+int intel_no_crt_dmi_callback(const struct dmi_system_id *);
+struct intel_crt *
+ intel_encoder_to_crt(struct intel_encoder *);
+bool intel_crt_get_hw_state(struct intel_encoder *, enum pipe *);
+void intel_disable_crt(struct intel_encoder *);
+void intel_enable_crt(struct intel_encoder *);
+void intel_crt_set_dpms(struct intel_encoder *, int);
+bool valleyview_crt_detect_hotplug(struct drm_connector *);
+struct edid *
+ intel_crt_get_edid(struct drm_connector *, struct i2c_controller *);
+int intel_crt_ddc_get_modes(struct drm_connector *,
+ struct i2c_controller *);
+
+struct intel_crt *
+intel_attached_crt(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_crt, base);
+}
+
+struct intel_crt *
+intel_encoder_to_crt(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_crt, base);
+}
+
+bool
+intel_crt_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp;
+
+ tmp = I915_READ(crt->adpa_reg);
+
+ if (!(tmp & ADPA_DAC_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+void
+intel_disable_crt(struct intel_encoder *encoder)
+{
+ struct inteldrm_softc *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
+
+ temp = I915_READ(crt->adpa_reg);
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ temp &= ~ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+void
+intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct inteldrm_softc *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
+
+ temp = I915_READ(crt->adpa_reg);
+ temp |= ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+void
+intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
+
+ temp = I915_READ(crt->adpa_reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+void
+intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ int old_dpms;
+
+ /* PCH platforms and VLV only support on/off. */
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = encoder->base.crtc;
+ if (!crtc) {
+ encoder->connectors_active = false;
+ return;
+ }
+
+ /* We need the pipe to run for anything but OFF. */
+ if (mode == DRM_MODE_DPMS_OFF)
+ encoder->connectors_active = false;
+ else
+ encoder->connectors_active = true;
+
+ if (mode < old_dpms) {
+ /* From off to on, enable the pipe first. */
+ intel_crtc_update_dpms(crtc);
+
+ intel_crt_set_dpms(encoder, mode);
+ } else {
+ intel_crt_set_dpms(encoder, mode);
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
+}
+
+int
+intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+
+ int max_clock = 0;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
+ if (IS_GEN2(dev))
+ max_clock = 350000;
+ else
+ max_clock = 400000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev) &&
+ (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+bool
+intel_crt_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+void
+intel_crt_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crt *crt =
+ intel_encoder_to_crt(to_intel_encoder(encoder));
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 adpa;
+
+ if (HAS_PCH_SPLIT(dev))
+ adpa = ADPA_HOTPLUG_BITS;
+ else
+ adpa = 0;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ /* For CPT allow 3 pipe config, for others just use A or B */
+ if (HAS_PCH_LPT(dev))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+ else if (intel_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
+ I915_WRITE(crt->adpa_reg, adpa);
+}
+
+bool
+intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 adpa;
+ bool ret;
+ int retries;
+
+ /* The first time through, trigger an explicit detection cycle */
+ if (crt->force_hotplug_required) {
+ bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ u32 save_adpa;
+
+ crt->force_hotplug_required = 0;
+
+ save_adpa = adpa = I915_READ(PCH_ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ I915_WRITE(PCH_ADPA, adpa);
+
+ for (retries = 1000; retries > 0; retries--) {
+ if ((I915_READ(PCH_ADPA) &
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+ if (turn_off_dac) {
+ I915_WRITE(PCH_ADPA, save_adpa);
+ POSTING_READ(PCH_ADPA);
+ }
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(PCH_ADPA);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+ DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ return ret;
+}
+
+bool
+valleyview_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 adpa;
+ bool ret;
+ u32 save_adpa;
+ int retries;
+
+ save_adpa = adpa = I915_READ(ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+
+ I915_WRITE(ADPA, adpa);
+
+ for (retries = 1000; retries > 0; retries--) {
+		if ((I915_READ(ADPA) &
+		    ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ I915_WRITE(ADPA, save_adpa);
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(ADPA);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+
+ DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ /* FIXME: debug force function and remove */
+ ret = true;
+
+ return ret;
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Not for i915G/i915GM
+ *
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
+ */
+bool
+intel_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 hotplug_en, orig, stat;
+ bool ret = false;
+ int i, tries = 0, retries;
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_ironlake_crt_detect_hotplug(connector);
+
+ if (IS_VALLEYVIEW(dev))
+ return valleyview_crt_detect_hotplug(connector);
+
+	/*
+	 * On 4 series desktops, the CRT detect sequence needs to be done
+	 * twice to get a reliable result.
+	 */
+
+ if (IS_G4X(dev) && !IS_GM45(dev))
+ tries = 2;
+ else
+ tries = 1;
+ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+ for (i = 0; i < tries ; i++) {
+ /* turn on the FORCE_DETECT */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ /* wait for FORCE_DETECT to go off */
+ for (retries = 1000; retries > 0; retries--) {
+ if ((I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+ }
+
+ stat = I915_READ(PORT_HOTPLUG_STAT);
+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* clear the interrupt we just generated, if any */
+ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ /* and put the bits back */
+ I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+ return ret;
+}
+
+struct edid *
+intel_crt_get_edid(struct drm_connector *connector,
+ struct i2c_controller *i2c)
+{
+ struct edid *edid;
+
+ edid = drm_get_edid(connector, i2c);
+
+#ifdef notyet
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
+#endif
+
+ return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+int
+intel_crt_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_controller *adapter)
+{
+ struct edid *edid;
+ int ret;
+
+ edid = intel_crt_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ free(edid, M_DRM);
+
+ return ret;
+}
+
+bool
+intel_crt_detect_ddc(struct drm_connector *connector)
+{
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct inteldrm_softc *dev_priv = crt->base.base.dev->dev_private;
+ struct i2c_controller *i2c;
+ struct edid *edid;
+
+ BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ edid = intel_crt_get_edid(connector, i2c);
+
+ if (edid) {
+ bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+ free(edid, M_DRM);
+
+ /*
+ * This may be a DVI-I connector with a shared DDC
+ * link between analog and digital outputs, so we
+ * have to check the EDID input spec of the attached device.
+ */
+ if (!is_digital) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ return true;
+ }
+
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ }
+
+ return false;
+}
+
+enum drm_connector_status
+intel_crt_load_detect(struct intel_crt *crt)
+{
+ struct drm_device *dev = crt->base.base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
+ uint32_t save_bclrpat;
+ uint32_t save_vtotal;
+ uint32_t vtotal, vactive;
+ uint32_t vsample;
+ uint32_t vblank, vblank_start, vblank_end;
+ uint32_t dsl;
+ uint32_t bclrpat_reg;
+ uint32_t vtotal_reg;
+ uint32_t vblank_reg;
+ uint32_t vsync_reg;
+ uint32_t pipeconf_reg;
+ uint32_t pipe_dsl_reg;
+ uint8_t st00;
+ enum drm_connector_status status;
+
+ DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
+ bclrpat_reg = BCLRPAT(pipe);
+ vtotal_reg = VTOTAL(pipe);
+ vblank_reg = VBLANK(pipe);
+ vsync_reg = VSYNC(pipe);
+ pipeconf_reg = PIPECONF(pipe);
+ pipe_dsl_reg = PIPEDSL(pipe);
+
+ save_bclrpat = I915_READ(bclrpat_reg);
+ save_vtotal = I915_READ(vtotal_reg);
+ vblank = I915_READ(vblank_reg);
+
+ vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
+ vactive = (save_vtotal & 0x7ff) + 1;
+
+ vblank_start = (vblank & 0xfff) + 1;
+ vblank_end = ((vblank >> 16) & 0xfff) + 1;
+
+ /* Set the border color to purple. */
+ I915_WRITE(bclrpat_reg, 0x500050);
+
+ if (!IS_GEN2(dev)) {
+ uint32_t pipeconf = I915_READ(pipeconf_reg);
+ I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+ POSTING_READ(pipeconf_reg);
+		/* Wait for the next vblank to substitute
+		 * the border color for the color info */
+ intel_wait_for_vblank(dev, pipe);
+ st00 = I915_READ8(VGA_MSR_WRITE);
+ status = ((st00 & (1 << 4)) != 0) ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ I915_WRITE(pipeconf_reg, pipeconf);
+ } else {
+ bool restore_vblank = false;
+ int count, detect;
+
+ /*
+ * If there isn't any border, add some.
+ * Yes, this will flicker
+ */
+ if (vblank_start <= vactive && vblank_end >= vtotal) {
+ uint32_t vsync = I915_READ(vsync_reg);
+ uint32_t vsync_start = (vsync & 0xffff) + 1;
+
+ vblank_start = vsync_start;
+ I915_WRITE(vblank_reg,
+ (vblank_start - 1) |
+ ((vblank_end - 1) << 16));
+ restore_vblank = true;
+ }
+ /* sample in the vertical border, selecting the larger one */
+ if (vblank_start - vactive >= vtotal - vblank_end)
+ vsample = (vblank_start + vactive) >> 1;
+ else
+ vsample = (vtotal + vblank_end) >> 1;
+
+ /*
+ * Wait for the border to be displayed
+ */
+ while (I915_READ(pipe_dsl_reg) >= vactive)
+ ;
+ while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
+ ;
+ /*
+ * Watch ST00 for an entire scanline
+ */
+ detect = 0;
+ count = 0;
+ do {
+ count++;
+ /* Read the ST00 VGA status register */
+ st00 = I915_READ8(VGA_MSR_WRITE);
+ if (st00 & (1 << 4))
+ detect++;
+ } while ((I915_READ(pipe_dsl_reg) == dsl));
+
+ /* restore vblank if necessary */
+ if (restore_vblank)
+ I915_WRITE(vblank_reg, vblank);
+ /*
+ * If more than 3/4 of the scanline detected a monitor,
+ * then it is assumed to be present. This works even on i830,
+ * where there isn't any way to force the border color across
+ * the screen
+ */
+ status = detect * 4 > count * 3 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+
+ /* Restore previous settings */
+ I915_WRITE(bclrpat_reg, save_bclrpat);
+
+ return status;
+}
+
+enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ enum drm_connector_status status;
+ struct intel_load_detect_pipe tmp;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+		/* We cannot rely on the HPD pin always being correctly wired
+		 * up; for example, many KVMs do not pass it through, so only
+		 * trust an assertion that the monitor is connected.
+		 */
+ if (intel_crt_detect_hotplug(connector)) {
+ DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ return connector_status_connected;
+ } else
+ DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+ }
+
+ if (intel_crt_detect_ddc(connector))
+ return connector_status_connected;
+
+ /* Load detection is broken on HPD capable machines. Whoever wants a
+ * broken monitor (without edid) to work behind a broken kvm (that fails
+ * to have the right resistors for HP detection) needs to fix this up.
+ * For now just bail out. */
+ if (I915_HAS_HOTPLUG(dev))
+ return connector_status_disconnected;
+
+ if (!force)
+ return connector->status;
+
+ /* for pre-945g platforms use load detect */
+ if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
+ if (intel_crt_detect_ddc(connector))
+ status = connector_status_connected;
+ else
+ status = intel_crt_load_detect(crt);
+ intel_release_load_detect_pipe(connector, &tmp);
+ } else
+ status = connector_status_unknown;
+
+ return status;
+}
+
+void
+intel_crt_destroy(struct drm_connector *connector)
+{
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, M_DRM);
+}
+
+int
+intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int ret;
+ struct i2c_controller *i2c;
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
+ if (ret || !IS_G4X(dev))
+ return ret;
+
+ /* Try to probe digital port for output in DVI-I -> VGA mode. */
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ return intel_crt_ddc_get_modes(connector, i2c);
+}
+
+int
+intel_crt_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ return 0;
+}
+
+void
+intel_crt_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_attached_crt(connector);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = 1;
+ }
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
+ .mode_fixup = intel_crt_mode_fixup,
+ .mode_set = intel_crt_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .reset = intel_crt_reset,
+ .dpms = intel_crt_dpms,
+ .detect = intel_crt_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_crt_destroy,
+ .set_property = intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+ .mode_valid = intel_crt_mode_valid,
+ .get_modes = intel_crt_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+int
+intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+{
+ printf("Skipping CRT initialization for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_crt[] = {
+ {
+ .callback = intel_no_crt_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ { }
+};
+
+void
+intel_crt_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct intel_crt *crt;
+ struct intel_connector *intel_connector;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_no_crt))
+ return;
+
+ crt = malloc(sizeof(struct intel_crt), M_DRM, M_WAITOK | M_ZERO);
+ if (!crt)
+ return;
+
+ intel_connector = malloc(sizeof(struct intel_connector), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(crt, M_DRM);
+ return;
+ }
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base,
+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+
+ intel_connector_attach_encoder(intel_connector, &crt->base);
+
+ crt->base.type = INTEL_OUTPUT_ANALOG;
+ crt->base.cloneable = true;
+ if (IS_I830(dev))
+ crt->base.crtc_mask = (1 << 0);
+ else
+ crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ if (IS_GEN2(dev))
+ connector->interlace_allowed = 0;
+ else
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ if (HAS_PCH_SPLIT(dev))
+ crt->adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev))
+ crt->adpa_reg = VLV_ADPA;
+ else
+ crt->adpa_reg = ADPA;
+
+ crt->base.disable = intel_disable_crt;
+ crt->base.enable = intel_enable_crt;
+ if (IS_HASWELL(dev))
+ crt->base.get_hw_state = intel_ddi_get_hw_state;
+ else
+ crt->base.get_hw_state = intel_crt_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+
+ if (I915_HAS_HOTPLUG(dev))
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ /*
+ * Configure the automatic hotplug detection stuff
+ */
+ crt->force_hotplug_required = 0;
+
+ dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+ /*
+	 * TODO: find a proper way to discover whether we need to set the
+ * polarity and link reversal bits or not, instead of relying on the
+ * BIOS.
+ */
+ if (HAS_PCH_LPT(dev)) {
+ u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+ FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+ dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
+ }
+}
diff --git a/sys/dev/pci/drm/i915/intel_ddi.c b/sys/dev/pci/drm/i915/intel_ddi.c
new file mode 100644
index 00000000000..6537f2e671c
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_ddi.c
@@ -0,0 +1,1582 @@
+/* $OpenBSD: intel_ddi.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0006000E, /* DP parameters */
+ 0x00D75FFF, 0x0005000A,
+ 0x00C30FFF, 0x00040006,
+ 0x80AAAFFF, 0x000B0000,
+ 0x00FFFFFF, 0x0005000A,
+ 0x00D75FFF, 0x000C0004,
+ 0x80C30FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006,
+ 0x80D75FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0007000E, /* FDI parameters */
+ 0x00D75FFF, 0x000F000A,
+ 0x00C30FFF, 0x00060006,
+ 0x00AAAFFF, 0x001E0000,
+ 0x00FFFFFF, 0x000F000A,
+ 0x00D75FFF, 0x00160004,
+ 0x00C30FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00060006,
+ 0x00D75FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+enum port
+ intel_ddi_get_encoder_port(struct intel_encoder *);
+void intel_prepare_ddi_buffers(struct drm_device *, enum port, bool);
+void intel_wait_ddi_buf_idle(struct inteldrm_softc *, enum port);
+void intel_ddi_mode_set(struct drm_encoder *, struct drm_display_mode *,
+ struct drm_display_mode *);
+struct intel_encoder *
+ intel_ddi_get_crtc_encoder(struct drm_crtc *);
+void intel_ddi_calculate_wrpll(int, int *, int *, int *);
+uint32_t intel_ddi_get_crtc_pll(struct inteldrm_softc *, enum pipe);
+void intel_ddi_pre_enable(struct intel_encoder *);
+void intel_ddi_post_disable(struct intel_encoder *);
+void intel_enable_ddi(struct intel_encoder *);
+void intel_disable_ddi(struct intel_encoder *);
+void intel_ddi_hot_plug(struct intel_encoder *);
+void intel_ddi_destroy(struct drm_encoder *);
+bool intel_ddi_mode_fixup(struct drm_encoder *,
+ const struct drm_display_mode *, struct drm_display_mode *);
+
+enum port
+intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+ return intel_dig_port->port;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ return PORT_E;
+
+ } else {
+ panic("Invalid DDI encoder type %d\n", type);
+ }
+}
+
+/* On Haswell, DDI port buffers must be programmed with correct values
+ * in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those
+ */
+void
+intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ bool use_fdi_mode)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 reg;
+ int i;
+ const u32 *ddi_translations = ((use_fdi_mode) ?
+ hsw_ddi_translations_fdi :
+ hsw_ddi_translations_dp);
+
+ DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
+ port_name(port),
+ use_fdi_mode ? "FDI" : "DP");
+
+ WARN((use_fdi_mode && (port != PORT_E)),
+ "Programming port %c in FDI mode, this probably will not work.\n",
+ port_name(port));
+
+ /* The translation tables are flat arrays of dwords written to
+ * consecutive DDI_BUF_TRANS registers, hence the stride of 4. */
+ for (i = 0, reg = DDI_BUF_TRANS(port);
+ i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ I915_WRITE(reg, ddi_translations[i]);
+ reg += 4;
+ }
+}
+
+/* Program DDI buffers translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void
+intel_prepare_ddi(struct drm_device *dev)
+{
+ int port;
+
+ if (IS_HASWELL(dev)) {
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+ /* DDI E is the suggested port for FDI mode, so program it
+ * as such by default. It will have to be re-programmed if
+ * a DP output is detected on it.
+ */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
+ }
+}
+
+static const long hsw_ddi_buf_ctl_values[] = {
+ DDI_BUF_EMP_400MV_0DB_HSW,
+ DDI_BUF_EMP_400MV_3_5DB_HSW,
+ DDI_BUF_EMP_400MV_6DB_HSW,
+ DDI_BUF_EMP_400MV_9_5DB_HSW,
+ DDI_BUF_EMP_600MV_0DB_HSW,
+ DDI_BUF_EMP_600MV_3_5DB_HSW,
+ DDI_BUF_EMP_600MV_6DB_HSW,
+ DDI_BUF_EMP_800MV_0DB_HSW,
+ DDI_BUF_EMP_800MV_3_5DB_HSW
+};
+
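+/* Poll (at most 8 x 1 us) for the DDI buffer to report idle. */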
+void
+intel_wait_ddi_buf_idle(struct inteldrm_softc *dev_priv,
+ enum port port)
+{
+ uint32_t reg = DDI_BUF_CTL(port);
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ DELAY(1);
+ if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ return;
+ }
+ DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which is what we use
+ * here. Also note that when FDI mode is active on DDI E, it shares 2 lanes
+ * with DDI A (which is used for eDP).
+ */
+
+void
+hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 temp, i, rx_ctl_val;
+
+ /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
+ * mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ */
+ I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+ FDI_RX_PWRDN_LANE0_VAL(2) |
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+ DELAY(220);
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+ /* Start training, iterating through the available voltage and
+ * emphasis settings and testing each value twice; on failure the
+ * DDI buffer and PCH receiver are disabled, reset and re-armed
+ * with the next value. */
+ for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal, the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+ * port reversal bit */
+ I915_WRITE(DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE |
+ ((intel_crtc->fdi_lanes - 1) << 1) |
+ hsw_ddi_buf_ctl_values[i / 2]);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ DELAY(600);
+
+ /* Program PCH FDI Receiver TU */
+ I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Wait for FDI receiver lane calibration */
+ DELAY(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+
+ /* Wait for FDI auto training time */
+ DELAY(5);
+
+ temp = I915_READ(DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+
+ /* Enable normal pixel sending for FDI */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+
+ return;
+ }
+
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(PORT_E), temp);
+ POSTING_READ(DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+ }
+
+ DRM_ERROR("FDI link training failed!\n");
+}
+
+/* WRPLL clock dividers */
+struct wrpll_tmds_clock {
+ u32 clock;
+ u16 p; /* Post divider */
+ u16 n2; /* Feedback divider */
+ u16 r2; /* Reference divider */
+};
+
+/* Table of WRPLL divider values for each supported TMDS frequency.
+ * The lookup code assumes this table is sorted by clock. */
+static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
+ {19750, 38, 25, 18},
+ {20000, 48, 32, 18},
+ {21000, 36, 21, 15},
+ {21912, 42, 29, 17},
+ {22000, 36, 22, 15},
+ {23000, 36, 23, 15},
+ {23500, 40, 40, 23},
+ {23750, 26, 16, 14},
+ {24000, 36, 24, 15},
+ {25000, 36, 25, 15},
+ {25175, 26, 40, 33},
+ {25200, 30, 21, 15},
+ {26000, 36, 26, 15},
+ {27000, 30, 21, 14},
+ {27027, 18, 100, 111},
+ {27500, 30, 29, 19},
+ {28000, 34, 30, 17},
+ {28320, 26, 30, 22},
+ {28322, 32, 42, 25},
+ {28750, 24, 23, 18},
+ {29000, 30, 29, 18},
+ {29750, 32, 30, 17},
+ {30000, 30, 25, 15},
+ {30750, 30, 41, 24},
+ {31000, 30, 31, 18},
+ {31500, 30, 28, 16},
+ {32000, 30, 32, 18},
+ {32500, 28, 32, 19},
+ {33000, 24, 22, 15},
+ {34000, 28, 30, 17},
+ {35000, 26, 32, 19},
+ {35500, 24, 30, 19},
+ {36000, 26, 26, 15},
+ {36750, 26, 46, 26},
+ {37000, 24, 23, 14},
+ {37762, 22, 40, 26},
+ {37800, 20, 21, 15},
+ {38000, 24, 27, 16},
+ {38250, 24, 34, 20},
+ {39000, 24, 26, 15},
+ {40000, 24, 32, 18},
+ {40500, 20, 21, 14},
+ {40541, 22, 147, 89},
+ {40750, 18, 19, 14},
+ {41000, 16, 17, 14},
+ {41500, 22, 44, 26},
+ {41540, 22, 44, 26},
+ {42000, 18, 21, 15},
+ {42500, 22, 45, 26},
+ {43000, 20, 43, 27},
+ {43163, 20, 24, 15},
+ {44000, 18, 22, 15},
+ {44900, 20, 108, 65},
+ {45000, 20, 25, 15},
+ {45250, 20, 52, 31},
+ {46000, 18, 23, 15},
+ {46750, 20, 45, 26},
+ {47000, 20, 40, 23},
+ {48000, 18, 24, 15},
+ {49000, 18, 49, 30},
+ {49500, 16, 22, 15},
+ {50000, 18, 25, 15},
+ {50500, 18, 32, 19},
+ {51000, 18, 34, 20},
+ {52000, 18, 26, 15},
+ {52406, 14, 34, 25},
+ {53000, 16, 22, 14},
+ {54000, 16, 24, 15},
+ {54054, 16, 173, 108},
+ {54500, 14, 24, 17},
+ {55000, 12, 22, 18},
+ {56000, 14, 45, 31},
+ {56250, 16, 25, 15},
+ {56750, 14, 25, 17},
+ {57000, 16, 27, 16},
+ {58000, 16, 43, 25},
+ {58250, 16, 38, 22},
+ {58750, 16, 40, 23},
+ {59000, 14, 26, 17},
+ {59341, 14, 40, 26},
+ {59400, 16, 44, 25},
+ {60000, 16, 32, 18},
+ {60500, 12, 39, 29},
+ {61000, 14, 49, 31},
+ {62000, 14, 37, 23},
+ {62250, 14, 42, 26},
+ {63000, 12, 21, 15},
+ {63500, 14, 28, 17},
+ {64000, 12, 27, 19},
+ {65000, 14, 32, 19},
+ {65250, 12, 29, 20},
+ {65500, 12, 32, 22},
+ {66000, 12, 22, 15},
+ {66667, 14, 38, 22},
+ {66750, 10, 21, 17},
+ {67000, 14, 33, 19},
+ {67750, 14, 58, 33},
+ {68000, 14, 30, 17},
+ {68179, 14, 46, 26},
+ {68250, 14, 46, 26},
+ {69000, 12, 23, 15},
+ {70000, 12, 28, 18},
+ {71000, 12, 30, 19},
+ {72000, 12, 24, 15},
+ {73000, 10, 23, 17},
+ {74000, 12, 23, 14},
+ {74176, 8, 100, 91},
+ {74250, 10, 22, 16},
+ {74481, 12, 43, 26},
+ {74500, 10, 29, 21},
+ {75000, 12, 25, 15},
+ {75250, 10, 39, 28},
+ {76000, 12, 27, 16},
+ {77000, 12, 53, 31},
+ {78000, 12, 26, 15},
+ {78750, 12, 28, 16},
+ {79000, 10, 38, 26},
+ {79500, 10, 28, 19},
+ {80000, 12, 32, 18},
+ {81000, 10, 21, 14},
+ {81081, 6, 100, 111},
+ {81624, 8, 29, 24},
+ {82000, 8, 17, 14},
+ {83000, 10, 40, 26},
+ {83950, 10, 28, 18},
+ {84000, 10, 28, 18},
+ {84750, 6, 16, 17},
+ {85000, 6, 17, 18},
+ {85250, 10, 30, 19},
+ {85750, 10, 27, 17},
+ {86000, 10, 43, 27},
+ {87000, 10, 29, 18},
+ {88000, 10, 44, 27},
+ {88500, 10, 41, 25},
+ {89000, 10, 28, 17},
+ {89012, 6, 90, 91},
+ {89100, 10, 33, 20},
+ {90000, 10, 25, 15},
+ {91000, 10, 32, 19},
+ {92000, 10, 46, 27},
+ {93000, 10, 31, 18},
+ {94000, 10, 40, 23},
+ {94500, 10, 28, 16},
+ {95000, 10, 44, 25},
+ {95654, 10, 39, 22},
+ {95750, 10, 39, 22},
+ {96000, 10, 32, 18},
+ {97000, 8, 23, 16},
+ {97750, 8, 42, 29},
+ {98000, 8, 45, 31},
+ {99000, 8, 22, 15},
+ {99750, 8, 34, 23},
+ {100000, 6, 20, 18},
+ {100500, 6, 19, 17},
+ {101000, 6, 37, 33},
+ {101250, 8, 21, 14},
+ {102000, 6, 17, 15},
+ {102250, 6, 25, 22},
+ {103000, 8, 29, 19},
+ {104000, 8, 37, 24},
+ {105000, 8, 28, 18},
+ {106000, 8, 22, 14},
+ {107000, 8, 46, 29},
+ {107214, 8, 27, 17},
+ {108000, 8, 24, 15},
+ {108108, 8, 173, 108},
+ {109000, 6, 23, 19},
+ {110000, 6, 22, 18},
+ {110013, 6, 22, 18},
+ {110250, 8, 49, 30},
+ {110500, 8, 36, 22},
+ {111000, 8, 23, 14},
+ {111264, 8, 150, 91},
+ {111375, 8, 33, 20},
+ {112000, 8, 63, 38},
+ {112500, 8, 25, 15},
+ {113100, 8, 57, 34},
+ {113309, 8, 42, 25},
+ {114000, 8, 27, 16},
+ {115000, 6, 23, 18},
+ {116000, 8, 43, 25},
+ {117000, 8, 26, 15},
+ {117500, 8, 40, 23},
+ {118000, 6, 38, 29},
+ {119000, 8, 30, 17},
+ {119500, 8, 46, 26},
+ {119651, 8, 39, 22},
+ {120000, 8, 32, 18},
+ {121000, 6, 39, 29},
+ {121250, 6, 31, 23},
+ {121750, 6, 23, 17},
+ {122000, 6, 42, 31},
+ {122614, 6, 30, 22},
+ {123000, 6, 41, 30},
+ {123379, 6, 37, 27},
+ {124000, 6, 51, 37},
+ {125000, 6, 25, 18},
+ {125250, 4, 13, 14},
+ {125750, 4, 27, 29},
+ {126000, 6, 21, 15},
+ {127000, 6, 24, 17},
+ {127250, 6, 41, 29},
+ {128000, 6, 27, 19},
+ {129000, 6, 43, 30},
+ {129859, 4, 25, 26},
+ {130000, 6, 26, 18},
+ {130250, 6, 42, 29},
+ {131000, 6, 32, 22},
+ {131500, 6, 38, 26},
+ {131850, 6, 41, 28},
+ {132000, 6, 22, 15},
+ {132750, 6, 28, 19},
+ {133000, 6, 34, 23},
+ {133330, 6, 37, 25},
+ {134000, 6, 61, 41},
+ {135000, 6, 21, 14},
+ {135250, 6, 167, 111},
+ {136000, 6, 62, 41},
+ {137000, 6, 35, 23},
+ {138000, 6, 23, 15},
+ {138500, 6, 40, 26},
+ {138750, 6, 37, 24},
+ {139000, 6, 34, 22},
+ {139050, 6, 34, 22},
+ {139054, 6, 34, 22},
+ {140000, 6, 28, 18},
+ {141000, 6, 36, 23},
+ {141500, 6, 22, 14},
+ {142000, 6, 30, 19},
+ {143000, 6, 27, 17},
+ {143472, 4, 17, 16},
+ {144000, 6, 24, 15},
+ {145000, 6, 29, 18},
+ {146000, 6, 47, 29},
+ {146250, 6, 26, 16},
+ {147000, 6, 49, 30},
+ {147891, 6, 23, 14},
+ {148000, 6, 23, 14},
+ {148250, 6, 28, 17},
+ {148352, 4, 100, 91},
+ {148500, 6, 33, 20},
+ {149000, 6, 48, 29},
+ {150000, 6, 25, 15},
+ {151000, 4, 19, 17},
+ {152000, 6, 27, 16},
+ {152280, 6, 44, 26},
+ {153000, 6, 34, 20},
+ {154000, 6, 53, 31},
+ {155000, 6, 31, 18},
+ {155250, 6, 50, 29},
+ {155750, 6, 45, 26},
+ {156000, 6, 26, 15},
+ {157000, 6, 61, 35},
+ {157500, 6, 28, 16},
+ {158000, 6, 65, 37},
+ {158250, 6, 44, 25},
+ {159000, 6, 53, 30},
+ {159500, 6, 39, 22},
+ {160000, 6, 32, 18},
+ {161000, 4, 31, 26},
+ {162000, 4, 18, 15},
+ {162162, 4, 131, 109},
+ {162500, 4, 53, 44},
+ {163000, 4, 29, 24},
+ {164000, 4, 17, 14},
+ {165000, 4, 22, 18},
+ {166000, 4, 32, 26},
+ {167000, 4, 26, 21},
+ {168000, 4, 46, 37},
+ {169000, 4, 104, 83},
+ {169128, 4, 64, 51},
+ {169500, 4, 39, 31},
+ {170000, 4, 34, 27},
+ {171000, 4, 19, 15},
+ {172000, 4, 51, 40},
+ {172750, 4, 32, 25},
+ {172800, 4, 32, 25},
+ {173000, 4, 41, 32},
+ {174000, 4, 49, 38},
+ {174787, 4, 22, 17},
+ {175000, 4, 35, 27},
+ {176000, 4, 30, 23},
+ {177000, 4, 38, 29},
+ {178000, 4, 29, 22},
+ {178500, 4, 37, 28},
+ {179000, 4, 53, 40},
+ {179500, 4, 73, 55},
+ {180000, 4, 20, 15},
+ {181000, 4, 55, 41},
+ {182000, 4, 31, 23},
+ {183000, 4, 42, 31},
+ {184000, 4, 30, 22},
+ {184750, 4, 26, 19},
+ {185000, 4, 37, 27},
+ {186000, 4, 51, 37},
+ {187000, 4, 36, 26},
+ {188000, 4, 32, 23},
+ {189000, 4, 21, 15},
+ {190000, 4, 38, 27},
+ {190960, 4, 41, 29},
+ {191000, 4, 41, 29},
+ {192000, 4, 27, 19},
+ {192250, 4, 37, 26},
+ {193000, 4, 20, 14},
+ {193250, 4, 53, 37},
+ {194000, 4, 23, 16},
+ {194208, 4, 23, 16},
+ {195000, 4, 26, 18},
+ {196000, 4, 45, 31},
+ {197000, 4, 35, 24},
+ {197750, 4, 41, 28},
+ {198000, 4, 22, 15},
+ {198500, 4, 25, 17},
+ {199000, 4, 28, 19},
+ {200000, 4, 37, 25},
+ {201000, 4, 61, 41},
+ {202000, 4, 112, 75},
+ {202500, 4, 21, 14},
+ {203000, 4, 146, 97},
+ {204000, 4, 62, 41},
+ {204750, 4, 44, 29},
+ {205000, 4, 38, 25},
+ {206000, 4, 29, 19},
+ {207000, 4, 23, 15},
+ {207500, 4, 40, 26},
+ {208000, 4, 37, 24},
+ {208900, 4, 48, 31},
+ {209000, 4, 48, 31},
+ {209250, 4, 31, 20},
+ {210000, 4, 28, 18},
+ {211000, 4, 25, 16},
+ {212000, 4, 22, 14},
+ {213000, 4, 30, 19},
+ {213750, 4, 38, 24},
+ {214000, 4, 46, 29},
+ {214750, 4, 35, 22},
+ {215000, 4, 43, 27},
+ {216000, 4, 24, 15},
+ {217000, 4, 37, 23},
+ {218000, 4, 42, 26},
+ {218250, 4, 42, 26},
+ {218750, 4, 34, 21},
+ {219000, 4, 47, 29},
+ {220000, 4, 44, 27},
+ {220640, 4, 49, 30},
+ {220750, 4, 36, 22},
+ {221000, 4, 36, 22},
+ {222000, 4, 23, 14},
+ {222525, 4, 28, 17},
+ {222750, 4, 33, 20},
+ {227000, 4, 37, 22},
+ {230250, 4, 29, 17},
+ {233500, 4, 38, 22},
+ {235000, 4, 40, 23},
+ {238000, 4, 30, 17},
+ {241500, 2, 17, 19},
+ {245250, 2, 20, 22},
+ {247750, 2, 22, 24},
+ {253250, 2, 15, 16},
+ {256250, 2, 18, 19},
+ {262500, 2, 31, 32},
+ {267250, 2, 66, 67},
+ {268500, 2, 94, 95},
+ {270000, 2, 14, 14},
+ {272500, 2, 77, 76},
+ {273750, 2, 57, 56},
+ {280750, 2, 24, 23},
+ {281250, 2, 23, 22},
+ {286000, 2, 17, 16},
+ {291750, 2, 26, 24},
+ {296703, 2, 56, 51},
+ {297000, 2, 22, 20},
+ {298000, 2, 21, 19},
+};
+
+void
+intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+#ifdef DRMDEBUG
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int port = intel_ddi_get_encoder_port(intel_encoder);
+ int pipe = intel_crtc->pipe;
+#endif
+ int type = intel_encoder->type;
+
+ DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+ intel_dp->DP = intel_dig_port->port_reversal |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ intel_dp->DP |= DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ WARN(1, "Unexpected DP lane count %d\n",
+ intel_dp->lane_count);
+ break;
+ }
+
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_dp_init_link_config(intel_dp);
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs new logic
+ * and a new set of registers, so we leave it for
+ * future patch bombing.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ }
+}
+
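+/* The DDI code assumes each crtc drives exactly one encoder. */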
+struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+
+ if (num_encoders != 1)
+ WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
+ intel_crtc->pipe);
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
+void
+intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t val;
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ plls->spll_refcount--;
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ plls->wrpll1_refcount--;
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+ val = I915_READ(WRPLL_CTL1);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL1);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ plls->wrpll2_refcount--;
+ if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+ val = I915_READ(WRPLL_CTL2);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL2);
+ }
+ break;
+ }
+
+ WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+ WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+ WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+void
+intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+ if (clock <= wrpll_tmds_clock_table[i].clock)
+ break;
+
+ if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+ i--;
+
+ *p = wrpll_tmds_clock_table[i].p;
+ *n2 = wrpll_tmds_clock_table[i].n2;
+ *r2 = wrpll_tmds_clock_table[i].r2;
+
+ if (wrpll_tmds_clock_table[i].clock != clock)
+ printf("WRPLL: using settings for %dkHz on %dkHz mode\n",
+ wrpll_tmds_clock_table[i].clock, clock);
+
+ DRM_DEBUG_KMS("WRPLL: %dkHz pixel clock with p=%d, n2=%d r2=%d\n",
+ clock, *p, *n2, *r2);
+}
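+
+/*
+ * Illustrative sketch, not from the original sources: the table entries
+ * are consistent with the WRPLL running from the 2700 MHz LCPLL, with
+ * clock (kHz) ~= 2700000 * n2 / (5 * p * r2).  A hypothetical helper to
+ * recover the frequency an entry produces could look like this:
+ */
+#ifdef notyet
+static int
+wrpll_clock_khz(int p, int n2, int r2)
+{
+ /* 2700000 kHz reference; the divide-by-5 is inferred from the table */
+ return 2700000 * n2 / (5 * p * r2);
+}
+#endif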
+
+bool
+intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct inteldrm_softc *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int type = intel_encoder->type;
+#ifdef DRMDEBUG
+ enum pipe pipe = intel_crtc->pipe;
+#endif
+ uint32_t reg, val;
+
+ /* TODO: reuse PLLs when possible (compare values) */
+
+ intel_ddi_put_crtc_pll(crtc);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ default:
+ DRM_ERROR("Link bandwidth %d unsupported\n",
+ intel_dp->link_bw);
+ return false;
+ }
+
+ /* We don't need to turn any PLL on because we'll use LCPLL. */
+ return true;
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ int p, n2, r2;
+
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll1_refcount++;
+ reg = WRPLL_CTL1;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll2_refcount++;
+ reg = WRPLL_CTL2;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ } else {
+ DRM_ERROR("No WRPLLs available!\n");
+ return false;
+ }
+
+ WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
+ "WRPLL already enabled\n");
+
+ intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+ pipe_name(pipe));
+ plls->spll_refcount++;
+ reg = SPLL_CTL;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ } else {
+ /* Bail out rather than read SPLL_CTL through an
+ * uninitialized reg below. */
+ DRM_ERROR("SPLL already in use\n");
+ return false;
+ }
+
+ WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
+ "SPLL already enabled\n");
+
+ val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+ } else {
+ WARN(1, "Invalid DDI encoder type %d\n", type);
+ return false;
+ }
+
+ I915_WRITE(reg, val);
+ DELAY(20);
+
+ return true;
+}
+
+void
+intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ temp |= TRANS_MSA_8_BPC;
+ WARN(1, "%d bpp unsupported by DDI function\n",
+ intel_crtc->bpp);
+ }
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ }
+}
+
+void
+intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct inteldrm_softc *dev_priv = crtc->dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = TRANS_DDI_FUNC_ENABLE;
+ temp |= TRANS_DDI_SELECT_PORT(port);
+
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= TRANS_DDI_BPC_6;
+ break;
+ case 24:
+ temp |= TRANS_DDI_BPC_8;
+ break;
+ case 30:
+ temp |= TRANS_DDI_BPC_10;
+ break;
+ case 36:
+ temp |= TRANS_DDI_BPC_12;
+ break;
+ default:
+ WARN(1, "%d bpp unsupported by transcoder DDI function\n",
+ intel_crtc->bpp);
+ }
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DDI_PVSYNC;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DDI_PHSYNC;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ switch (pipe) {
+ case PIPE_A:
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ break;
+ case PIPE_B:
+ temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= TRANS_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= (intel_crtc->fdi_lanes - 1) << 1;
+
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ temp |= TRANS_DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ temp |= TRANS_DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ WARN(1, "Unsupported lane count %d\n",
+ intel_dp->lane_count);
+ }
+
+ } else {
+ WARN(1, "Invalid encoder type %d for pipe %d\n",
+ intel_encoder->type, pipe);
+ }
+
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
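+/* Tear down the transcoder's DDI function and deselect its port. */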
+void
+intel_ddi_disable_transcoder_func(struct inteldrm_softc *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ uint32_t val = I915_READ(reg);
+
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val |= TRANS_DDI_PORT_NONE;
+ I915_WRITE(reg, val);
+}
+
+bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ int type = intel_connector->base.connector_type;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum pipe pipe = 0;
+ enum transcoder cpu_transcoder;
+ uint32_t tmp;
+
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+ return false;
+
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_EDP;
+ else
+ cpu_transcoder = pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ return true;
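+ /* FALLTHROUGH */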
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ return (type == DRM_MODE_CONNECTOR_DisplayPort);
+
+ case TRANS_DDI_MODE_SELECT_FDI:
+ return (type == DRM_MODE_CONNECTOR_VGA);
+
+ default:
+ return false;
+ }
+}
+
+bool
+intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ u32 tmp;
+ int i;
+
+ tmp = I915_READ(DDI_BUF_CTL(port));
+
+ if (!(tmp & DDI_BUF_CTL_ENABLE))
+ return false;
+
+ if (port == PORT_A) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ }
+
+ return true;
+ } else {
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK)
+ == TRANS_DDI_SELECT_PORT(port)) {
+ *pipe = i;
+ return true;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
+
+ return true;
+}
+
+uint32_t
+intel_ddi_get_crtc_pll(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t temp, ret;
+ enum port port;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int i;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ port = PORT_A;
+ } else {
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp &= TRANS_DDI_PORT_MASK;
+
+ for (i = PORT_B; i <= PORT_E; i++)
+ if (temp == TRANS_DDI_SELECT_PORT(i))
+ port = i;
+ }
+
+ ret = I915_READ(PORT_CLK_SEL(port));
+
+ DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
+ pipe_name(pipe), port_name(port), ret);
+
+ return ret;
+}
+
+void
+intel_ddi_setup_hw_pll_state(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *intel_crtc;
+
+ for_each_pipe(pipe) {
+ intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (!intel_crtc->active)
+ continue;
+
+ intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+ pipe);
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ dev_priv->ddi_plls.spll_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ dev_priv->ddi_plls.wrpll1_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ dev_priv->ddi_plls.wrpll2_refcount++;
+ break;
+ }
+ }
+}
+
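+/* The EDP transcoder presumably takes its clock directly from DDI A,
+ * so only transcoders A-C need an explicit TRANS_CLK_SEL selection.
+ */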
+void
+intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct inteldrm_softc *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+}
+
+void
+intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct inteldrm_softc *dev_priv = intel_crtc->base.dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+}
+
+void
+intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct inteldrm_softc *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ }
+
+ WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
+}
+
+void
+intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct inteldrm_softc *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t val;
+ bool wait = false;
+
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+ }
+
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+void
+intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+ /* In HDMI/DVI mode, the port width, and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+ } else if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_on(intel_dp);
+ }
+}
+
+void
+intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_off(intel_dp);
+ }
+}
+
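+/* CDCLK frequency in MHz; 338 is presumably the ULT 337.5 MHz clock
+ * rounded up.
+ */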
+int
+intel_ddi_get_cdclk_freq(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450;
+ else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+ LCPLL_CLK_FREQ_450)
+ return 450;
+ else if (IS_ULT(dev))
+ return 338;
+ else
+ return 540;
+}
+
+void
+intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ /* The LCPLL register should be turned on by the BIOS. For now let's
+ * just check its state and print errors in case something is wrong.
+ * Don't even try to turn it on.
+ */
+
+ DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+}
+
+void
+intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct inteldrm_softc *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_dig_port->port;
+ bool wait = false;
+ uint32_t val;
+
+ if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+ }
+
+ val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+ POSTING_READ(DDI_BUF_CTL(port));
+
+ DELAY(600);
+}
+
+void
+intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ uint32_t val;
+
+ intel_ddi_post_disable(intel_encoder);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_MISC);
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+void
+intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+ intel_dp_check_link_status(intel_dp);
+}
+
+void
+intel_ddi_destroy(struct drm_encoder *encoder)
+{
+ /* HDMI has nothing special to destroy, so we can go with this. */
+ intel_dp_encoder_destroy(encoder);
+}
+
+bool
+intel_ddi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int type = intel_encoder->type;
+
+ WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+
+ if (type == INTEL_OUTPUT_HDMI)
+ return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+ else
+ return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+ .destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+ .mode_fixup = intel_ddi_mode_fixup,
+ .mode_set = intel_ddi_mode_set,
+ .disable = intel_encoder_noop,
+};
+
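+/* Create the digital port encoder plus its DP connector and, except on
+ * port A (which is eDP-only), an HDMI connector.
+ */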
+void
+intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *hdmi_connector = NULL;
+ struct intel_connector *dp_connector = NULL;
+
+ intel_dig_port = malloc(sizeof(struct intel_digital_port), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_dig_port)
+ return;
+
+ dp_connector = malloc(sizeof(struct intel_connector), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!dp_connector) {
+ free(intel_dig_port, M_DRM);
+ return;
+ }
+
+ if (port != PORT_A) {
+ hdmi_connector = malloc(sizeof(struct intel_connector),
+ M_DRM, M_WAITOK | M_ZERO);
+ if (!hdmi_connector) {
+ free(dp_connector, M_DRM);
+ free(intel_dig_port, M_DRM);
+ return;
+ }
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->pre_enable = intel_ddi_pre_enable;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+ intel_dig_port->port = port;
+ intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_PORT_REVERSAL;
+ if (hdmi_connector)
+ intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
+ else
+ intel_dig_port->hdmi.sdvox_reg = 0;
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+
+ intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_ddi_hot_plug;
+
+ if (hdmi_connector)
+ intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+ intel_dp_init_connector(intel_dig_port, dp_connector);
+}
diff --git a/sys/dev/pci/drm/i915/intel_display.c b/sys/dev/pci/drm/i915/intel_display.c
new file mode 100644
index 00000000000..84d342fd2b0
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_display.c
@@ -0,0 +1,10065 @@
+/* $OpenBSD: intel_display.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_edid.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include <dev/pci/drm/drm_dp_helper.h>
+#include <dev/pci/drm/drm_crtc_helper.h>
+
+typedef struct {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+} intel_clock_t;
+
+typedef struct {
+ int min, max;
+} intel_range_t;
+
+typedef struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+} intel_p2_t;
+
+#define INTEL_P2_NUM 2
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ intel_p2_t p2;
+ bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+ int, int, intel_clock_t *, intel_clock_t *);
+};
+
+/* FDI */
+struct fdi_m_n {
+ u32 tu;
+ u32 gmch_m;
+ u32 gmch_n;
+ u32 link_m;
+ u32 link_n;
+};
+
+#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
+void intel_increase_pllclock(struct drm_crtc *crtc);
+void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+
+const intel_limit_t *intel_ironlake_limit(struct drm_crtc *, int);
+const intel_limit_t *intel_g4x_limit(struct drm_crtc *);
+const intel_limit_t *intel_limit(struct drm_crtc *, int);
+
+void pineview_clock(int, intel_clock_t *);
+void intel_clock(struct drm_device *, int, intel_clock_t *clock);
+bool intel_PLL_is_valid(struct drm_device *, const intel_limit_t *,
+ const intel_clock_t *);
+void assert_pll(struct inteldrm_softc *, enum pipe, bool);
+void assert_pch_pll(struct inteldrm_softc *, struct intel_pch_pll *,
+ struct intel_crtc *, bool);
+void assert_fdi_tx(struct inteldrm_softc *, enum pipe, bool);
+void assert_fdi_rx(struct inteldrm_softc *, enum pipe, bool);
+void assert_fdi_tx_pll_enabled(struct inteldrm_softc *, enum pipe);
+void assert_fdi_rx_pll_enabled(struct inteldrm_softc *, enum pipe);
+void assert_panel_unlocked(struct inteldrm_softc *, enum pipe);
+void assert_plane(struct inteldrm_softc *, enum plane, bool);
+void assert_planes_disabled(struct inteldrm_softc *, enum pipe);
+void assert_pch_refclk_enabled(struct inteldrm_softc *);
+void assert_transcoder_disabled(struct inteldrm_softc *, enum pipe);
+bool hdmi_pipe_enabled(struct inteldrm_softc *, enum pipe, u32);
+bool lvds_pipe_enabled(struct inteldrm_softc *, enum pipe, u32);
+bool adpa_pipe_enabled(struct inteldrm_softc *, enum pipe, u32);
+bool dp_pipe_enabled(struct inteldrm_softc *, enum pipe, u32, u32);
+void assert_pch_dp_disabled(struct inteldrm_softc *, enum pipe, int, u32);
+void assert_pch_hdmi_disabled(struct inteldrm_softc *, enum pipe, int);
+void assert_pch_ports_disabled(struct inteldrm_softc *, enum pipe);
+void intel_enable_pll(struct inteldrm_softc *, enum pipe);
+void intel_disable_pll(struct inteldrm_softc *, enum pipe);
+void intel_enable_pch_pll(struct inteldrm_softc *, enum pipe);
+void intel_disable_pch_pll(struct intel_crtc *);
+void intel_enable_transcoder(struct inteldrm_softc *, enum pipe);
+void intel_disable_transcoder(struct inteldrm_softc *, enum pipe);
+void intel_enable_pipe(struct inteldrm_softc *, enum pipe, bool);
+void intel_disable_pipe(struct inteldrm_softc *, enum pipe);
+void intel_flush_display_plane(struct inteldrm_softc *, enum plane);
+void intel_enable_plane(struct inteldrm_softc *, enum plane, enum pipe);
+void intel_disable_plane(struct inteldrm_softc *, enum plane, enum pipe);
+void disable_pch_dp(struct inteldrm_softc *, enum pipe, int, u32);
+void disable_pch_hdmi(struct inteldrm_softc *, enum pipe, int);
+void intel_disable_pch_ports(struct inteldrm_softc *, enum pipe);
+void i8xx_disable_fbc(struct drm_device *);
+void i8xx_enable_fbc(struct drm_crtc *, unsigned long);
+bool i8xx_fbc_enabled(struct drm_device *);
+void g4x_enable_fbc(struct drm_crtc *, unsigned long);
+void g4x_disable_fbc(struct drm_device *);
+bool g4x_fbc_enabled(struct drm_device *);
+void sandybridge_blit_fbc_update(struct drm_device *);
+void ironlake_enable_fbc(struct drm_crtc *, unsigned long);
+void ironlake_disable_fbc(struct drm_device *);
+bool ironlake_fbc_enabled(struct drm_device *);
+void intel_fbc_work_fn(void *, int);
+void intel_cancel_fbc_work(struct inteldrm_softc *);
+void intel_enable_fbc(struct drm_crtc *, unsigned long);
+void intel_update_fbc(struct drm_device *);
+int i9xx_update_plane(struct drm_crtc *, struct drm_framebuffer *, int,
+ int);
+int ironlake_update_plane(struct drm_crtc *, struct drm_framebuffer *, int,
+ int);
+int intel_pipe_set_base_atomic(struct drm_crtc *, struct drm_framebuffer *,
+ int, int, enum mode_set_atomic);
+int intel_finish_fb(struct drm_framebuffer *);
+int intel_pipe_set_base(struct drm_crtc *, int, int,
+ struct drm_framebuffer *);
+void ironlake_set_pll_edp(struct drm_crtc *, int);
+void intel_fdi_normal_train(struct drm_crtc *);
+void cpt_phase_pointer_enable(struct drm_device *, int);
+void ironlake_fdi_link_train(struct drm_crtc *);
+void gen6_fdi_link_train(struct drm_crtc *);
+void ivb_manual_fdi_link_train(struct drm_crtc *);
+void ironlake_fdi_pll_enable(struct intel_crtc *);
+void cpt_phase_pointer_disable(struct drm_device *, int);
+void ironlake_fdi_disable(struct drm_crtc *);
+void intel_clear_scanline_wait(struct drm_device *);
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *);
+bool intel_crtc_driving_pch(struct drm_crtc *);
+void ironlake_pch_enable(struct drm_crtc *);
+void ironlake_crtc_enable(struct drm_crtc *);
+void ironlake_crtc_disable(struct drm_crtc *);
+void ironlake_crtc_dpms(struct drm_crtc *, int);
+void intel_crtc_dpms_overlay(struct intel_crtc *, bool);
+void i9xx_crtc_enable(struct drm_crtc *);
+void i9xx_crtc_disable(struct drm_crtc *);
+void i9xx_crtc_dpms(struct drm_crtc *, int);
+void intel_crtc_dpms(struct drm_crtc *, int);
+void intel_crtc_disable(struct drm_crtc *);
+void i9xx_crtc_prepare(struct drm_crtc *);
+void i9xx_crtc_commit(struct drm_crtc *);
+void ironlake_crtc_prepare(struct drm_crtc *);
+void ironlake_crtc_commit(struct drm_crtc *);
+bool intel_crtc_mode_fixup(struct drm_crtc *, const struct drm_display_mode *,
+ struct drm_display_mode *);
+int i945_get_display_clock_speed(struct drm_device *);
+int i915_get_display_clock_speed(struct drm_device *);
+int i9xx_misc_get_display_clock_speed(struct drm_device *);
+int i915gm_get_display_clock_speed(struct drm_device *);
+int i865_get_display_clock_speed(struct drm_device *);
+int i855_get_display_clock_speed(struct drm_device *);
+int i830_get_display_clock_speed(struct drm_device *);
+void fdi_reduce_ratio(u32 *, u32 *);
+void ironlake_compute_m_n(int, int, int, int, struct fdi_m_n *);
+unsigned long intel_calculate_wm(unsigned long,
+ const struct intel_watermark_params *, int, int,
+ unsigned long);
+const struct cxsr_latency *intel_get_cxsr_latency(int, int, int, int);
+void pineview_disable_cxsr(struct drm_device *);
+int i9xx_get_fifo_size(struct drm_device *, int);
+int i85x_get_fifo_size(struct drm_device *, int);
+int i845_get_fifo_size(struct drm_device *, int);
+int i830_get_fifo_size(struct drm_device *, int);
+struct drm_crtc *single_enabled_crtc(struct drm_device *);
+void pineview_update_wm(struct drm_device *);
+bool g4x_compute_wm0(struct drm_device *, int,
+ const struct intel_watermark_params *, int,
+ const struct intel_watermark_params *, int, int *, int *);
+bool g4x_check_srwm(struct drm_device *, int, int,
+ const struct intel_watermark_params *,
+ const struct intel_watermark_params *);
+bool g4x_compute_srwm(struct drm_device *, int, int,
+ const struct intel_watermark_params *,
+ const struct intel_watermark_params *, int *, int *);
+void g4x_update_wm(struct drm_device *);
+void i965_update_wm(struct drm_device *);
+void i9xx_update_wm(struct drm_device *);
+void i830_update_wm(struct drm_device *);
+bool ironlake_check_srwm(struct drm_device *, int, int, int, int,
+ const struct intel_watermark_params *,
+ const struct intel_watermark_params *);
+bool ironlake_compute_srwm(struct drm_device *, int, int, int,
+ const struct intel_watermark_params *,
+ const struct intel_watermark_params *, int *, int *, int *);
+void ironlake_update_wm(struct drm_device *);
+bool sandybridge_compute_sprite_wm(struct drm_device *, int, uint32_t, int,
+ const struct intel_watermark_params *, int, int *);
+bool sandybridge_compute_sprite_srwm(struct drm_device *, int, uint32_t,
+ int, const struct intel_watermark_params *, int, int *);
+void sandybridge_update_sprite_wm(struct drm_device *, int, uint32_t, int);
+bool intel_choose_pipe_bpp_dither(struct drm_crtc *, struct drm_framebuffer *,
+ unsigned int *, struct drm_display_mode *);
+int i9xx_get_refclk(struct drm_crtc *, int);
+void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *, intel_clock_t *);
+void i9xx_update_pll_dividers(struct drm_crtc *, intel_clock_t *,
+ intel_clock_t *);
+int i9xx_crtc_mode_set(struct drm_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *, int, int, struct drm_framebuffer *);
+int ironlake_get_refclk(struct drm_crtc *);
+int ironlake_crtc_mode_set(struct drm_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *, int, int, struct drm_framebuffer *);
+int intel_crtc_mode_set(struct drm_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *, int, int, struct drm_framebuffer *);
+bool intel_eld_uptodate(struct drm_connector *, int, uint32_t, int,
+ uint32_t, int);
+void g4x_write_eld(struct drm_connector *, struct drm_crtc *);
+void ironlake_write_eld(struct drm_connector *, struct drm_crtc *);
+void i845_update_cursor(struct drm_crtc *, u32);
+void i9xx_update_cursor(struct drm_crtc *, u32);
+void ivb_update_cursor(struct drm_crtc *, u32);
+int intel_crtc_cursor_set(struct drm_crtc *, struct drm_file *, uint32_t,
+ uint32_t, uint32_t);
+int intel_crtc_cursor_move(struct drm_crtc *, int, int);
+void intel_crtc_gamma_set(struct drm_crtc *, u16 *, u16 *, u16 *, uint32_t,
+ uint32_t);
+struct drm_framebuffer *
+ intel_framebuffer_create(struct drm_device *,
+ struct drm_mode_fb_cmd2 *, struct drm_i915_gem_object *);
+u32 intel_framebuffer_pitch_for_width(int, int);
+u32 intel_framebuffer_size_for_mode(struct drm_display_mode *, int);
+struct drm_framebuffer *
+ intel_framebuffer_create_for_mode(struct drm_device *,
+ struct drm_display_mode *, int, int);
+struct drm_framebuffer *
+ mode_fits_in_fbdev(struct drm_device *, struct drm_display_mode *);
+int intel_crtc_clock_get(struct drm_device *, struct drm_crtc *);
+void intel_crtc_destroy(struct drm_crtc *);
+void do_intel_finish_page_flip(struct drm_device *, struct drm_crtc *);
+int intel_crtc_page_flip(struct drm_crtc *, struct drm_framebuffer *,
+ struct drm_pending_vblank_event *);
+void intel_sanitize_modesetting(struct drm_device *, int, int);
+void intel_crtc_reset(struct drm_crtc *);
+void intel_crtc_init(struct drm_device *, int);
+int intel_encoder_clones(struct intel_encoder *);
+bool has_edp_a(struct drm_device *);
+void intel_setup_outputs(struct drm_device *);
+void intel_user_framebuffer_destroy(struct drm_framebuffer *);
+int intel_user_framebuffer_create_handle(struct drm_framebuffer *,
+ struct drm_file *, unsigned int *);
+struct drm_framebuffer *
+ intel_user_framebuffer_create(struct drm_device *,
+ struct drm_file *, struct drm_mode_fb_cmd2 *);
+unsigned long intel_pxfreq(u32);
+void ironlake_init_clock_gating(struct drm_device *);
+void gen6_init_clock_gating(struct drm_device *);
+void ivybridge_init_clock_gating(struct drm_device *);
+void g4x_init_clock_gating(struct drm_device *);
+void crestline_init_clock_gating(struct drm_device *);
+void broadwater_init_clock_gating(struct drm_device *);
+void gen3_init_clock_gating(struct drm_device *);
+void i85x_init_clock_gating(struct drm_device *);
+void i830_init_clock_gating(struct drm_device *);
+void ibx_init_clock_gating(struct drm_device *);
+void cpt_init_clock_gating(struct drm_device *);
+void ironlake_teardown_rc6(struct drm_device *);
+void ironlake_disable_rc6(struct drm_device *);
+int ironlake_setup_rc6(struct drm_device *);
+void intel_init_display(struct drm_device *);
+void quirk_pipea_force(struct drm_device *);
+void quirk_ssc_force_disable(struct drm_device *);
+void intel_init_quirks(struct drm_device *);
+void i915_disable_vga(struct drm_device *);
+void intel_crtc_idle_timer(void *);
+void intel_gpu_idle_timer(void *);
+void intel_idle_update(void *, int);
+void intel_decrease_pllclock(struct drm_crtc *);
+int intel_default_queue_flip(struct drm_device *, struct drm_crtc *,
+ struct drm_framebuffer *, struct drm_i915_gem_object *);
+int intel_gen2_queue_flip(struct drm_device *, struct drm_crtc *,
+ struct drm_framebuffer *, struct drm_i915_gem_object *);
+int intel_gen3_queue_flip(struct drm_device *, struct drm_crtc *,
+ struct drm_framebuffer *, struct drm_i915_gem_object *);
+int intel_gen4_queue_flip(struct drm_device *, struct drm_crtc *,
+ struct drm_framebuffer *, struct drm_i915_gem_object *);
+int intel_gen6_queue_flip(struct drm_device *, struct drm_crtc *,
+ struct drm_framebuffer *, struct drm_i915_gem_object *);
+int intel_gen7_queue_flip(struct drm_device *, struct drm_crtc *,
+ struct drm_framebuffer *, struct drm_i915_gem_object *);
+void intel_unpin_work_fn(void *, void *);
+void intel_dpio_write(struct inteldrm_softc *, int, u32);
+void vlv_init_dpio(struct drm_device *);
+int intel_dual_link_lvds_callback(const struct dmi_system_id *);
+bool is_dual_link_lvds(struct inteldrm_softc *, unsigned int);
+void ironlake_wait_for_vblank(struct drm_device *, int);
+const char *state_string(bool);
+void intel_sbi_write(struct inteldrm_softc *, u16, u32,
+ enum intel_sbi_destination);
+u32 intel_sbi_read(struct inteldrm_softc *, u16,
+ enum intel_sbi_destination);
+void ironlake_enable_pch_pll(struct intel_crtc *);
+void ironlake_enable_pch_transcoder(struct inteldrm_softc *, enum pipe);
+void lpt_enable_pch_transcoder(struct inteldrm_softc *, enum transcoder);
+void ironlake_disable_pch_transcoder(struct inteldrm_softc *, enum pipe);
+void lpt_disable_pch_transcoder(struct inteldrm_softc *);
+void intel_crtc_update_sarea_pos(struct drm_crtc *, int, int);
+void ivb_modeset_global_resources(struct drm_device *);
+void ironlake_fdi_pll_disable(struct intel_crtc *);
+bool intel_crtc_has_pending_flip(struct drm_crtc *);
+bool ironlake_crtc_driving_pch(struct drm_crtc *);
+bool haswell_crtc_driving_pch(struct drm_crtc *);
+void lpt_program_iclkip(struct drm_crtc *);
+void lpt_pch_enable(struct drm_crtc *);
+void intel_put_pch_pll(struct intel_crtc *);
+void haswell_crtc_enable(struct drm_crtc *);
+void haswell_crtc_disable(struct drm_crtc *);
+void ironlake_crtc_off(struct drm_crtc *);
+void haswell_crtc_off(struct drm_crtc *);
+void i9xx_crtc_off(struct drm_crtc *);
+void intel_crtc_update_sarea(struct drm_crtc *, bool);
+void intel_crtc_noop(struct drm_crtc *);
+void intel_connector_check_state(struct intel_connector *);
+int valleyview_get_display_clock_speed(struct drm_device *);
+int vlv_get_refclk(struct drm_crtc *);
+void intel_update_lvds(struct drm_crtc *, intel_clock_t *,
+ struct drm_display_mode *);
+void vlv_update_pll(struct drm_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *, intel_clock_t *, intel_clock_t *, int);
+void i9xx_update_pll(struct drm_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *, intel_clock_t *, intel_clock_t *, int);
+void i8xx_update_pll(struct drm_crtc *, struct drm_display_mode *,
+ intel_clock_t *, intel_clock_t *, int);
+void intel_set_pipe_timings(struct intel_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *);
+void lpt_init_pch_refclk(struct drm_device *);
+void ironlake_set_pipeconf(struct drm_crtc *, struct drm_display_mode *,
+ bool);
+void haswell_set_pipeconf(struct drm_crtc *, struct drm_display_mode *,
+ bool);
+bool ironlake_compute_clocks(struct drm_crtc *, struct drm_display_mode *,
+ intel_clock_t *, bool *, intel_clock_t *);
+void cpt_enable_fdi_bc_bifurcation(struct drm_device *);
+bool ironlake_check_fdi_lanes(struct intel_crtc *);
+void ironlake_set_m_n(struct drm_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *);
+uint32_t ironlake_compute_dpll(struct intel_crtc *, struct drm_display_mode *,
+ intel_clock_t *, u32);
+int haswell_crtc_mode_set(struct drm_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *, int, int, struct drm_framebuffer *);
+void haswell_write_eld(struct drm_connector *, struct drm_crtc *);
+bool intel_encoder_crtc_ok(struct drm_encoder *, struct drm_crtc *);
+void intel_modeset_update_staged_output_state(struct drm_device *);
+void intel_modeset_commit_output_state(struct drm_device *);
+struct drm_display_mode *
+ intel_modeset_adjusted_mode(struct drm_crtc *, struct drm_display_mode *);
+void intel_modeset_affected_pipes(struct drm_crtc *, unsigned *,
+ unsigned *, unsigned *);
+bool intel_crtc_in_use(struct drm_crtc *);
+void intel_modeset_update_state(struct drm_device *, unsigned);
+void intel_set_config_free(struct intel_set_config *);
+int intel_set_config_save_state(struct drm_device *,
+ struct intel_set_config *);
+void intel_set_config_restore_state(struct drm_device *,
+ struct intel_set_config *);
+void intel_set_config_compute_mode_changes(struct drm_mode_set *,
+ struct intel_set_config *);
+int intel_modeset_stage_output_state(struct drm_device *,
+ struct drm_mode_set *, struct intel_set_config *);
+int intel_crtc_set_config(struct drm_mode_set *);
+void intel_cpu_pll_init(struct drm_device *);
+void i915_redisable_vga(struct drm_device *);
+void intel_pch_pll_init(struct drm_device *);
+void quirk_invert_brightness(struct drm_device *);
+int intel_dmi_reverse_brightness(const struct dmi_system_id *);
+void intel_connector_break_all_links(struct intel_connector *);
+void intel_enable_pipe_a(struct drm_device *);
+bool intel_check_plane_mapping(struct intel_crtc *);
+void intel_sanitize_crtc(struct intel_crtc *);
+void intel_sanitize_encoder(struct intel_encoder *);
+
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_PCH_SPLIT(dev));
+
+ return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
+bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+
+bool
+intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+bool
+intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+
+bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+
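+/*
+ * On Ironlake (gen5) the FDI link frequency is whatever the BIOS
+ * programmed into the FDI PLL; everything later runs the link at a
+ * fixed 2.7 GHz, hence the constant 27 in units of 100MHz below.
+ */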
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+ if (IS_GEN5(dev)) {
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+ } else
+ return 27;
+}
+
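+/*
+ * DPLL divider limits, per platform and output type.  dot and vco are
+ * in kHz; m/n/p give the divider ranges the search routines below may
+ * use, and p2 switches from p2_slow to p2_fast once the target dot
+ * clock reaches dot_limit.  find_pll selects the search strategy.
+ */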
+static const intel_limit_t intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 10, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_sdvo = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_display_port = {
+ .dot = { .min = 161670, .max = 227000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 97, .max = 108 },
+ .m1 = { .min = 0x10, .max = 0x12 },
+ .m2 = { .min = 0x05, .max = 0x06 },
+ .p = { .min = 10, .max = 20 },
+ .p1 = { .min = 1, .max = 2},
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 10, .p2_fast = 10 },
+ .find_pll = intel_find_pll_g4x_dp,
+};
+
+static const intel_limit_t intel_limits_pineview_sdvo = {
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's Ncounter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_pineview_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+ .find_pll = intel_find_best_PLL,
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const intel_limit_t intel_limits_ironlake_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+/* LVDS 100MHz refclk limits. */
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000},
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 81, .max = 90 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 10, .max = 20 },
+ .p1 = { .min = 1, .max = 2},
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 10, .p2_fast = 10 },
+ .find_pll = intel_find_pll_ironlake_dp,
+};
+
+static const intel_limit_t intel_limits_vlv_dac = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 22, .max = 450 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_hdmi = {
+ .dot = { .min = 20000, .max = 165000 },
+ .vco = { .min = 4000000, .max = 5994000},
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 60, .max = 300 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_dp = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 22, .max = 450 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
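+/*
+ * DPIO is the sideband bus used to program the display PLLs on
+ * Valleyview.  A transaction stages the register in DPIO_REG (plus the
+ * data in DPIO_DATA for writes), kicks it off via DPIO_PKT, and polls
+ * DPIO_BUSY before and after.
+ */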
+u32
+intel_dpio_read(struct inteldrm_softc *dev_priv, int reg)
+{
+ u32 val = 0;
+ int retries;
+
+ mtx_enter(&dev_priv->dpio_lock);
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0)
+ break;
+ DELAY(100);
+ }
+ if (retries == 0) {
+ DRM_ERROR("DPIO idle wait timed out\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(DPIO_REG, reg);
+ I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+ DPIO_BYTE);
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0)
+ break;
+ DELAY(100);
+ }
+ if (retries == 0) {
+ DRM_ERROR("DPIO read wait timed out\n");
+ goto out_unlock;
+ }
+ val = I915_READ(DPIO_DATA);
+
+out_unlock:
+ mtx_leave(&dev_priv->dpio_lock);
+ return val;
+}
+
+void
+intel_dpio_write(struct inteldrm_softc *dev_priv, int reg,
+ u32 val)
+{
+ int retries;
+
+ mtx_enter(&dev_priv->dpio_lock);
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0)
+ break;
+ DELAY(100);
+ }
+ if (retries == 0) {
+ DRM_ERROR("DPIO idle wait timed out\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(DPIO_DATA, val);
+ I915_WRITE(DPIO_REG, reg);
+ I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
+ DPIO_BYTE);
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0)
+ break;
+ DELAY(100);
+ }
+ if (retries == 0)
+ DRM_ERROR("DPIO write wait timed out\n");
+
+out_unlock:
+ mtx_leave(&dev_priv->dpio_lock);
+}
+
+void
+vlv_init_dpio(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ /* Reset the DPIO config */
+ I915_WRITE(DPIO_CTL, 0);
+ POSTING_READ(DPIO_CTL);
+ I915_WRITE(DPIO_CTL, 1);
+ POSTING_READ(DPIO_CTL);
+}
+
+int
+intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ printf("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
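+/*
+ * Dual-link LVDS detection, in order of precedence: the
+ * i915_lvds_channel_mode option, the DMI quirk table above (currently
+ * just the MacBookPro8,2), and finally the LVDS register value cached
+ * at boot, falling back to the VBT value when the register was left
+ * uninitialized.
+ */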
+bool
+is_dual_link_lvds(struct inteldrm_softc *dev_priv,
+ unsigned int reg)
+{
+ unsigned int val;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ if (dev_priv->lvds_val)
+ val = dev_priv->lvds_val;
+ else {
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(reg);
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ val = dev_priv->bios_lvds_val;
+ dev_priv->lvds_val = val;
+ }
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
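+/*
+ * Pick the divider limits for a PCH-split CRTC: LVDS gets single or
+ * dual channel limits, further split on a 100MHz refclk; DisplayPort
+ * and eDP get the fixed DP limits; everything else uses the DAC limits.
+ */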
+const intel_limit_t *
+intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
+ /* LVDS dual channel */
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+ limit = &intel_limits_ironlake_display_port;
+ else
+ limit = &intel_limits_ironlake_dac;
+
+ return limit;
+}
+
+const intel_limit_t *
+intel_g4x_limit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (is_dual_link_lvds(dev_priv, LVDS))
+ /* LVDS with dual channel */
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ /* LVDS with single channel */
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ limit = &intel_limits_g4x_display_port;
+ } else /* The option is for other outputs */
+ limit = &intel_limits_i9xx_sdvo;
+
+ return limit;
+}
+
+const intel_limit_t *
+intel_limit(struct drm_crtc *crtc, int refclk)
+{
+ struct drm_device *dev = crtc->dev;
+ const intel_limit_t *limit;
+
+ if (HAS_PCH_SPLIT(dev))
+ limit = intel_ironlake_limit(crtc, refclk);
+ else if (IS_G4X(dev)) {
+ limit = intel_g4x_limit(crtc);
+ } else if (IS_PINEVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_pineview_lvds;
+ else
+ limit = &intel_limits_pineview_sdvo;
+ } else if (IS_VALLEYVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
+ limit = &intel_limits_vlv_dac;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ limit = &intel_limits_vlv_hdmi;
+ else
+ limit = &intel_limits_vlv_dp;
+ } else if (!IS_GEN2(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i9xx_lvds;
+ else
+ limit = &intel_limits_i9xx_sdvo;
+ } else {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i8xx_lvds;
+ else
+ limit = &intel_limits_i8xx_dvo;
+ }
+ return limit;
+}
+
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+void
+pineview_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
+
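+/*
+ * Non-Pineview dividers are programmed with an offset of 2, so the
+ * effective values are:
+ *
+ * m = 5 * (m1 + 2) + (m2 + 2)
+ * vco = refclk * m / (n + 2)
+ * dot = vco / (p1 * p2)
+ */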
+void
+intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+{
+ if (IS_PINEVIEW(dev)) {
+ pineview_clock(refclk, clock);
+ return;
+ }
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool
+intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->type == type)
+ return true;
+
+ return false;
+}
+
+#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
+/**
+ * Returns whether the given set of divisors is valid for a given refclk with
+ * the given connectors.
+ */
+
+bool
+intel_PLL_is_valid(struct drm_device *dev,
+ const intel_limit_t *limit,
+ const intel_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ INTELPllInvalid("m2 out of range\n");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ INTELPllInvalid("m1 out of range\n");
+ if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
+ INTELPllInvalid("m1 <= m2\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid("m out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid("n out of range\n");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid("dot out of range\n");
+
+ return true;
+}
+
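+/*
+ * Exhaustive search over the m1/m2/n/p1 ranges for the divider set
+ * whose dot clock lands closest to the target.  p2 is fixed up front:
+ * from the current LVDS state when an LVDS panel is on, otherwise from
+ * the dot_limit threshold.  Returns true if a usable set was found.
+ */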
+bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ intel_clock_t clock;
+ int err = target;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (I915_READ(LVDS)) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if (is_dual_link_lvds(dev_priv, LVDS))
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in Pineview */
+ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ intel_clock(dev, refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
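+/*
+ * Like intel_find_best_PLL, but tuned for G4x/PCH hardware: prefer
+ * smaller n and larger m1/m2 per hardware requirements, and accept the
+ * best candidate whose error stays within err_most (~0.585% of the
+ * target).
+ */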
+bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ intel_clock_t clock;
+ int max_n;
+ bool found;
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
+ found = false;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ int lvds_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
+ if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ max_n = limit->n.max;
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement, prefer larger m1,m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ intel_clock(dev, refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+ return found;
+}
+
+bool
+intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+
+ if (target < 200000) {
+ clock.n = 1;
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.m1 = 12;
+ clock.m2 = 9;
+ } else {
+ clock.n = 2;
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.m1 = 14;
+ clock.m2 = 8;
+ }
+ intel_clock(dev, refclk, &clock);
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
+}
+
+/* DisplayPort has only two frequencies, 162MHz and 270MHz */
+bool
+intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 2;
+ clock.m1 = 23;
+ clock.m2 = 8;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 14;
+ clock.m2 = 2;
+ }
+ clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+ clock.p = (clock.p1 * clock.p2);
+ clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+ clock.vco = 0;
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
+}
+
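+/*
+ * Valleyview PLL search: derive the fast clock from the target dot
+ * clock, then pick dividers whose VCO lands in range with the smallest
+ * deviation in ppm, preferring small n (to keep the update rate above
+ * minupdate) and larger p1*p2.
+ */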
+bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 m, n, fastclk;
+ u32 updrate, minupdate, fracbits, p;
+ unsigned long bestppm, ppm, absppm;
+ int dotclk, flag;
+
+ flag = 0;
+ dotclk = target * 1000;
+ bestppm = 1000000;
+ ppm = absppm = 0;
+ fastclk = dotclk / (2*100);
+ updrate = 0;
+ minupdate = 19200;
+ fracbits = 1;
+ n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
+ bestm1 = bestm2 = bestp1 = bestp2 = 0;
+
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
+ updrate = refclk / n;
+ for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
+ for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
+ if (p2 > 10)
+ p2 = p2 - 1;
+ p = p1 * p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
+ m2 = (((2*(fastclk * p * n / m1 )) +
+ refclk) / (2*refclk));
+ m = m1 * m2;
+ vco = updrate * m;
+ if (vco >= limit->vco.min && vco < limit->vco.max) {
+ ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
+ absppm = (ppm > 0) ? ppm : (-ppm);
+ if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
+ bestppm = 0;
+ flag = 1;
+ }
+ if (absppm < bestppm - 10) {
+ bestppm = absppm;
+ flag = 1;
+ }
+ if (flag) {
+ bestn = n;
+ bestm1 = m1;
+ bestm2 = m2;
+ bestp1 = p1;
+ bestp2 = p2;
+ flag = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ best_clock->n = bestn;
+ best_clock->m1 = bestm1;
+ best_clock->m2 = bestm2;
+ best_clock->p1 = bestp1;
+ best_clock->p2 = bestp2;
+
+ return true;
+}
+
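+/*
+ * On Haswell the CPU transcoder is no longer hardwired to the pipe
+ * (eDP has its own transcoder), so look the mapping up through the
+ * crtc instead of assuming transcoder == pipe.
+ */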
+enum transcoder
+intel_pipe_to_cpu_transcoder(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return intel_crtc->cpu_transcoder;
+}
+
+void
+ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 frame, frame_reg = PIPEFRAME(pipe);
+ int retries;
+
+ frame = I915_READ(frame_reg);
+
+ for (retries = 50; retries > 0; retries--) {
+ if (I915_READ_NOTRACE(frame_reg) != frame)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe. Needed for various bits of
+ * mode setting code.
+ */
+void
+intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipestat_reg = PIPESTAT(pipe);
+ int retries;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ironlake_wait_for_vblank(dev, pipe);
+ return;
+ }
+
+ /* Clear existing vblank status. Note this will clear any other
+ * sticky status fields as well.
+ *
+ * This races with i915_driver_irq_handler() with the result
+ * that either function could miss a vblank event. Here it is not
+ * fatal, as we will either wait upon the next vblank interrupt or
+ * timeout. Generally speaking intel_wait_for_vblank() is only
+ * called during modeset at which time the GPU should be idle and
+ * should *not* be performing page flips and thus not waiting on
+ * vblanks...
+ * Currently, the result of us stealing a vblank from the irq
+ * handler is that a single frame will be skipped during swapbuffers.
+ */
+ I915_WRITE(pipestat_reg,
+ I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+ /* Wait for vblank interrupt bit to set */
+ for (retries = 50; retries > 0; retries--) {
+ if (I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * On Gen4 and above:
+ * wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ * wait for the display line value to settle (it usually
+ * ends up stopping at the start of the next frame).
+ *
+ */
+void
+intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int retries;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int reg = PIPECONF(cpu_transcoder);
+
+ /* Wait for the Pipe State to go off */
+ for (retries = 100; retries > 0; retries--) {
+ if ((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ WARN(1, "pipe_off wait timed out\n");
+ } else {
+ u32 last_line, line_mask;
+ int reg = PIPEDSL(pipe);
+
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
+ /* Wait for the display line to settle */
+ for (retries = 100; retries > 0; retries--) {
+ last_line = I915_READ(reg) & line_mask;
+ DELAY(5000);
+ if ((I915_READ(reg) & line_mask) == last_line)
+ break;
+ }
+ if (retries == 0)
+ WARN(1, "pipe_off wait timed out\n");
+ }
+}
+
+const char *
+state_string(bool enabled)
+{
+ return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+void
+assert_pll(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+
+/* For ILK+ */
+void
+assert_pch_pll(struct inteldrm_softc *dev_priv,
+ struct intel_pch_pll *pll,
+ struct intel_crtc *crtc,
+ bool state)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 val;
+ bool cur_state;
+
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
+ return;
+ }
+
+ if (WARN(!pll,
+ "asserting PCH PLL %s with no PLL\n", state_string(state)))
+ return;
+
+ val = I915_READ(pll->pll_reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+ pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+ /* Make sure the selected PLL is correctly attached to the transcoder */
+ if (crtc && HAS_PCH_CPT(dev)) {
+ u32 pch_dpll;
+
+ pch_dpll = I915_READ(PCH_DPLL_SEL);
+ cur_state = pll->pll_reg == _PCH_DPLL_B;
+ if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+ "PLL[%d] not attached to this transcoder %d: %08x\n",
+ cur_state, crtc->pipe, pch_dpll)) {
+ cur_state = !!(val >> (4*crtc->pipe + 3));
+ WARN(cur_state != state,
+ "PLL[%d] not %s on this transcoder %d: %08x\n",
+ pll->pll_reg == _PCH_DPLL_B,
+ state_string(state),
+ crtc->pipe,
+ val);
+ }
+ }
+}
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
+
+void
+assert_fdi_tx(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, bool state)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int reg;
+ u32 val;
+ bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ if (IS_HASWELL(dev)) {
+ /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
+ } else {
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ }
+ WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+void
+assert_fdi_rx(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+void
+assert_fdi_tx_pll_enabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int reg;
+ u32 val;
+
+ /* ILK FDI PLL is always enabled */
+ if (dev_priv->info->gen == 5)
+ return;
+
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (IS_HASWELL(dev))
+ return;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+void
+assert_fdi_rx_pll_enabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+void
+assert_panel_unlocked(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int pp_reg, lvds_reg;
+ u32 val;
+ enum pipe panel_pipe = PIPE_A;
+ bool locked = true;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ pp_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ pp_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ val = I915_READ(pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+ panel_pipe = PIPE_B;
+
+ WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
+
+void
+assert_pipe(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ /* if we need the pipe A quirk it must be always on */
+ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ state = true;
+
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ WARN(cur_state != state,
+ "pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+
+void
+assert_plane(struct inteldrm_softc *dev_priv,
+ enum plane plane, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+ WARN(cur_state != state,
+ "plane %c assertion failure (expected %s, current %s)\n",
+ plane_name(plane), state_string(state), state_string(cur_state));
+}
+
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
+void
+assert_planes_disabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int reg, i;
+ u32 val;
+ int cur_pipe;
+
+ /* Planes are fixed to pipes on ILK+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ reg = DSPCNTR(pipe);
+ val = I915_READ(reg);
+ WARN((val & DISPLAY_PLANE_ENABLE),
+ "plane %c assertion failure, should be disabled but is not\n",
+ plane_name(pipe));
+ return;
+ }
+
+ /* Need to check both planes against the pipe */
+ for (i = 0; i < 2; i++) {
+ reg = DSPCNTR(i);
+ val = I915_READ(reg);
+ cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+ WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ "plane %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(i), pipe_name(pipe));
+ }
+}
+
+void
+assert_pch_refclk_enabled(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 val;
+ bool enabled;
+
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
+ return;
+ }
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+void
+assert_transcoder_disabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+ bool enabled;
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ enabled = !!(val & TRANS_ENABLE);
+ WARN(enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
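+/*
+ * The helpers below decide whether a PCH port register value means the
+ * port is currently driven by the given pipe.  On CPT the binding
+ * lives in the transcoder select fields; on IBX it is encoded in the
+ * port register itself.
+ */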
+bool
+dp_pipe_enabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, u32 port_sel, u32 val)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ if ((val & DP_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev)) {
+ u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+ u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+ if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+ return false;
+ } else {
+ if ((val & DP_PIPE_MASK) != (pipe << 30))
+ return false;
+ }
+ return true;
+}
+
+bool
+hdmi_pipe_enabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ if ((val & PORT_ENABLE) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
+ return false;
+ }
+ return true;
+}
+
+bool
+lvds_pipe_enabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ if ((val & LVDS_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
+ return false;
+ }
+ return true;
+}
+
+bool
+adpa_pipe_enabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ if ((val & ADPA_DAC_ENABLE) == 0)
+ return false;
+ if (HAS_PCH_CPT(dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
+ return false;
+ }
+ return true;
+}
+
+void
+assert_pch_dp_disabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, int reg, u32 port_sel)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 val = I915_READ(reg);
+ WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev) && (val & DP_PORT_EN) == 0
+ && (val & DP_PIPEB_SELECT),
+ "IBX PCH dp port still using transcoder B\n");
+}
+
+void
+assert_pch_hdmi_disabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe, int reg)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 val = I915_READ(reg);
+ WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+ "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev) && (val & PORT_ENABLE) == 0
+ && (val & SDVO_PIPE_B_SELECT),
+ "IBX PCH hdmi port still using transcoder B\n");
+}
+
+void
+assert_pch_ports_disabled(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
+}
+
+/**
+ * intel_enable_pll - enable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
+ * make sure the PLL reg is writable first though, since the panel write
+ * protect mechanism may be enabled.
+ *
+ * Note! This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
+ */
+void
+intel_enable_pll(struct inteldrm_softc *dev_priv, enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int reg;
+ u32 val;
+
+ /* No really, not for ILK+ */
+ BUG_ON(!IS_VALLEYVIEW(dev) && dev_priv->info->gen >= 5);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ assert_panel_unlocked(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(150); /* wait for warmup */
+}
+
+/**
+ * intel_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe, making sure the pipe is off first.
+ *
+ * Note! This is for pre-ILK only.
+ */
+void
+intel_disable_pll(struct inteldrm_softc *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+/* SBI access */
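+/*
+ * SBI is the mailbox-style sideband interface in the PCH, used by the
+ * LPT refclk/iCLK code: the address (and data, for writes) is staged
+ * in SBI_ADDR/SBI_DATA, the transaction is kicked off through
+ * SBI_CTL_STAT, and SBI_BUSY is polled before and after.  The
+ * destination selects the integrated clock controller (SBI_ICLK) or
+ * the modular PHY (MPHY).
+ */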
+void
+intel_sbi_write(struct inteldrm_softc *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
+{
+ u32 tmp;
+ int retries;
+
+ mtx_enter(&dev_priv->dpio_lock);
+ for (retries = 100; retries > 0; retries--) {
+ if ((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+
+ if (destination == SBI_ICLK)
+ tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ else
+ tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
+
+ for (retries = 100; retries > 0; retries--) {
+ if ((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+ goto out_unlock;
+ }
+
+out_unlock:
+ mtx_leave(&dev_priv->dpio_lock);
+}
+
+u32
+intel_sbi_read(struct inteldrm_softc *dev_priv, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 value = 0;
+ int retries;
+
+ mtx_enter(&dev_priv->dpio_lock);
+ for (retries = 100; retries > 0; retries--) {
+ if ((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+
+ if (destination == SBI_ICLK)
+ value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
+
+ for (retries = 100; retries > 0; retries--) {
+ if ((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+ goto out_unlock;
+ }
+
+ value = I915_READ(SBI_DATA);
+
+out_unlock:
+ mtx_leave(&dev_priv->dpio_lock);
+ return value;
+}
+
+/**
+ * ironlake_enable_pch_pll - enable PCH PLL
+ * @intel_crtc: crtc whose shared PCH PLL should be enabled
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+void
+ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
+{
+ struct inteldrm_softc *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
+ int reg;
+ u32 val;
+
+ /* PCH PLLs only available on ILK, SNB and IVB */
+ BUG_ON(dev_priv->info->gen < 5);
+ pll = intel_crtc->pch_pll;
+ if (pll == NULL)
+ return;
+
+ if (WARN_ON(pll->refcount == 0))
+ return;
+
+ DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
+
+ /* PCH refclock must be enabled first */
+ assert_pch_refclk_enabled(dev_priv);
+
+ if (pll->active++ && pll->on) {
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
+ return;
+ }
+
+ DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
+
+ reg = pll->pll_reg;
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(200);
+
+ pll->on = true;
+}
+
+void
+intel_disable_pch_pll(struct intel_crtc *intel_crtc)
+{
+ struct inteldrm_softc *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+ if (pll == NULL)
+ return;
+
+ if (WARN_ON(pll->refcount == 0))
+ return;
+
+ DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
+
+ if (WARN_ON(pll->active == 0)) {
+ assert_pch_pll_disabled(dev_priv, pll, NULL);
+ return;
+ }
+
+ if (--pll->active) {
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
+
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
+
+ reg = pll->pll_reg;
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(200);
+
+ pll->on = false;
+}
+
+void
+ironlake_enable_pch_transcoder(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ uint32_t reg, val, pipeconf_val;
+ int retries;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* Make sure PCH DPLL is enabled */
+ assert_pch_pll_enabled(dev_priv,
+ to_intel_crtc(crtc)->pch_pll,
+ to_intel_crtc(crtc));
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ if (HAS_PCH_CPT(dev)) {
+ /* Workaround: Set the timing override bit before enabling the
+ * pch transcoder. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
+ }
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ pipeconf_val = I915_READ(PIPECONF(pipe));
+
+ if (HAS_PCH_IBX(dev)) {
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= pipeconf_val & PIPE_BPC_MASK;
+ }
+
+ val &= ~TRANS_INTERLACE_MASK;
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
+ if (HAS_PCH_IBX(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+ val |= TRANS_LEGACY_INTERLACED_ILK;
+ else
+ val |= TRANS_INTERLACED;
+ } else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(reg, val | TRANS_ENABLE);
+ for (retries = 100; retries > 0; retries--) {
+ if (I915_READ(reg) & TRANS_STATE_ENABLE)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("failed to enable transcoder %d\n", pipe);
+}
+
+void
+lpt_enable_pch_transcoder(struct inteldrm_softc *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 val, pipeconf_val;
+ int retries;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+ /* Workaround: set timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+ PIPECONF_INTERLACED_ILK)
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(TRANSCONF(TRANSCODER_A), val);
+ for (retries = 100; retries > 0; retries--) {
+ if (I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+void
+ironlake_disable_pch_transcoder(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ uint32_t reg, val;
+ int retries;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(reg) & TRANS_STATE_ENABLE) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("failed to disable transcoder %d\n", pipe);
+
+ if (!HAS_PCH_IBX(dev)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
+ }
+}
+
+void
+lpt_disable_pch_transcoder(struct inteldrm_softc *dev_priv)
+{
+ u32 val;
+ int retries;
+
+ val = I915_READ(_TRANSACONF);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(_TRANSACONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to enable
+ * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ *
+ * Enable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe is actually running (i.e. first vblank) before
+ * returning.
+ */
+void
+intel_enable_pipe(struct inteldrm_softc *dev_priv, enum pipe pipe,
+ bool pch_port)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ enum transcoder pch_transcoder;
+ int reg;
+ u32 val;
+
+ if (IS_HASWELL(dev))
+ pch_transcoder = TRANSCODER_A;
+ else
+ pch_transcoder = pipe;
+
+ /*
+ * A pipe without a PLL won't actually be able to drive bits from
+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't
+ * need the check.
+ */
+ if (!HAS_PCH_SPLIT(dev))
+ assert_pll_enabled(dev_priv, pipe);
+ else {
+ if (pch_port) {
+ /* if driving the PCH, we need FDI enabled */
+ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
+ }
+ /* FIXME: assert CPU port conditions for SNB+ */
+ }
+
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | PIPECONF_ENABLE);
+ intel_wait_for_vblank(dev, pipe);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+void
+intel_disable_pipe(struct inteldrm_softc *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int reg;
+ u32 val;
+
+ /*
+ * Make sure planes won't keep trying to pump pixels to us,
+ * or we might hang the display.
+ */
+ assert_planes_disabled(dev_priv, pipe);
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ if ((val & PIPECONF_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev, pipe);
+}
+
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+void
+intel_flush_display_plane(struct inteldrm_softc *dev_priv,
+ enum plane plane)
+{
+ if (dev_priv->info->gen >= 4)
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ else
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+}
+
+/**
+ * intel_enable_plane - enable a display plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+void
+intel_enable_plane(struct inteldrm_softc *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int reg;
+ u32 val;
+
+ /* If the pipe isn't enabled, we can't pump pixels and may hang */
+ assert_pipe_enabled(dev_priv, pipe);
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ if (val & DISPLAY_PLANE_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev, pipe);
+}
+
+/**
+ * intel_disable_plane - disable a display plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+void
+intel_disable_plane(struct inteldrm_softc *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev, pipe);
+}
+
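+/*
+ * Pin a framebuffer object for scanout, using the alignment the
+ * display engine requires for the object's tiling mode, and install a
+ * fence for tiled buffers.  The pin runs with dev_priv->mm.interruptible
+ * cleared so the wait cannot be interrupted mid-modeset.
+ */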
+int
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 alignment;
+ int ret;
+
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ alignment = 128 * 1024;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ alignment = 4 * 1024;
+ else
+ alignment = 64 * 1024;
+ break;
+ case I915_TILING_X:
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ break;
+ case I915_TILING_Y:
+ /* FIXME: Is this true? */
+ DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+ return -EINVAL;
+ default:
+#ifdef notyet
+ BUG();
+#else
+ DRM_ERROR("invalid tiling mode %d", obj->tiling_mode);
+ return -EINVAL;
+#endif
+ }
+
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ if (ret)
+ goto err_interruptible;
+
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always install
+ * a fence as the cost is not that onerous.
+ */
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
+
+ i915_gem_object_pin_fence(obj);
+
+ dev_priv->mm.interruptible = true;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_interruptible:
+ dev_priv->mm.interruptible = true;
+ return ret;
+}
+
+void
+intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_fence(obj);
+ i915_gem_object_unpin(obj);
+}
+
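+/* intel_pin_and_fence_fb_obj() and intel_unpin_fb_obj() must be balanced;
+ * intel_pipe_set_base() below shows the pattern, unpinning the old
+ * framebuffer only after the new one has been latched.
+ */
+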
+/* Computes the linear offset to the base tile and adjusts x, y. Bytes per
+ * pixel is assumed to be a power of two. */
+unsigned long
+intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int cpp,
+ unsigned int pitch)
+{
+ if (tiling_mode != I915_TILING_NONE) {
+ unsigned int tile_rows, tiles;
+
+ tile_rows = *y / 8;
+ *y %= 8;
+
+ tiles = *x / (512/cpp);
+ *x %= 512/cpp;
+
+ return tile_rows * pitch * 8 + tiles * 4096;
+ } else {
+ unsigned int offset;
+
+ offset = *y * pitch + *x * cpp;
+ *y = 0;
+ *x = (offset & 4095) / cpp;
+ return offset & -4096;
+ }
+}
+
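+/* Illustrative arithmetic for the helper above (example values, not from the
+ * original code). X-tiled, cpp = 4, pitch = 8192, x = 700, y = 20:
+ * tile_rows = 20 / 8 = 2 (y becomes 4), tiles = 700 / 128 = 5 (x becomes 60),
+ * giving a base tile offset of 2 * 8192 * 8 + 5 * 4096 = 151552.
+ * Linear, cpp = 4, pitch = 5120, x = 10, y = 3: offset = 3 * 5120 + 40 =
+ * 15400, base = 15400 & -4096 = 12288, x becomes 3112 / 4 = 778, y becomes 0.
+ */
+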
+int
+i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long linear_offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ default:
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+
+ I915_WRITE(reg, dspcntr);
+
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_crtc->dspaddr_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
+ } else {
+ intel_crtc->dspaddr_offset = linear_offset;
+ }
+
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ } else
+ I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+ POSTING_READ(reg);
+
+ return 0;
+}
+
+int
+ironlake_update_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long linear_offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ case 2:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ default:
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
+	/* trickle feed must be disabled */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(reg, dspcntr);
+
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ intel_crtc->dspaddr_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
+
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+ } else {
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ }
+ POSTING_READ(reg);
+
+ return 0;
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+ intel_increase_pllclock(crtc);
+
+ return dev_priv->display.update_plane(crtc, fb, x, y);
+}
+
+int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct inteldrm_softc *dev_priv = obj->base.dev->dev_private;
+ bool was_interruptible = dev_priv->mm.interruptible;
+ int ret;
+
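+	/* Wait for any pending page flips on this fb to complete, unless
+	 * the GPU is wedged.
+	 */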
+	while (!atomic_read(&dev_priv->mm.wedged) &&
+ atomic_read(&obj->pending_flip) != 0) {
+ tsleep(&dev_priv->pending_flip_queue, 0, "915flp", 0);
+ }
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_finish_gpu(obj);
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+void
+intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+#if 0
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (intel_crtc->pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ break;
+ default:
+ break;
+ }
+#endif
+}
+
+int
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_framebuffer *old_fb;
+ int ret;
+
+ /* no fb bound */
+ if (!fb) {
+ DRM_ERROR("No FB bound\n");
+ return 0;
+ }
+
+	if (intel_crtc->plane > dev_priv->num_pipe) {
+ DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
+ intel_crtc->plane,
+ dev_priv->num_pipe);
+ return -EINVAL;
+ }
+
+ DRM_LOCK();
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(fb)->obj,
+ NULL);
+ if (ret != 0) {
+ DRM_UNLOCK();
+ DRM_ERROR("pin & fence failed\n");
+ return ret;
+ }
+
+ if (crtc->fb)
+ intel_finish_fb(crtc->fb);
+
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
+ if (ret) {
+ intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
+ DRM_UNLOCK();
+ DRM_ERROR("failed to update base address\n");
+ return ret;
+ }
+
+ old_fb = crtc->fb;
+ crtc->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
+ if (old_fb) {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ }
+
+ intel_update_fbc(dev);
+ DRM_UNLOCK();
+
+ intel_crtc_update_sarea_pos(crtc, x, y);
+
+ return 0;
+}
+
+void
+ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+ if (clock < 200000) {
+ u32 temp;
+ dpa_ctl |= DP_PLL_FREQ_160MHZ;
+		/* workaround for 160MHz:
+ 1) program 0x4600c bits 15:0 = 0x8124
+ 2) program 0x46010 bit 0 = 1
+ 3) program 0x46034 bit 24 = 1
+ 4) program 0x64000 bit 14 = 1
+ */
+ temp = I915_READ(0x4600c);
+ temp &= 0xffff0000;
+ I915_WRITE(0x4600c, temp | 0x8124);
+
+ temp = I915_READ(0x46010);
+ I915_WRITE(0x46010, temp | 1);
+
+ temp = I915_READ(0x46034);
+ I915_WRITE(0x46034, temp | (1 << 24));
+ } else {
+ dpa_ctl |= DP_PLL_FREQ_270MHZ;
+ }
+ I915_WRITE(DP_A, dpa_ctl);
+
+ POSTING_READ(DP_A);
+ DELAY(500);
+}
+
+void
+intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (IS_IVYBRIDGE(dev)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ }
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ DELAY(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
+ FDI_FE_ERRC_ENABLE);
+}
+
+void
+ivb_modeset_global_resources(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+ struct intel_crtc *pipe_C_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+ uint32_t temp;
+
+	/* When everything is off disable fdi C so that we can enable fdi B
+ * with all lanes. XXX: This misses the case where a pipe is not using
+ * any pch resources and so doesn't need any fdi lanes. */
+ if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("disabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ }
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+void
+ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp, tries;
+
+ /* FDI needs bits from pipe & plane first */
+ assert_pipe_enabled(dev_priv, pipe);
+ assert_plane_enabled(dev_priv, plane);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+ I915_READ(reg);
+ DELAY(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+	/* Ironlake workaround, enable clock pointer after FDI enable */
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done\n");
+}
+
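+/* Voltage-swing/pre-emphasis combinations swept while hunting for bit and
+ * symbol lock during SNB-B FDI link training.
+ */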
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+void
+gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i, retry;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ DELAY(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ DELAY(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+void
+ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(500);
+
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK ||
+ (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(500);
+
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+void
+ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~((0x7 << 19) | (0x7 << 16));
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_PCDCLK);
+
+ POSTING_READ(reg);
+ DELAY(200);
+
+ /* On Haswell, the PLL configuration for ports and pipes is handled
+ * separately, as part of DDI setup */
+ if (!IS_HASWELL(dev)) {
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(100);
+ }
+ }
+}
+
+void
+ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ DELAY(100);
+}
+
+void
+ironlake_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ }
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(100);
+}
+
+bool
+intel_crtc_has_pending_flip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ bool pending;
+
+ if (atomic_read(&dev_priv->mm.wedged))
+ return false;
+
+ mtx_enter(&dev->event_lock);
+ pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ mtx_leave(&dev->event_lock);
+
+ return pending;
+}
+
+void
+intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (crtc->fb == NULL)
+ return;
+
+ while (intel_crtc_has_pending_flip(crtc))
+ tsleep(&dev_priv->pending_flip_queue, 0, "915wfl", 0);
+
+ DRM_LOCK();
+ intel_finish_fb(crtc->fb);
+ DRM_UNLOCK();
+}
+
+bool
+ironlake_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *intel_encoder;
+
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ return false;
+ continue;
+ }
+ }
+
+ return true;
+}
+
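+/* On Haswell only the analog port is routed through FDI to the PCH, so a
+ * CRTC needs PCH resources only when driving a CRT.
+ */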
+bool
+haswell_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
+}
+
+/* Program iCLKIP clock to the desired frequency */
+void
+lpt_program_iclkip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ /* It is necessary to ungate the pixclk gate prior to programming
+ * the divisors, and gate it back when it is done.
+ */
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ /* Disable SSCCTL */
+ intel_sbi_write(dev_priv, SBI_SSCCTL6,
+ intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+ SBI_SSCCTL_DISABLE,
+ SBI_ICLK);
+
+ /* 20MHz is a corner case which is out of range for the 7-bit divisor */
+ if (crtc->mode.clock == 20000) {
+ auxdiv = 1;
+ divsel = 0x41;
+ phaseinc = 0x20;
+ } else {
+		/* The iCLK virtual clock root frequency is in MHz, but the
+		 * crtc->mode.clock is in kHz. To get the divisors, we divide
+		 * one by the other, so convert the virtual clock root
+		 * frequency to kHz here first to preserve precision.
+		 */
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor, msb_divisor_value, pi_value;
+
+ desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+ msb_divisor_value = desired_divisor / iclk_pi_range;
+ pi_value = desired_divisor % iclk_pi_range;
+
+ auxdiv = 0;
+ divsel = msb_divisor_value - 2;
+ phaseinc = pi_value;
+ }
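+
+	/* Illustrative numbers (examples, not from the original code): a
+	 * 108000 kHz mode clock gives desired_divisor = 172800000 / 108000 =
+	 * 1600, so msb_divisor_value = 25, pi_value = 0, divsel = 23,
+	 * phaseinc = 0 and auxdiv = 0.
+	 */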
+
+ /* This should not happen with any sane values */
+ WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ crtc->mode.clock,
+ auxdiv,
+ divsel,
+ phasedir,
+ phaseinc);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+ /* Wait for initialization time */
+ DELAY(24);
+
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+void
+ironlake_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ assert_transcoder_disabled(dev_priv, pipe);
+
+ /* Write the TU size bits before fdi link training, so that error
+ * detection works. */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+ /* For PCH output, training FDI link */
+ dev_priv->display.fdi_link_train(crtc);
+
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+	 * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+ * unconditionally resets the pll - we need that to have the right LVDS
+ * enable sequence. */
+ ironlake_enable_pch_pll(intel_crtc);
+
+ if (HAS_PCH_CPT(dev)) {
+ u32 sel;
+
+ temp = I915_READ(PCH_DPLL_SEL);
+ switch (pipe) {
+ default:
+ case 0:
+ temp |= TRANSA_DPLL_ENABLE;
+ sel = TRANSA_DPLLB_SEL;
+ break;
+ case 1:
+ temp |= TRANSB_DPLL_ENABLE;
+ sel = TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ temp |= TRANSC_DPLL_ENABLE;
+ sel = TRANSC_DPLLB_SEL;
+ break;
+ }
+ if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
+ temp |= sel;
+ else
+ temp &= ~sel;
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
+
+ /* set transcoder timing, panel must allow it */
+ assert_panel_unlocked(dev_priv, pipe);
+ I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
+ I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
+ I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
+
+ I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
+ I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
+ I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+ I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
+
+ intel_fdi_normal_train(crtc);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
+ temp |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
+ temp |= bpc << 9; /* same format but at 11:9 */
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ temp |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ temp |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ BUG();
+ }
+
+ I915_WRITE(reg, temp);
+ }
+
+ ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+void
+lpt_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+ lpt_program_iclkip(crtc);
+
+ /* Set transcoder timing. */
+ I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
+
+ I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+ lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+}
+
+void
+intel_put_pch_pll(struct intel_crtc *intel_crtc)
+{
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
+
+ if (pll == NULL)
+ return;
+
+ if (pll->refcount == 0) {
+ WARN(1, "bad PCH PLL refcount\n");
+ return;
+ }
+
+ --pll->refcount;
+ intel_crtc->pch_pll = NULL;
+}
+
+static struct intel_pch_pll *
+intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
+{
+ struct inteldrm_softc *dev_priv = intel_crtc->base.dev->dev_private;
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct intel_pch_pll *pll;
+ int i;
+
+ pll = intel_crtc->pch_pll;
+ if (pll) {
+ DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto prepare;
+ }
+
+ if (HAS_PCH_IBX(dev)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = intel_crtc->pipe;
+ pll = &dev_priv->pch_plls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+
+ goto found;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+
+ /* Only want to check enabled timings first */
+ if (pll->refcount == 0)
+ continue;
+
+ if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
+ fp == I915_READ(pll->fp0_reg)) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
+ intel_crtc->base.base.id,
+ pll->pll_reg, pll->refcount, pll->active);
+
+ goto found;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+ if (pll->refcount == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto found;
+ }
+ }
+
+ return NULL;
+
+found:
+ intel_crtc->pch_pll = pll;
+ pll->refcount++;
+ DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
+prepare: /* separate function? */
+ DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
+
+ /* Wait for the clocks to stabilize before rewriting the regs */
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(pll->pll_reg);
+ DELAY(150);
+
+ I915_WRITE(pll->fp0_reg, fp);
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ pll->on = false;
+ return pll;
+}
+
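+/* Verify that a CPT mode set actually started the pipe by watching the
+ * scanline counter (PIPEDSL) advance; if it stays frozen for ~10ms the
+ * pipe is stuck.
+ */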
+void
+intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int dslreg = PIPEDSL(pipe);
+ u32 temp;
+ int retries;
+
+ temp = I915_READ(dslreg);
+ DELAY(500);
+ for (retries = 10; retries > 0; retries--) {
+ if (I915_READ(dslreg) != temp)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
+ }
+}
+
+void
+ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 temp;
+ bool is_pch_port;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
+
+ is_pch_port = ironlake_crtc_driving_pch(crtc);
+
+ if (is_pch_port) {
+ /* Note: FDI PLL enabling _must_ be done before we enable the
+ * cpu pipes, hence this is separate from all the other fdi/pch
+ * enabling. */
+ ironlake_fdi_pll_enable(intel_crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ /* Enable panel fitting for LVDS */
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ else
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ /*
+	 * On ILK+ the LUT must be loaded before the pipe is running, but
+	 * with clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ ironlake_pch_enable(crtc);
+
+ DRM_LOCK();
+ intel_update_fbc(dev);
+ DRM_UNLOCK();
+
+ intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+	 * as the first vblank happened, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+void
+haswell_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ bool is_pch_port;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ dev_priv->display.fdi_link_train(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ intel_ddi_enable_pipe_clock(intel_crtc);
+
+ /* Enable panel fitting for eDP */
+ if (dev_priv->pch_pf_size &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ /*
+	 * On ILK+ the LUT must be loaded before the pipe is running, but
+	 * with clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
+ intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_enable_pipe_func(crtc);
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ lpt_pch_enable(crtc);
+
+ DRM_LOCK();
+ intel_update_fbc(dev);
+ DRM_UNLOCK();
+
+ intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+	 * as the first vblank happened, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+void
+ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
+
+ if (!intel_crtc->active)
+ return;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ /* Disable PF */
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ ironlake_fdi_disable(crtc);
+
+ ironlake_disable_pch_transcoder(dev_priv, pipe);
+
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
+ I915_WRITE(reg, temp);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ switch (pipe) {
+ case 0:
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ break;
+ case 1:
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ break;
+ case 2:
+ /* C shares PLL A or B */
+ temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
+ break;
+ default:
+ BUG(); /* wtf */
+ }
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
+
+ /* disable PCH DPLL */
+ intel_disable_pch_pll(intel_crtc);
+
+ ironlake_fdi_pll_disable(intel_crtc);
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ DRM_LOCK();
+ intel_update_fbc(dev);
+ DRM_UNLOCK();
+}
+
+void
+haswell_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ bool is_pch_port;
+
+ if (!intel_crtc->active)
+ return;
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+ /* Disable PF */
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+ intel_ddi_disable_pipe_clock(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (is_pch_port) {
+ lpt_disable_pch_transcoder(dev_priv);
+ intel_ddi_fdi_disable(crtc);
+ }
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ DRM_LOCK();
+ intel_update_fbc(dev);
+ DRM_UNLOCK();
+}
+
+void
+ironlake_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ intel_put_pch_pll(intel_crtc);
+}
+
+void
+haswell_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+ * start using it. */
+ intel_crtc->cpu_transcoder = intel_crtc->pipe;
+
+ intel_ddi_put_crtc_pll(crtc);
+}
+
+void
+intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+ if (!enable && intel_crtc->overlay) {
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ DRM_LOCK();
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
+ DRM_UNLOCK();
+ }
+
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
+}
+
+void
+i9xx_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ intel_enable_pll(dev_priv, pipe);
+ intel_enable_pipe(dev_priv, pipe, false);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ intel_crtc_load_lut(crtc);
+ intel_update_fbc(dev);
+
+ /* Give the overlay scaler a chance to enable if it's on this pipe */
+ intel_crtc_dpms_overlay(intel_crtc, true);
+ intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+}
+
+void
+i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 pctl;
+
+ if (!intel_crtc->active)
+ return;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ /* Give the overlay scaler a chance to disable if it's on this pipe */
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ intel_crtc_update_cursor(crtc, false);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
+
+	/* Disable panel fitter if it is on this pipe. */
+ pctl = I915_READ(PFIT_CONTROL);
+ if ((pctl & PFIT_ENABLE) &&
+ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
+ I915_WRITE(PFIT_CONTROL, 0);
+
+ intel_disable_pll(dev_priv, pipe);
+
+ intel_crtc->active = false;
+ intel_update_fbc(dev);
+ intel_update_watermarks(dev);
+}
+
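+/* Unlike the ILK/HSW variants above, gen2-4 has no shared PLL or transcoder
+ * state to release here, so this is a no-op.
+ */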
+void
+i9xx_crtc_off(struct drm_crtc *crtc)
+{
+}
+
+void
+intel_crtc_update_sarea(struct drm_crtc *crtc,
+ bool enabled)
+{
+#if 0
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
+ master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
+ master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
+ break;
+ default:
+ DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
+ break;
+ }
+#endif
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void
+intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ bool enable = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ enable |= intel_encoder->connectors_active;
+
+ if (enable)
+ dev_priv->display.crtc_enable(crtc);
+ else
+ dev_priv->display.crtc_disable(crtc);
+
+ intel_crtc_update_sarea(crtc, enable);
+}
+
+void
+intel_crtc_noop(struct drm_crtc *crtc)
+{
+}
+
+void
+intel_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ /* crtc should still be enabled when we disable it. */
+ WARN_ON(!crtc->enabled);
+
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc_update_sarea(crtc, false);
+ dev_priv->display.off(crtc);
+
+ assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+ assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
+
+ if (crtc->fb) {
+ DRM_LOCK();
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ DRM_UNLOCK();
+ crtc->fb = NULL;
+ }
+
+ /* Update computed state. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ if (connector->encoder->crtc != crtc)
+ continue;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ to_intel_encoder(connector->encoder)->connectors_active = false;
+ }
+}
+
+void
+intel_modeset_disable(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled)
+ intel_crtc_disable(crtc);
+ }
+}
+
+void
+intel_encoder_noop(struct drm_encoder *encoder)
+{
+}
+
+void
+intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ free(intel_encoder, M_DRM);
+}
+
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void
+intel_encoder_dpms(struct intel_encoder *encoder, int mode)
+{
+ if (mode == DRM_MODE_DPMS_ON) {
+ encoder->connectors_active = true;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ } else {
+ encoder->connectors_active = false;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ }
+}
+
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+void
+intel_connector_check_state(struct intel_connector *connector)
+{
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc;
+ bool encoder_enabled;
+ enum pipe pipe;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+
+ WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ "wrong connector dpms state\n");
+ WARN(connector->base.encoder != &encoder->base,
+ "active connector not linked to encoder\n");
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
+
+ crtc = encoder->base.crtc;
+
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
+}
+
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void
+intel_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ /* All the simple cases only support two dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ if (encoder->base.crtc)
+ intel_encoder_dpms(encoder, mode);
+ else
+ WARN_ON(encoder->connectors_active != false);
+
+ intel_modeset_check_state(connector->dev);
+}
+
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool
+intel_connector_get_hw_state(struct intel_connector *connector)
+{
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
+
+ return encoder->get_hw_state(encoder, &pipe);
+}
+
+bool
+intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ /* FDI link clock is fixed at 2.7G */
+ if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+ return false;
+ }
+
+ /* All interlaced capable intel hw wants timings in frames. Note though
+ * that intel_lvds_mode_fixup does some funny tricks with the crtc
+	 * timings, so we need to be careful not to clobber these. */
+ if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+ * with a hsync front porch of 0.
+ */
+ if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+ return false;
+
+ return true;
+}
+
+int
+valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000; /* FIXME */
+}
+
+int
+i945_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000;
+}
+
+int
+i915_get_display_clock_speed(struct drm_device *dev)
+{
+ return 333000;
+}
+
+int
+i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+{
+ return 200000;
+}
+
+int
+i915gm_get_display_clock_speed(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u16 gcfgc = 0;
+
+ gcfgc = pci_conf_read(dev_priv->pc, dev_priv->tag, GCFGC) >> 16;
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+ return 133000;
+ else {
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_MHZ:
+ return 333000;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ return 190000;
+ }
+ }
+}
+
+int
+i865_get_display_clock_speed(struct drm_device *dev)
+{
+ return 266000;
+}
+
+int
+i855_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 hpllcc = 0;
+ /* Assume that the hardware is in the high speed state. This
+ * should be the default.
+ */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_100_200:
+ return 200000;
+ case GC_CLOCK_166_250:
+ return 250000;
+ case GC_CLOCK_100_133:
+ return 133000;
+ }
+
+ /* Shouldn't happen */
+ return 0;
+}
+
+int
+i830_get_display_clock_speed(struct drm_device *dev)
+{
+ return 133000;
+}
+
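+/* The M/N values end up in 24-bit register fields, so halve both until the
+ * ratio fits.
+ */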
+void
+fdi_reduce_ratio(u32 *num, u32 *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
+void
+ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
+ int link_clock, struct fdi_m_n *m_n)
+{
+ m_n->tu = 64; /* default size */
+
+ /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->gmch_m = bits_per_pixel * pixel_clock;
+ m_n->gmch_n = link_clock * nlanes * 8;
+ fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
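+
+/* Worked example (illustrative values, not from the original code): 24 bpp
+ * over 4 FDI lanes with pixel_clock = 108000 kHz and link_clock = 270000 kHz
+ * gives gmch_m = 2592000 and gmch_n = 8640000 (both already below 0xffffff),
+ * and link_m/link_n = 108000/270000.
+ */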
+
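+/* i915_panel_use_ssc is the user override: zero or positive forces the
+ * choice, while a negative value (presumably the default) defers to the
+ * VBT-derived lvds_use_ssc, unless the SSC quirk disables it.
+ */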
+static inline bool
+intel_panel_use_ssc(struct inteldrm_softc *dev_priv)
+{
+ if (i915_panel_use_ssc >= 0)
+ return i915_panel_use_ssc != 0;
+ return dev_priv->lvds_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+/**
+ * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
+ * @crtc: CRTC structure
+ * @mode: requested mode
+ *
+ * A pipe may be connected to one or more outputs. Based on the depth of the
+ * attached framebuffer, choose a good color depth to use on the pipe.
+ *
+ * If possible, match the pipe depth to the fb depth. In some cases, this
+ * isn't ideal, because the connected output supports a lesser or restricted
+ * set of depths. Resolve that here:
+ * LVDS typically supports only 6bpc, so clamp down in that case
+ * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
+ * Displays may support a restricted set as well, check EDID and clamp as
+ * appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
+ *
+ * RETURNS:
+ * Dithering requirement (i.e. false if display bpc and pipe bpc match,
+ * true if they don't match).
+ */
+bool
+intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_encoder *intel_encoder;
+ unsigned int display_bpc = UINT_MAX, bpc;
+
+ /* Walk the encoders & connectors on this crtc, get min bpc */
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
+ unsigned int lvds_bpc;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
+ LVDS_A3_POWER_UP)
+ lvds_bpc = 8;
+ else
+ lvds_bpc = 6;
+
+ if (lvds_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+ display_bpc = lvds_bpc;
+ }
+ continue;
+ }
+
+ /* Not one of the known troublemakers, check the EDID */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ if (connector->encoder != &intel_encoder->base)
+ continue;
+
+ /* Don't use an invalid EDID bpc value */
+ if (connector->display_info.bpc &&
+ connector->display_info.bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+ display_bpc = connector->display_info.bpc;
+ }
+ }
+
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc && edp_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. (Note: >12bpc will be caught below.)
+ */
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ if (display_bpc > 8 && display_bpc < 12) {
+ DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
+ display_bpc = 12;
+ } else {
+ DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
+ display_bpc = 8;
+ }
+ }
+ }
+
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
+ /*
+ * We could just drive the pipe at the highest bpc all the time and
+ * enable dithering as needed, but that costs bandwidth. So choose
+ * the minimum value that expresses the full color range of the fb but
+ * also stays within the max display bpc discovered above.
+ */
+
+ switch (fb->depth) {
+ case 8:
+ bpc = 8; /* since we go through a colormap */
+ break;
+ case 15:
+ case 16:
+ bpc = 6; /* min is 18bpp */
+ break;
+ case 24:
+ bpc = 8;
+ break;
+ case 30:
+ bpc = 10;
+ break;
+ case 48:
+ bpc = 12;
+ break;
+ default:
+ DRM_DEBUG("unsupported depth, assuming 24 bits\n");
+ bpc = min((unsigned int)8, display_bpc);
+ break;
+ }
+
+ display_bpc = min(display_bpc, bpc);
+
+ DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
+ bpc, display_bpc);
+
+ *pipe_bpp = display_bpc * 3;
+
+ return display_bpc != bpc;
+}
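+
+/*
+ * Illustrative walk through the clamping above: a 24-bit framebuffer
+ * (bpc = 8) on an LVDS panel strapped for 18-bit mode (display_bpc = 6)
+ * ends up with *pipe_bpp = 18 and returns true, so the caller enables
+ * dithering; with an 8bpc-capable panel both values are 8 and no
+ * dithering is needed.
+ */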
+
+int
+vlv_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int refclk = 27000; /* for DP & HDMI */
+
+ return 100000; /* only one validated so far */
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv))
+ refclk = 100000;
+ else
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ refclk = 100000;
+ }
+
+ return refclk;
+}
+
+int
+i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int refclk;
+
+ if (IS_VALLEYVIEW(dev)) {
+ refclk = vlv_get_refclk(crtc);
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
+ refclk = 96000;
+ } else {
+ refclk = 48000;
+ }
+
+ return refclk;
+}
+
+void
+i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock)
+{
+ /* SDVO TV has fixed PLL values that depend on its clock range;
+ this mirrors the vbios setting. */
+ if (adjusted_mode->clock >= 100000
+ && adjusted_mode->clock < 140500) {
+ clock->p1 = 2;
+ clock->p2 = 10;
+ clock->n = 3;
+ clock->m1 = 16;
+ clock->m2 = 8;
+ } else if (adjusted_mode->clock >= 140500
+ && adjusted_mode->clock <= 200000) {
+ clock->p1 = 1;
+ clock->p2 = 10;
+ clock->n = 6;
+ clock->m1 = 12;
+ clock->m2 = 8;
+ }
+}
+
+void
+i9xx_update_pll_dividers(struct drm_crtc *crtc,
+ intel_clock_t *clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev)) {
+ fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
+ if (reduced_clock)
+ fp2 = (1 << reduced_clock->n) << 16 |
+ reduced_clock->m1 << 8 | reduced_clock->m2;
+ } else {
+ fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
+ if (reduced_clock)
+ fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
+ reduced_clock->m2;
+ }
+
+ I915_WRITE(FP0(pipe), fp);
+
+ intel_crtc->lowfreq_avail = false;
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ reduced_clock && i915_powersave) {
+ I915_WRITE(FP1(pipe), fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(FP1(pipe), fp);
+ }
+}
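+
+/*
+ * Sketch of the FP packing above (illustrative values): on non-Pineview
+ * parts n = 3, m1 = 16, m2 = 8 packs to (3 << 16) | (16 << 8) | 8 =
+ * 0x31008. Pineview instead encodes N one-hot, i.e. (1 << n) in the
+ * same field.
+ */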
+
+void
+intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock->p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+ /* set the dithering flag on LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(LVDS, temp);
+}
+
+void
+vlv_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll, mdiv, pdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ bool is_sdvo;
+ u32 temp;
+ int retries;
+
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+ dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+ dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+ dpll |= DPLL_INTEGRATED_CLOCK_VLV;
+
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+
+ bestn = clock->n;
+ bestm1 = clock->m1;
+ bestm2 = clock->m2;
+ bestp1 = clock->p1;
+ bestp2 = clock->p2;
+
+ /*
+ * On Valleyview the PLL and lane counter registers are programmed
+ * through the DPIO interface.
+ */
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= ((bestn << DPIO_N_SHIFT));
+ mdiv |= (1 << DPIO_POST_DIV_SHIFT);
+ mdiv |= (1 << DPIO_K_SHIFT);
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
+ intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
+
+ pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
+ (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
+ (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+ (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+ intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
+
+ intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+ for (retries = 1; retries > 0; retries--) {
+ if ((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("DPLL %d failed to lock\n", pipe);
+
+ intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ DELAY(150);
+
+ temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
+ POSTING_READ(DPLL_MD(pipe));
+
+ /* Now program lane control registers */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ temp = 0x1000C4;
+ if (pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+ }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ temp = 0x1000C4;
+ if (pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+ }
+}
+
+void
+i9xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+ bool is_sdvo;
+
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
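+ /*
+ * e.g. p1 = 2 is stored one-hot as (1 << (2 - 1)) = 0x2 in the P1
+ * field rather than as a plain integer divisor.
+ */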
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ DELAY(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ DELAY(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+ }
+}
+
+void
+i8xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ DELAY(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ DELAY(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+}
+
+void
+intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t vsyncshift;
+
+ if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal / 2;
+ } else {
+ vsyncshift = 0;
+ }
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+ I915_WRITE(HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+ * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+ * documented on the DDI_FUNC_CTL register description, EDP Input Select
+ * bits. */
+ if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ (pipe == PIPE_B || pipe == PIPE_C))
+ I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
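+
+/*
+ * Illustrative packing for the timing writes above: a 1920x1080 mode
+ * with an htotal of 2200 programs HTOTAL with
+ * (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f; the hardware counts
+ * from zero, hence the "- 1" on every field.
+ */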
+
+int
+i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int refclk, num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dspcntr, pipeconf;
+ bool ok, has_reduced_clock = false, is_sdvo = false;
+ bool is_lvds = false, is_tv = false, is_dp = false;
+ struct intel_encoder *encoder;
+ const intel_limit_t *limit;
+ int ret;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ refclk = i9xx_get_refclk(crtc, num_connectors);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ &clock,
+ &reduced_clock);
+ }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+
+ if (IS_GEN2(dev))
+ i8xx_update_pll(crtc, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
+ else
+ i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
+
+ /* setup pipeconf */
+ pipeconf = I915_READ(PIPECONF(pipe));
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
+ * core speed.
+ *
+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+ * pipe == 0 check?
+ */
+ if (mode->clock >
+ dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
+ else
+ pipeconf &= ~PIPECONF_DOUBLE_WIDE;
+ }
+
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
+ if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_ENABLE |
+ I965_PIPECONF_ACTIVE;
+ }
+ }
+
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ drm_mode_debug_printmodeline(mode);
+
+ if (HAS_PIPE_CXSR(dev)) {
+ if (intel_crtc->lowfreq_avail) {
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ } else {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ }
+ }
+
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
+ if (!IS_GEN2(dev) &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ else
+ pipeconf |= PIPECONF_PROGRESSIVE;
+
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((mode->vdisplay - 1) << 16) |
+ (mode->hdisplay - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+
+ I915_WRITE(PIPECONF(pipe), pipeconf);
+ POSTING_READ(PIPECONF(pipe));
+ intel_enable_pipe(dev_priv, pipe, false);
+
+ intel_wait_for_vblank(dev, pipe);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ return ret;
+}
+
+void
+ironlake_init_pch_refclk(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ u32 temp;
+ bool has_lvds = false;
+ bool has_cpu_edp = false;
+ bool has_pch_edp = false;
+ bool has_panel = false;
+ bool has_ck505 = false;
+ bool can_ssc = false;
+
+ /* We need to take the global config into account */
+ list_for_each_entry(encoder, &mode_config->encoder_list,
+ base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_panel = true;
+ has_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ has_panel = true;
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ has_pch_edp = true;
+ else
+ has_cpu_edp = true;
+ break;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev)) {
+ has_ck505 = dev_priv->display_clock_mode;
+ can_ssc = has_ck505;
+ } else {
+ has_ck505 = false;
+ can_ssc = true;
+ }
+
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
+ has_panel, has_lvds, has_pch_edp, has_cpu_edp,
+ has_ck505);
+
+ /* Ironlake: try to set up the display ref clock before DPLL
+ * enabling. This is only under the driver's control after the
+ * PCH B stepping; earlier chipset steppings ignore this setting.
+ */
+ temp = I915_READ(PCH_DREF_CONTROL);
+ /* Always enable nonspread source */
+ temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+ if (has_ck505)
+ temp |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ if (has_panel) {
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
+
+ /* SSC must be turned on before enabling the CPU output */
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on panel\n");
+ temp |= DREF_SSC1_ENABLE;
+ } else
+ temp &= ~DREF_SSC1_ENABLE;
+
+ /* Get SSC going before enabling the outputs */
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ DELAY(200);
+
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on eDP\n");
+ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ }
+ else
+ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else
+ temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ DELAY(200);
+ } else {
+ DRM_DEBUG_KMS("Disabling SSC entirely\n");
+
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Turn off CPU output */
+ temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ DELAY(200);
+
+ /* Turn off the SSC source */
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_DISABLE;
+
+ /* Turn off SSC1 */
+ temp &= ~DREF_SSC1_ENABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ DELAY(200);
+ }
+}
+
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+void
+lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+ bool is_sdv = false;
+ u32 tmp;
+ int retries;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
+ }
+
+ if (!has_vga)
+ return;
+
+ /* XXX: Rip out SDV support once Haswell ships for real. */
+ if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+ is_sdv = true;
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ DELAY(24);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (!is_sdv) {
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ for (retries = 100; retries > 0; retries--) {
+ if (I915_READ(SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS)
+ break;
+ DELAY(100);
+ }
+ if (retries == 0)
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ for (retries = 100; retries > 0; retries--) {
+ if ((I915_READ(SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS) == 0)
+ break;
+ DELAY(100);
+ }
+ if (retries == 0)
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+ tmp &= ~(0x3 << 6);
+ tmp |= (1 << 6) | (1 << 0);
+ intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+ }
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+ tmp |= 0x7FFF;
+ intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ }
+
+ /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+ tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+ tmp |= SBI_DBUFF0_ENABLE;
+ intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void
+intel_init_pch_refclk(struct drm_device *dev)
+{
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ironlake_init_pch_refclk(dev);
+ else if (HAS_PCH_LPT(dev))
+ lpt_init_pch_refclk(dev);
+}
+
+int
+ironlake_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ struct intel_encoder *edp_encoder = NULL;
+ int num_connectors = 0;
+ bool is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ edp_encoder = encoder;
+ break;
+ }
+ num_connectors++;
+ }
+
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ dev_priv->lvds_ssc_freq);
+ return dev_priv->lvds_ssc_freq * 1000;
+ }
+
+ return 120000;
+}
+
+void
+ironlake_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct inteldrm_softc *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(pipe));
+
+ val &= ~PIPE_BPC_MASK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ val |= PIPE_6BPC;
+ break;
+ case 24:
+ val |= PIPE_8BPC;
+ break;
+ case 30:
+ val |= PIPE_10BPC;
+ break;
+ case 36:
+ val |= PIPE_12BPC;
+ break;
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
+ }
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
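+
+/*
+ * Note: the intel_crtc->bpp consumed above is the pipe depth chosen by
+ * intel_choose_pipe_bpp_dither() (bpc * 3), so 18/24/30/36 map to
+ * 6/8/10/12 bits per channel.
+ */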
+
+void
+haswell_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct inteldrm_softc *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(cpu_transcoder));
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK_HSW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(cpu_transcoder), val);
+ POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
+bool
+ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
+ const intel_limit_t *limit;
+ bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ }
+ }
+
+ refclk = ironlake_get_refclk(crtc);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ clock);
+ if (!ret)
+ return false;
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ *has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ clock,
+ reduced_clock);
+ }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+ return true;
+}
+
+void
+cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t temp;
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ if (temp & FDI_BC_BIFURCATION_SELECT)
+ return;
+
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp |= FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("enabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+bool
+ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+ DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ if (intel_crtc->fdi_lanes > 4) {
+ DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 4;
+
+ return false;
+ }
+
+ if (dev_priv->num_pipe == 2)
+ return true;
+
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ return true;
+ case PIPE_B:
+ if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+ intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+
+ if (intel_crtc->fdi_lanes > 2)
+ WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ else
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ case PIPE_C:
+ if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+ if (intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+ } else {
+ DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ return false;
+ }
+
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ default:
+ BUG();
+ }
+}
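+
+/*
+ * Summary of the lane budget enforced above: pipe A always gets its
+ * lanes; pipes B and C share the FDI B/C pins, so B may exceed 2 lanes
+ * only while C is idle, and once both run each is clamped to 2 lanes
+ * with the bifurcation bit set.
+ */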
+
+int
+ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ return bps / (link_bw * 8) + 1;
+}
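+
+/*
+ * Worked example (illustrative): a 148500 kHz target clock at 24 bpp
+ * over a 270000 kHz link gives bps = 148500 * 24 * 21 / 20 = 3742200,
+ * and 3742200 / (270000 * 8) + 1 = 2 lanes.
+ */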
+
+void
+ironlake_set_m_n(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ struct intel_encoder *intel_encoder, *edp_encoder = NULL;
+ struct fdi_m_n m_n = {0};
+ int target_clock, pixel_multiplier, lane, link_bw;
+ bool is_dp = false, is_cpu_edp = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ is_cpu_edp = true;
+ edp_encoder = intel_encoder;
+ break;
+ }
+ }
+
+ /* FDI link */
+ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ lane = 0;
+ /* CPU eDP doesn't require FDI link, so just set DP M/N
+ according to current link config */
+ if (is_cpu_edp) {
+ intel_edp_link_config(edp_encoder, &lane, &link_bw);
+ } else {
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+ }
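+ /*
+ * Illustrative numbers, assuming the usual 2.7 GHz FDI PLL: the
+ * divider reads back as 27, so link_bw = 27 * 10000 = 270000, i.e.
+ * each lane carries 270000 kHz worth of mode clock.
+ */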
+
+ /* [e]DP over FDI requires target mode clock instead of link clock. */
+ if (edp_encoder)
+ target_clock = intel_edp_target_clock(edp_encoder, mode);
+ else if (is_dp)
+ target_clock = mode->clock;
+ else
+ target_clock = adjusted_mode->clock;
+
+ if (!lane)
+ lane = ironlake_get_lanes_required(target_clock, link_bw,
+ intel_crtc->bpp);
+
+ intel_crtc->fdi_lanes = lane;
+
+ if (pixel_multiplier > 1)
+ link_bw *= pixel_multiplier;
+ ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
+ &m_n);
+
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+}
+
+uint32_t
+ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, u32 fp)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ uint32_t dpll;
+ int factor, pixel_multiplier, num_connectors = 0;
+ bool is_lvds = false, is_sdvo = false, is_tv = false;
+ bool is_dp = false, is_cpu_edp = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (is_lvds) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->lvds_ssc_freq == 100) ||
+ (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ factor = 25;
+ } else if (is_sdvo && is_tv)
+ factor = 20;
+
+ if (clock->m < factor * clock->n)
+ fp |= FP_CB_TUNE;
+
+ dpll = 0;
+
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ }
+ if (is_dp && !is_cpu_edp)
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (is_sdvo && is_tv)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (is_tv)
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ return dpll;
+}
+
+int
+ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither, fdi_config_ok;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+ "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+
+ DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+ drm_mode_debug_printmodeline(mode);
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
+ return -EINVAL;
+ }
+ } else
+ intel_put_pch_pll(intel_crtc);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ temp = I915_READ(PCH_LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(PCH_LVDS, temp);
+ }
+
+ if (is_dp && !is_cpu_edp) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ /* For non-DP output, clear any trans DP clock recovery setting. */
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+ }
+
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
+ DELAY(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+ }
+
+ intel_crtc->lowfreq_avail = false;
+ if (intel_crtc->pch_pll) {
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+ }
+ }
+
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+ /* Note, this also computes intel_crtc->fdi_lanes which is used below in
+ * ironlake_check_fdi_lanes. */
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+ fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
+
+ if (is_cpu_edp)
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+ ironlake_set_pipeconf(crtc, adjusted_mode, dither);
+
+ intel_wait_for_vblank(dev, pipe);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
+ return fdi_config_ok ? ret : -EINVAL;
+}
+
+int
+haswell_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll = 0, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ if (is_cpu_edp)
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
+ /* We are not sure yet this won't happen. */
+ WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+ INTEL_PCH_TYPE(dev));
+
+ WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+ num_connectors, pipe_name(pipe));
+
+ WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+ (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+ WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+ if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+ return -EINVAL;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock,
+ &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+ drm_mode_debug_printmodeline(mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
+ fp);
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its
+ * own on pre-Haswell/LPT generation */
+ if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
+ return -EINVAL;
+ }
+ } else
+ intel_put_pch_pll(intel_crtc);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are
+ * enabled. This is an exception to the general rule that
+ * mode_set doesn't turn things on.
+ */
+ if (is_lvds) {
+ temp = I915_READ(PCH_LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether
+ * we're going to set the DPLLs for dual-channel mode or
+ * not.
+ */
+ if (clock.p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP |
+ LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode
+ * (LVDS_A3_POWER_UP) appropriately here, but we need to
+ * look more thoroughly into how panels behave in the
+ * two modes.
+ */
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(PCH_LVDS, temp);
+ }
+ }
+
+ if (is_dp && !is_cpu_edp) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery
+ * setting. */
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+ }
+ }
+
+ intel_crtc->lowfreq_avail = false;
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
+ DELAY(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+ }
+
+ if (intel_crtc->pch_pll) {
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+ }
+ }
+ }
+
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+ if (!is_dp || is_cpu_edp)
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (is_cpu_edp)
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+ haswell_set_pipeconf(crtc, adjusted_mode, dither);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
+ return ret;
+}
+
+int
+intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int ret;
+
+ drm_vblank_pre_modeset(dev, pipe);
+
+ ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
+ x, y, fb);
+
+ drm_vblank_post_modeset(dev, pipe);
+
+ if (ret != 0)
+ return ret;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->base.helper_private;
+ encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+ }
+
+ return 0;
+}
+
+bool
+intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t i;
+
+ i = I915_READ(reg_eldv);
+ i &= bits_eldv;
+
+ if (!eld[0])
+ return !i;
+
+ if (!i)
+ return false;
+
+ i = I915_READ(reg_elda);
+ i &= ~bits_elda;
+ I915_WRITE(reg_elda, i);
+
+ for (i = 0; i < eld[2]; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
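+
+/*
+ * eld[2] holds the baseline ELD length in 32-bit dwords (per the HDA
+ * ELD layout), which is why this helper and the writers below walk the
+ * buffer a dword at a time.
+ */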
+
+void
+g4x_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t len;
+ uint32_t i;
+
+ i = I915_READ(G4X_AUD_VID_DID);
+
+ if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i &= ~(eldv | G4X_ELD_ADDR);
+ len = (i >> 9) & 0x1f; /* ELD buffer size */
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+
+ if (!eld[0])
+ return;
+
+ if (eld[2] < (uint8_t)len)
+ len = eld[2];
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i |= eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+}
+
+void
+haswell_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ struct drm_device *dev = crtc->dev;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ int tmp;
+
+ int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+ int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+ int aud_config = HSW_AUD_CFG(pipe);
+ int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+ DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+ /* Audio output enable */
+ DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Wait for 1 vertical blank */
+ intel_wait_for_vblank(dev, pipe);
+
+ /* Set ELD valid state */
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+ tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+ /* Enable HDMI mode */
+ tmp = I915_READ(aud_config);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+ /* clear N_programing_enable and N_value_index */
+ tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+ I915_WRITE(aud_config, tmp);
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+ len = eld[2] < 21 ? eld[2] : 21; /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
+
+void
+ironlake_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int hdmiw_hdmiedid;
+ int aud_config;
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+ int pipe = to_intel_crtc(crtc)->pipe;
+
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else {
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ i = I915_READ(aud_cntl_st);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ if (!i) {
+ DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
+ /* operate blindly on all ports */
+ eldv = IBX_ELD_VALIDB;
+ eldv |= IBX_ELD_VALIDB << 4;
+ eldv |= IBX_ELD_VALIDB << 8;
+ } else {
+ DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
+ eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+
+ len = eld[2] < 21 ? eld[2] : 21; /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
+
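+/*
+ * Generic ELD entry point: find the audio-capable connector for this
+ * encoder/mode, patch the A/V sync delay into the ELD (the field is
+ * defined in 2 ms units, hence the division by two below), and hand
+ * off to the generation-specific write_eld hook chosen at init time.
+ */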
+void
+intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ connector = drm_select_eld(encoder, mode);
+ if (!connector)
+ return;
+
+ DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ connector->encoder->base.id,
+ drm_get_encoder_name(connector->encoder));
+
+ connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+ if (dev_priv->display.write_eld)
+ dev_priv->display.write_eld(connector, crtc);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void
+intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int palreg = PALETTE(intel_crtc->pipe);
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled || !intel_crtc->active)
+ return;
+
+ /* use legacy palette for Ironlake */
+ if (HAS_PCH_SPLIT(dev))
+ palreg = LGC_PALETTE(intel_crtc->pipe);
+
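+	/* Each palette entry packs 8-bit R/G/B as 0x00RRGGBB. */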
+ for (i = 0; i < 256; i++) {
+ I915_WRITE(palreg + 4 * i,
+ (intel_crtc->lut_r[i] << 16) |
+ (intel_crtc->lut_g[i] << 8) |
+ intel_crtc->lut_b[i]);
+ }
+}
+
+void
+i845_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool visible = base != 0;
+ u32 cntl;
+
+ if (intel_crtc->cursor_visible == visible)
+ return;
+
+ cntl = I915_READ(_CURACNTR);
+ if (visible) {
+ /* On these chipsets we can only modify the base whilst
+ * the cursor is disabled.
+ */
+ I915_WRITE(_CURABASE, base);
+
+ cntl &= ~(CURSOR_FORMAT_MASK);
+ /* XXX width must be 64, stride 256 => 0x00 << 28 */
+ cntl |= CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB;
+ } else
+ cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ I915_WRITE(_CURACNTR, cntl);
+
+ intel_crtc->cursor_visible = visible;
+}
+
+void
+i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(CURCNTR(pipe));
+ if (base) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ I915_WRITE(CURCNTR(pipe), cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE(pipe), base);
+}
+
+void
+ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
+ if (base) {
+ cntl &= ~CURSOR_MODE;
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ I915_WRITE(CURCNTR_IVB(pipe), cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE_IVB(pipe), base);
+}
+
+/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+void
+intel_crtc_update_cursor(struct drm_crtc *crtc,
+ bool on)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int x = intel_crtc->cursor_x;
+ int y = intel_crtc->cursor_y;
+ u32 base, pos;
+ bool visible;
+
+ pos = 0;
+
+ if (on && crtc->enabled && crtc->fb) {
+ base = intel_crtc->cursor_addr;
+ if (x > (int) crtc->fb->width)
+ base = 0;
+
+ if (y > (int) crtc->fb->height)
+ base = 0;
+ } else
+ base = 0;
+
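+	/*
+	 * The cursor position register uses sign/magnitude encoding: a
+	 * sign bit per axis plus the absolute offset.  A cursor hanging
+	 * off the top/left edge stays enabled as long as some part of
+	 * it is still visible.
+	 */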
+ if (x < 0) {
+ if (x + intel_crtc->cursor_width < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ if (y + intel_crtc->cursor_height < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ visible = base != 0;
+ if (!visible && !intel_crtc->cursor_visible)
+ return;
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ I915_WRITE(CURPOS_IVB(pipe), pos);
+ ivb_update_cursor(crtc, base);
+ } else {
+ I915_WRITE(CURPOS(pipe), pos);
+ if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
+ }
+}
+
+int
+intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj;
+ uint32_t addr;
+ int ret;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!handle) {
+ DRM_DEBUG_KMS("cursor off\n");
+ addr = 0;
+ obj = NULL;
+ DRM_LOCK();
+ goto finish;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ DRM_ERROR("we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
+
+ if (obj->base.size < width * height * 4) {
+		DRM_ERROR("buffer is too small\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* we only need to pin inside GTT if cursor is non-phy */
+ DRM_LOCK();
+ if (!dev_priv->info->cursor_needs_physical) {
+ if (obj->tiling_mode) {
+ DRM_ERROR("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_ERROR("failed to release fence for cursor");
+ goto fail_unpin;
+ }
+
+ addr = obj->gtt_offset;
+ } else {
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
+ ret = i915_gem_attach_phys_object(dev, obj,
+ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
+ align);
+ if (ret) {
+ DRM_ERROR("failed to attach phys object\n");
+ goto fail_locked;
+ }
+ addr = obj->phys_obj->handle->segs[0].ds_addr;
+ }
+
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, (height << 12) | width);
+
+ finish:
+ if (intel_crtc->cursor_bo) {
+ if (dev_priv->info->cursor_needs_physical) {
+ if (intel_crtc->cursor_bo != obj)
+ i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+ } else
+ i915_gem_object_unpin(intel_crtc->cursor_bo);
+ drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
+ }
+
+ DRM_UNLOCK();
+
+ intel_crtc->cursor_addr = addr;
+ intel_crtc->cursor_bo = obj;
+ intel_crtc->cursor_width = width;
+ intel_crtc->cursor_height = height;
+
+ intel_crtc_update_cursor(crtc, true);
+
+ return 0;
+fail_unpin:
+ i915_gem_object_unpin(obj);
+fail_locked:
+ DRM_UNLOCK();
+fail:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return ret;
+}
+
+int
+intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->cursor_x = x;
+ intel_crtc->cursor_y = y;
+
+ intel_crtc_update_cursor(crtc, true);
+
+ return 0;
+}
+
+/** Sets the color ramps on behalf of RandR */
+void
+intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->lut_r[regno] = red >> 8;
+ intel_crtc->lut_g[regno] = green >> 8;
+ intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+void
+intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
+}
+
+void
+intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ int end = (start + size > 256) ? 256 : start + size, i;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ for (i = start; i < end; i++) {
+ intel_crtc->lut_r[i] = red[i] >> 8;
+ intel_crtc->lut_g[i] = green[i] >> 8;
+ intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ intel_crtc_load_lut(crtc);
+}
+
+/**
+ * Get a pipe with a simple mode set on it for doing load-based monitor
+ * detection.
+ *
+ * It will be up to the load-detect code to adjust the pipe as appropriate for
+ * its requirements. The pipe will be connected to no other encoders.
+ *
+ * Currently this code will only succeed if there is a pipe with no encoders
+ * configured for it. In the future, it could choose to temporarily disable
+ * some outputs to free up a pipe for its use.
+ *
+ * \return crtc, or NULL if no pipes are available.
+ */
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static struct drm_display_mode load_detect_mode = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
+{
+ struct intel_framebuffer *intel_fb;
+ int ret;
+
+ intel_fb = malloc(sizeof(*intel_fb), M_DRM, M_WAITOK | M_ZERO);
+ if (!intel_fb) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ free(intel_fb, M_DRM);
+ return ERR_PTR(ret);
+ }
+
+ return &intel_fb->base;
+}
+
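+/*
+ * The pitch is the byte width of one scanline rounded up to the
+ * 64-byte alignment the display engine expects.  Illustrative
+ * example: a 1024-pixel-wide 32 bpp mode gives howmany(1024 * 32, 8)
+ * = 4096 bytes, already aligned; 1366 pixels wide gives 5464 bytes,
+ * rounded up to 5504.
+ */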
+u32
+intel_framebuffer_pitch_for_width(int width, int bpp)
+{
+ u32 pitch = howmany(width * bpp, 8);
+ return roundup2(pitch, 64);
+}
+
+u32
+intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+{
+ u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
+ return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
+}
+
+struct drm_framebuffer *
+intel_framebuffer_create_for_mode(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ int depth, int bpp)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+
+ obj = i915_gem_alloc_object(dev,
+ intel_framebuffer_size_for_mode(mode, bpp));
+ if (obj == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mode_cmd.width = mode->hdisplay;
+ mode_cmd.height = mode->vdisplay;
+ mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+ bpp);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ return intel_framebuffer_create(dev, &mode_cmd, obj);
+}
+
+struct drm_framebuffer *
+mode_fits_in_fbdev(struct drm_device *dev,
+ struct drm_display_mode *mode)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_framebuffer *fb;
+
+ if (dev_priv->fbdev == NULL)
+ return NULL;
+
+ obj = dev_priv->fbdev->ifb.obj;
+ if (obj == NULL)
+ return NULL;
+
+ fb = &dev_priv->fbdev->ifb.base;
+ if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+ fb->bits_per_pixel))
+ return NULL;
+
+ if (obj->base.size < mode->vdisplay * fb->pitches[0])
+ return NULL;
+
+ return fb;
+}
+
+bool
+intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old)
+{
+ struct intel_crtc *intel_crtc;
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
+ struct drm_crtc *possible_crtc;
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = NULL;
+ struct drm_device *dev = encoder->dev;
+ struct drm_framebuffer *fb;
+ int i = -1;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ encoder->base.id, drm_get_encoder_name(encoder));
+
+ /*
+ * Algorithm gets a little messy:
+ *
+ * - if the connector already has an assigned crtc, use it (but make
+ * sure it's on first)
+ *
+ * - try to find the first unused crtc that can drive this connector,
+ * and use that if we find one
+ */
+
+ /* See if we already have a CRTC for this connector */
+ if (encoder->crtc) {
+ crtc = encoder->crtc;
+
+ old->dpms_mode = connector->dpms;
+ old->load_detect_temp = false;
+
+ /* Make sure the crtc and connector are running */
+ if (connector->dpms != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
+
+ return true;
+ }
+
+ /* Find an unused one (if possible) */
+ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
+ i++;
+ if (!(encoder->possible_crtcs & (1 << i)))
+ continue;
+ if (!possible_crtc->enabled) {
+ crtc = possible_crtc;
+ break;
+ }
+ }
+
+ /*
+ * If we didn't find an unused CRTC, don't use any.
+ */
+ if (!crtc) {
+ DRM_DEBUG_KMS("no pipe available for load-detect\n");
+ return false;
+ }
+
+ intel_encoder->new_crtc = to_intel_crtc(crtc);
+ to_intel_connector(connector)->new_encoder = intel_encoder;
+
+ intel_crtc = to_intel_crtc(crtc);
+ old->dpms_mode = connector->dpms;
+ old->load_detect_temp = true;
+ old->release_fb = NULL;
+
+ if (!mode)
+ mode = &load_detect_mode;
+
+ /* We need a framebuffer large enough to accommodate all accesses
+ * that the plane may generate whilst we perform load detection.
+ * We can not rely on the fbcon either being present (we get called
+ * during its initialisation to detect all boot displays, or it may
+ * not even exist) or that it is large enough to satisfy the
+ * requested mode.
+ */
+ fb = mode_fits_in_fbdev(dev, mode);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
+ fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+ old->release_fb = fb;
+ } else
+ DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+ return false;
+ }
+
+ if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
+ DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+ if (old->release_fb)
+ old->release_fb->funcs->destroy(old->release_fb);
+ return false;
+ }
+
+ /* let the connector get through one full cycle before testing */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ return true;
+}
+
+void
+intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old)
+{
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
+ struct drm_encoder *encoder = &intel_encoder->base;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ encoder->base.id, drm_get_encoder_name(encoder));
+
+ if (old->load_detect_temp) {
+ struct drm_crtc *crtc = encoder->crtc;
+
+ to_intel_connector(connector)->new_encoder = NULL;
+ intel_encoder->new_crtc = NULL;
+ intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+ if (old->release_fb)
+ old->release_fb->funcs->destroy(old->release_fb);
+
+ return;
+ }
+
+ /* Switch crtc and encoder back off if necessary */
+ if (old->dpms_mode != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, old->dpms_mode);
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+int
+intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll = I915_READ(DPLL(pipe));
+ u32 fp;
+ intel_clock_t clock;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = I915_READ(FP0(pipe));
+ else
+ fp = I915_READ(FP1(pipe));
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ if (IS_PINEVIEW(dev)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
+ if (!IS_GEN2(dev)) {
+ if (IS_PINEVIEW(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+ case DPLLB_MODE_DAC_SERIAL:
+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+ 5 : 10;
+ break;
+ case DPLLB_MODE_LVDS:
+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+ 7 : 14;
+ break;
+ default:
+ DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ return 0;
+ }
+
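+		/*
+		 * intel_clock() is assumed to apply the usual i9xx PLL
+		 * formula here: m = 5 * (m1 + 2) + (m2 + 2), vco =
+		 * refclk * m / (n + 2), dot = vco / (p1 * p2).
+		 */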
+		/* XXX: Handle the 100MHz refclk */
+ intel_clock(dev, 96000, &clock);
+ } else {
+ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+
+ if (is_lvds) {
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ intel_clock(dev, 66000, &clock);
+ } else
+ intel_clock(dev, 48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ intel_clock(dev, 48000, &clock);
+ }
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *
+intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ struct drm_display_mode *mode;
+ int htot = I915_READ(HTOTAL(cpu_transcoder));
+ int hsync = I915_READ(HSYNC(cpu_transcoder));
+ int vtot = I915_READ(VTOTAL(cpu_transcoder));
+ int vsync = I915_READ(VSYNC(cpu_transcoder));
+
+ mode = malloc(sizeof(*mode), M_DRM, M_WAITOK | M_ZERO);
+ if (!mode)
+ return NULL;
+
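+	/*
+	 * The timing registers store each value minus one, with the
+	 * active/start count in the low word and the total/end count
+	 * in the high word, hence the shifts and +1 below.
+	 */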
+ mode->clock = intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+void
+intel_increase_pllclock(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ dpll = I915_READ(dpll_reg);
+ if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ if (dpll & DISPLAY_RATE_SELECT_FPA1)
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+ }
+}
+
+void
+intel_decrease_pllclock(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ /*
+ * Since this is called by a timer, we should never get here in
+ * the manual case.
+ */
+ if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
+ DRM_DEBUG_DRIVER("downclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ dpll |= DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+ dpll = I915_READ(dpll_reg);
+ if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
+ DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
+ }
+}
+
+void
+intel_mark_busy(struct drm_device *dev)
+{
+ i915_update_gfx_val(dev->dev_private);
+}
+
+void
+intel_mark_idle(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ if (!i915_powersave)
+ return;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (!crtc->fb)
+ continue;
+
+ intel_decrease_pllclock(crtc);
+ }
+}
+
+void
+intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_crtc *crtc;
+
+ if (!i915_powersave)
+ return;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (!crtc->fb)
+ continue;
+
+ if (to_intel_framebuffer(crtc->fb)->obj == obj)
+ intel_increase_pllclock(crtc);
+ }
+}
+
+void
+intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct intel_unpin_work *work;
+
+ mtx_enter(&dev->event_lock);
+ work = intel_crtc->unpin_work;
+ intel_crtc->unpin_work = NULL;
+ mtx_leave(&dev->event_lock);
+
+ if (work) {
+// cancel_work_sync(&work->work);
+ free(work, M_DRM);
+ }
+
+ drm_crtc_cleanup(crtc);
+
+ free(intel_crtc, M_DRM);
+}
+
+void
+intel_unpin_work_fn(void *arg1, void *arg2)
+{
+ struct intel_unpin_work *work = arg1;
+ struct drm_device *dev = work->crtc->dev;
+
+ DRM_LOCK();
+ intel_unpin_fb_obj(work->old_fb_obj);
+ drm_gem_object_unreference(&work->pending_flip_obj->base);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+
+ intel_update_fbc(dev);
+ DRM_UNLOCK();
+
+ BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+ atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
+ free(work, M_DRM);
+}
+
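+/*
+ * Flip completion runs from the vblank interrupt.  The read barriers
+ * here pair with the write barriers in intel_mark_page_flip_active():
+ * work->pending must read as INTEL_FLIP_COMPLETE before the work item
+ * is torn down, so a flip that was queued but not yet latched by the
+ * hardware is never completed early.
+ */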
+void
+do_intel_finish_page_flip(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ struct drm_i915_gem_object *obj;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ mtx_enter(&dev->event_lock);
+ work = intel_crtc->unpin_work;
+
+ /* Ensure we don't miss a work->pending update ... */
+ DRM_READMEMORYBARRIER();
+
+ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ mtx_leave(&dev->event_lock);
+ return;
+ }
+
+ /* and that the unpin work is consistent wrt ->pending. */
+ DRM_READMEMORYBARRIER();
+
+ intel_crtc->unpin_work = NULL;
+
+ if (work->event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
+
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ mtx_leave(&dev->event_lock);
+
+ obj = work->old_fb_obj;
+
+ atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
+ wakeup(&dev_priv->pending_flip_queue);
+
+ workq_queue_task(NULL, &work->task, 0, intel_unpin_work_fn, work, NULL);
+
+// trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+}
+
+void
+intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+void
+intel_finish_page_flip_plane(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+void
+intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+
+ /* NB: An MMIO update of the plane base pointer will also
+ * generate a page-flip completion irq, i.e. every modeset
+ * is also accompanied by a spurious intel_prepare_page_flip().
+ */
+ mtx_enter(&dev->event_lock);
+ if (intel_crtc->unpin_work)
+ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+ mtx_leave(&dev->event_lock);
+}
+
+static inline void
+intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+ /* Ensure that the work item is consistent when activating it ... */
+ DRM_WRITEMEMORYBARRIER();
+ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+ /* and that it is marked active as soon as the irq could fire. */
+ DRM_WRITEMEMORYBARRIER();
+}
+
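+/*
+ * The generation-specific queue_flip hooks below share one pattern:
+ * pin the new framebuffer, emit an MI_DISPLAY_FLIP packet on a ring
+ * so the flip executes once rendering has finished, and mark the
+ * flip active before the packet can raise the completion interrupt.
+ */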
+int
+intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ goto err_unpin;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+int
+intel_gen3_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ goto err_unpin;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+int
+intel_gen4_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err_unpin;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring,
+ (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ obj->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+int
+intel_gen6_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err_unpin;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+
+ /* Contrary to the suggestions in the documentation,
+ * "Enable Panel Fitter" does not seem to be required when page
+ * flipping with a non-native mode, and worse causes a normal
+ * modeset to fail.
+ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+int
+intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
+ uint32_t plane_bit = 0;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ switch(intel_crtc->plane) {
+ case PLANE_A:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+ break;
+ case PLANE_B:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+ break;
+ case PLANE_C:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+ break;
+ default:
+#ifdef notyet
+ WARN_ONCE(1, "unknown plane in flip command\n");
+#endif
+ ret = -ENODEV;
+ goto err_unpin;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err_unpin;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+ intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, (MI_NOOP));
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+int
+intel_default_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ return -ENODEV;
+}
+
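+/*
+ * Userspace-facing page flip entry point.  unpin_work doubles as the
+ * "flip in flight" flag: it is installed here under the event lock,
+ * consumed by the vblank handler, and freed from a task once the old
+ * framebuffer object has been unpinned.
+ */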
+int
+intel_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ int ret;
+
+ /* Can't change pixel format via MI display flips. */
+ if (fb->pixel_format != crtc->fb->pixel_format)
+ return -EINVAL;
+
+ /*
+ * TILEOFF/LINOFF registers can't be changed via MI display flips.
+	 * Note that pitch changes could also affect these registers.
+ */
+ if (INTEL_INFO(dev)->gen > 3 &&
+ (fb->offsets[0] != crtc->fb->offsets[0] ||
+ fb->pitches[0] != crtc->fb->pitches[0]))
+ return -EINVAL;
+
+ work = malloc(sizeof *work, M_DRM, M_WAITOK | M_ZERO);
+ if (work == NULL)
+ return -ENOMEM;
+
+ work->event = event;
+ work->crtc = crtc;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ work->old_fb_obj = intel_fb->obj;
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ mtx_enter(&dev->event_lock);
+ if (intel_crtc->unpin_work) {
+ mtx_leave(&dev->event_lock);
+ free(work, M_DRM);
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ return -EBUSY;
+ }
+ intel_crtc->unpin_work = work;
+ mtx_leave(&dev->event_lock);
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+#ifdef notyet
+ if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+ flush_workqueue(dev_priv->wq);
+#endif
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
+
+ /* Reference the objects for the scheduled work. */
+ drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_gem_object_reference(&obj->base);
+
+ crtc->fb = fb;
+
+ work->pending_flip_obj = obj;
+
+ work->enable_stall_check = true;
+
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+ atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ atomic_inc(&intel_crtc->unpin_work_count);
+
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+ goto cleanup_pending;
+
+ intel_disable_fbc(dev);
+ intel_mark_fb_busy(obj);
+ DRM_UNLOCK();
+
+// trace_i915_flip_request(intel_crtc->plane, obj);
+
+ return 0;
+
+cleanup_pending:
+ atomic_dec(&intel_crtc->unpin_work_count);
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+ DRM_UNLOCK();
+
+cleanup:
+ mtx_enter(&dev->event_lock);
+ intel_crtc->unpin_work = NULL;
+ mtx_leave(&dev->event_lock);
+
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
+ free(work, M_DRM);
+
+ return ret;
+}
+
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_noop,
+};
+
+bool
+intel_encoder_check_is_cloned(struct intel_encoder *encoder)
+{
+ struct intel_encoder *other_encoder;
+ struct drm_crtc *crtc = &encoder->new_crtc->base;
+
+ if (WARN_ON(!crtc))
+ return false;
+
+ list_for_each_entry(other_encoder,
+ &crtc->dev->mode_config.encoder_list,
+ base.head) {
+
+ if (&other_encoder->new_crtc->base != crtc ||
+ encoder == other_encoder)
+ continue;
+ else
+ return true;
+ }
+
+ return false;
+}
+
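+/*
+ * encoder->possible_crtcs is a bitmask indexed by crtc position in
+ * the mode_config list, so walk the list to find this crtc's bit.
+ */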
+bool
+intel_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ WARN(!crtc, "checking null crtc?\n");
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+void
+intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->new_encoder =
+ to_intel_encoder(connector->base.encoder);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ }
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+void
+intel_modeset_commit_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->base.encoder = &connector->new_encoder->base;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->base.crtc = &encoder->new_crtc->base;
+ }
+}
+
+struct drm_display_mode *
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return ERR_PTR(-ENOMEM);
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+ encoder_funcs = encoder->base.helper_private;
+ if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto fail;
+ }
+ }
+
+ if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto fail;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ return adjusted_mode;
+fail:
+ drm_mode_destroy(dev, adjusted_mode);
+ return ERR_PTR(-EINVAL);
+}
+
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+ unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_crtc *tmp_crtc;
+
+ *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+ /* Check which crtcs have changed outputs connected to them, these need
+ * to be part of the prepare_pipes mask. We don't (yet) support global
+ * modeset across multiple crtcs, so modeset_pipes will only have one
+ * bit set at most. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder == &connector->new_encoder->base)
+ continue;
+
+ if (connector->base.encoder) {
+ tmp_crtc = connector->base.encoder->crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (connector->new_encoder)
+ *prepare_pipes |=
+ 1 << connector->new_encoder->new_crtc->pipe;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc == &encoder->new_crtc->base)
+ continue;
+
+ if (encoder->base.crtc) {
+ tmp_crtc = encoder->base.crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (encoder->new_crtc)
+ *prepare_pipes |= 1 << encoder->new_crtc->pipe;
+ }
+
+ /* Check for any pipes that will be fully disabled ... */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool used = false;
+
+ /* Don't try to disable disabled crtcs. */
+ if (!intel_crtc->base.enabled)
+ continue;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == intel_crtc)
+ used = true;
+ }
+
+ if (!used)
+ *disable_pipes |= 1 << intel_crtc->pipe;
+ }
+
+	/* set_mode is also used to update properties on live display pipes. */
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled)
+ *prepare_pipes |= 1 << intel_crtc->pipe;
+
+ /* We only support modeset on one single crtc, hence we need to do that
+	 * only for the passed-in crtc iff we change anything other than just
+	 * disabling crtcs.
+ *
+ * This is actually not true, to be fully compatible with the old crtc
+ * helper we automatically disable _any_ output (i.e. doesn't need to be
+ * connected to the crtc we're modesetting on) if it's disconnected.
+	 * Which is a rather nutty api (since changing the output configuration
+ * without userspace's explicit request can lead to confusion), but
+ * alas. Hence we currently need to modeset on all pipes we prepare. */
+ if (*prepare_pipes)
+ *modeset_pipes = *prepare_pipes;
+
+ /* ... and mask these out. */
+ *modeset_pipes &= ~(*disable_pipes);
+ *prepare_pipes &= ~(*disable_pipes);
+}
+
+bool
+intel_crtc_in_use(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ return true;
+
+ return false;
+}
+
+void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc;
+ struct drm_connector *connector;
+
+ list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!intel_encoder->base.crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe))
+ intel_encoder->connectors_active = false;
+ }
+
+ intel_modeset_commit_output_state(dev);
+
+ /* Update computed state. */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_object_property_set_value(&connector->base,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
+
+ intel_encoder = to_intel_encoder(connector->encoder);
+ intel_encoder->connectors_active = true;
+ }
+ }
+}
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+ list_for_each_entry((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+		if (mask & (1 << (intel_crtc)->pipe)) \
+
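+/*
+ * Iterates over the crtcs whose pipe bit is set in mask.  The macro
+ * ends in an if without an else, so the caller supplies the loop body
+ * as the dangling statement; beware of adding an else right after it.
+ */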
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* This also checks the encoder/connector hw state with the
+ * ->get_hw_state callbacks. */
+ intel_connector_check_state(connector);
+
+ WARN(&connector->new_encoder->base != connector->base.encoder,
+ "connector's staged encoder doesn't match current encoder\n");
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+ enum pipe pipe, tracked_pipe;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ WARN(&encoder->new_crtc->base != encoder->base.crtc,
+		    "encoder's staged crtc doesn't match current crtc\n");
+ WARN(encoder->connectors_active && !encoder->base.crtc,
+ "encoder's active_connectors set, but no crtc\n");
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+ enabled = true;
+ if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+ active = true;
+ }
+ WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch "
+ "(expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+ WARN(active && !encoder->base.crtc,
+ "active encoder with no crtc\n");
+
+ WARN(encoder->connectors_active != active,
+ "encoder's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ WARN(active != encoder->connectors_active,
+ "encoder's hw state doesn't match sw tracking "
+ "(expected %i, found %i)\n",
+ encoder->connectors_active, active);
+
+ if (!encoder->base.crtc)
+ continue;
+
+ tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ WARN(active && pipe != tracked_pipe,
+		    "active encoder's pipe doesn't match "
+ "(expected %i, found %i)\n",
+ tracked_pipe, pipe);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.base.id);
+
+ WARN(crtc->active && !crtc->base.enabled,
+ "active crtc, but not enabled in sw tracking\n");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ enabled = true;
+ if (encoder->connectors_active)
+ active = true;
+ }
+ WARN(active != crtc->active,
+ "crtc's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, crtc->active);
+ WARN(enabled != crtc->base.enabled,
+ "crtc's computed enabled state doesn't match tracked enabled state "
+ "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+ assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+ }
+}
+
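+/*
+ * Core modeset sequence: compute the affected pipe masks, disable
+ * every pipe that changes, commit the staged output state, program
+ * the DPLLs and encoders via ->crtc_mode_set, then re-enable.  On
+ * failure the saved mode/hwmode are restored so sw state stays
+ * consistent.
+ */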
+bool
+intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct intel_crtc *intel_crtc;
+ unsigned disable_pipes, prepare_pipes, modeset_pipes;
+ bool ret = true;
+
+ intel_modeset_affected_pipes(crtc, &modeset_pipes,
+ &prepare_pipes, &disable_pipes);
+
+ DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+ modeset_pipes, prepare_pipes, disable_pipes);
+
+ for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+ intel_crtc_disable(&intel_crtc->base);
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+
+ /* Hack: Because we don't (yet) support global modeset on multiple
+ * crtcs, we don't keep track of the new mode for more than one crtc.
+ * Hence simply check whether any bit is set in modeset_pipes in all the
+	 * pieces of code that are not yet converted to deal with multiple crtcs
+ * changing their mode at the same time. */
+ adjusted_mode = NULL;
+ if (modeset_pipes) {
+ adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
+ if (IS_ERR(adjusted_mode)) {
+ return false;
+ }
+ }
+
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ if (intel_crtc->base.enabled)
+ dev_priv->display.crtc_disable(&intel_crtc->base);
+ }
+
+ /* crtc->mode is already used by the ->mode_set callbacks, hence we need
+	 * to set it here already even though we also pass it down the callchain.
+ */
+ if (modeset_pipes)
+ crtc->mode = *mode;
+
+ /* Only after disabling all output pipelines that will be changed can we
+	 * update the output configuration. */
+ intel_modeset_update_state(dev, prepare_pipes);
+
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
+ */
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = !intel_crtc_mode_set(&intel_crtc->base,
+ mode, adjusted_mode,
+ x, y, fb);
+ if (!ret)
+ goto done;
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ dev_priv->display.crtc_enable(&intel_crtc->base);
+
+ if (modeset_pipes) {
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+ }
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret && crtc->enabled) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ } else {
+ intel_modeset_check_state(dev);
+ }
+
+ return ret;
+}
+
+#undef for_each_intel_crtc_masked
+
+void
+intel_set_config_free(struct intel_set_config *config)
+{
+ if (!config)
+ return;
+
+ free(config->save_connector_encoders, M_DRM);
+ free(config->save_encoder_crtcs, M_DRM);
+ free(config, M_DRM);
+}
+
+int
+intel_set_config_save_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int count;
+
+ config->save_encoder_crtcs =
+ malloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_crtc *), M_DRM, M_WAITOK | M_ZERO);
+ if (!config->save_encoder_crtcs)
+ return -ENOMEM;
+
+ config->save_connector_encoders =
+ malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_encoder *),M_DRM, M_WAITOK | M_ZERO);
+ if (!config->save_connector_encoders)
+ return -ENOMEM;
+
+ /* Copy data. Note that driver private data is not affected.
+ * Should anything bad happen only the expected state is
+	 * restored, not the driver's personal bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ config->save_encoder_crtcs[count++] = encoder->crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ config->save_connector_encoders[count++] = connector->encoder;
+ }
+
+ return 0;
+}
+
+void
+intel_set_config_restore_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int count;
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(config->save_encoder_crtcs[count++]);
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ connector->new_encoder =
+ to_intel_encoder(config->save_connector_encoders[count++]);
+ }
+}
+
+void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ config->mode_changed = true;
+ } else if (set->fb == NULL) {
+ config->mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ config->mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ config->mode_changed = true;
+ } else
+ config->fb_changed = true;
+ }
+
+ if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+ config->fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ config->mode_changed = true;
+ }
+}
+
+int
+intel_modeset_stage_output_state(struct drm_device *dev,
+ struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ struct drm_crtc *new_crtc;
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ int count, ro;
+
+	/* The upper layers ensure that we either disable a crtc or have a list
+ * of connectors. For paranoia, double-check this. */
+ WARN_ON(!set->fb && (set->num_connectors != 0));
+ WARN_ON(set->fb && (set->num_connectors == 0));
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+		/* Otherwise traverse the passed-in connector list and get encoders
+ * for them. */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base) {
+ connector->new_encoder = connector->encoder;
+ break;
+ }
+ }
+
+ /* If we disable the crtc, disable all its connectors. Also, if
+ * the connector is on the changing crtc but not on the new
+ * connector list, disable it. */
+ if ((!set->fb || ro == set->num_connectors) &&
+ connector->base.encoder &&
+ connector->base.encoder->crtc == set->crtc) {
+ connector->new_encoder = NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+ }
+
+ if (&connector->new_encoder->base != connector->base.encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* connector->new_encoder is now updated for all connectors. */
+
+ /* Update crtc of enabled connectors. */
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (!connector->new_encoder)
+ continue;
+
+ new_crtc = connector->new_encoder->base.crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
+ return -EINVAL;
+ }
+ connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ new_crtc->base.id);
+ }
+
+	/* Check for any encoders that need to be disabled. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder == encoder) {
+ WARN_ON(!connector->new_encoder->new_crtc);
+
+ goto next_encoder;
+ }
+ }
+ encoder->new_crtc = NULL;
+next_encoder:
+ /* Only now check for crtc changes so we don't miss encoders
+ * that will be disabled. */
+ if (&encoder->new_crtc->base != encoder->base.crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* Now we've also updated encoder->new_crtc for all encoders. */
+
+ return 0;
+}
+
+int
+intel_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_mode_set save_set;
+ struct intel_set_config *config;
+ int ret;
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ /* The fb helper likes to play gross jokes with ->mode_set_config.
+ * Unfortunately the crtc helper doesn't do much at all for this case,
+ * so we have to cope with this madness until the fb helper is fixed up. */
+ if (set->fb && set->num_connectors == 0)
+ return 0;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ }
+
+ dev = set->crtc->dev;
+
+ ret = -ENOMEM;
+ config = malloc(sizeof(*config), M_DRM, M_WAITOK | M_ZERO);
+ if (!config)
+ goto out_config;
+
+ ret = intel_set_config_save_state(dev, config);
+ if (ret)
+ goto out_config;
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* Compute whether we need a full modeset, only an fb base update or no
+ * change at all. In the future we might also check whether only the
+ * mode changed, e.g. for LVDS where we only change the panel fitter in
+ * such cases. */
+ intel_set_config_compute_mode_changes(set, config);
+
+ ret = intel_modeset_stage_output_state(dev, set, config);
+ if (ret)
+ goto fail;
+
+ if (config->mode_changed) {
+ if (set->mode) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ }
+
+ if (!intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else if (config->fb_changed) {
+ ret = intel_pipe_set_base(set->crtc,
+ set->x, set->y, set->fb);
+ }
+
+ intel_set_config_free(config);
+
+ return 0;
+
+fail:
+ intel_set_config_restore_state(dev, config);
+
+ /* Try to restore the config */
+ if (config->mode_changed &&
+ !intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+out_config:
+ intel_set_config_free(config);
+ return ret;
+}
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .cursor_set = intel_crtc_cursor_set,
+ .cursor_move = intel_crtc_cursor_move,
+ .gamma_set = intel_crtc_gamma_set,
+ .set_config = intel_crtc_set_config,
+ .destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
+};
+
+void
+intel_cpu_pll_init(struct drm_device *dev)
+{
+ if (IS_HASWELL(dev))
+ intel_ddi_pll_init(dev);
+}
+
+void
+intel_pch_pll_init(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->num_pch_pll == 0) {
+ DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
+ return;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
+ dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
+ dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
+ }
+}
+
+void
+intel_crtc_init(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc;
+ int i;
+
+ intel_crtc = malloc(sizeof(struct intel_crtc) +
+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ M_DRM, M_WAITOK | M_ZERO);
+ if (intel_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+ for (i = 0; i < 256; i++) {
+ intel_crtc->lut_r[i] = i;
+ intel_crtc->lut_g[i] = i;
+ intel_crtc->lut_b[i] = i;
+ }
+
+ /* Swap pipes & planes for FBC on pre-965 */
+ intel_crtc->pipe = pipe;
+ intel_crtc->plane = pipe;
+ intel_crtc->cpu_transcoder = pipe;
+ if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+ DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
+ intel_crtc->plane = !pipe;
+ }
+
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
+ intel_crtc->bpp = 24; /* default for pre-Ironlake */
+
+ drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+}
+
+int
+intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+
+ if (!drmmode_obj) {
+ DRM_ERROR("no such CRTC id\n");
+ return -EINVAL;
+ }
+
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ pipe_from_crtc_id->pipe = crtc->pipe;
+
+ return 0;
+}
+
+int
+intel_encoder_clones(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_encoder *source_encoder;
+ int index_mask = 0;
+ int entry = 0;
+
+ list_for_each_entry(source_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+
+ if (encoder == source_encoder)
+ index_mask |= (1 << entry);
+
+ /* Intel hw has only one MUX where encoders could be cloned. */
+ if (encoder->cloneable && source_encoder->cloneable)
+ index_mask |= (1 << entry);
+
+ entry++;
+ }
+
+ return index_mask;
+}
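+
+/*
+ * Editor's illustration (not part of the driver): with an encoder list
+ * of A, B, C where A and C are cloneable and B is not, the mask
+ * computed for A sets bit 0 (A is always its own clone) and bit 2
+ * (A and C share the single cloning MUX), giving index_mask == 0x5.
+ */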
+
+bool
+has_edp_a(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (!IS_MOBILE(dev))
+ return false;
+
+ if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+ return false;
+
+ if (IS_GEN5(dev) &&
+ (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
+ return false;
+
+ return true;
+}
+
+void
+intel_setup_outputs(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ bool dpd_is_edp = false;
+ bool has_lvds;
+
+ has_lvds = intel_lvds_init(dev);
+ if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+ /* disable the panel fitter on everything but LVDS */
+ I915_WRITE(PFIT_CONTROL, 0);
+ }
+
+ if (!(IS_HASWELL(dev) &&
+ (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ intel_crt_init(dev);
+
+ if (IS_HASWELL(dev)) {
+ int found;
+
+ /* Haswell uses DDI functions to detect digital outputs */
+ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+ /* DDI A only supports eDP */
+ if (found)
+ intel_ddi_init(dev, PORT_A);
+
+ /* DDI B, C and D detection is indicated by the SFUSE_STRAP
+ * register */
+ found = I915_READ(SFUSE_STRAP);
+
+ if (found & SFUSE_STRAP_DDIB_DETECTED)
+ intel_ddi_init(dev, PORT_B);
+ if (found & SFUSE_STRAP_DDIC_DETECTED)
+ intel_ddi_init(dev, PORT_C);
+ if (found & SFUSE_STRAP_DDID_DETECTED)
+ intel_ddi_init(dev, PORT_D);
+ } else if (HAS_PCH_SPLIT(dev)) {
+ int found;
+ dpd_is_edp = intel_dpd_is_edp(dev);
+
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A, PORT_A);
+
+ if (I915_READ(HDMIB) & PORT_DETECTED) {
+ /* PCH SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB, true);
+ if (!found)
+ intel_hdmi_init(dev, HDMIB, PORT_B);
+ if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+ intel_dp_init(dev, PCH_DP_B, PORT_B);
+ }
+
+ if (I915_READ(HDMIC) & PORT_DETECTED)
+ intel_hdmi_init(dev, HDMIC, PORT_C);
+
+ if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
+ intel_hdmi_init(dev, HDMID, PORT_D);
+
+ if (I915_READ(PCH_DP_C) & DP_DETECTED)
+ intel_dp_init(dev, PCH_DP_C, PORT_C);
+
+ if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
+ } else if (IS_VALLEYVIEW(dev)) {
+ int found;
+
+ /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+ if (I915_READ(DP_C) & DP_DETECTED)
+ intel_dp_init(dev, DP_C, PORT_C);
+
+ if (I915_READ(SDVOB) & PORT_DETECTED) {
+ /* SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, SDVOB, true);
+ if (!found)
+ intel_hdmi_init(dev, SDVOB, PORT_B);
+ if (!found && (I915_READ(DP_B) & DP_DETECTED))
+ intel_dp_init(dev, DP_B, PORT_B);
+ }
+
+ if (I915_READ(SDVOC) & PORT_DETECTED)
+ intel_hdmi_init(dev, SDVOC, PORT_C);
+
+ } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+ bool found = false;
+
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOB\n");
+ found = intel_sdvo_init(dev, SDVOB, true);
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+ intel_hdmi_init(dev, SDVOB, PORT_B);
+ }
+
+ if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_B\n");
+ intel_dp_init(dev, DP_B, PORT_B);
+ }
+ }
+
+ /* Before G4X, SDVOC doesn't have its own detect register, so key off SDVOB's */
+
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOC\n");
+ found = intel_sdvo_init(dev, SDVOC, false);
+ }
+
+ if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
+ if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+ intel_hdmi_init(dev, SDVOC, PORT_C);
+ }
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_C\n");
+ intel_dp_init(dev, DP_C, PORT_C);
+ }
+ }
+
+ if (SUPPORTS_INTEGRATED_DP(dev) &&
+ (I915_READ(DP_D) & DP_DETECTED)) {
+ DRM_DEBUG_KMS("probing DP_D\n");
+ intel_dp_init(dev, DP_D, PORT_D);
+ }
+ } else if (IS_GEN2(dev))
+ intel_dvo_init(dev);
+
+ if (SUPPORTS_TV(dev))
+ intel_tv_init(dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_clones =
+ intel_encoder_clones(encoder);
+ }
+
+ intel_init_pch_refclk(dev);
+
+ drm_helper_move_panel_connectors_to_head(dev);
+}
+
+void
+intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ drm_framebuffer_cleanup(fb);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+
+ free(intel_fb, M_DRM);
+}
+
+int
+intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+
+ drm_gem_object_reference(&obj->base);
+ return drm_handle_create(file, &obj->base, handle);
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+ .destroy = intel_user_framebuffer_destroy,
+ .create_handle = intel_user_framebuffer_create_handle,
+};
+
+int
+intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("hardware does not support tiling Y\n");
+ return -EINVAL;
+ }
+
+ if (mode_cmd->pitches[0] & 63) {
+ DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+ mode_cmd->pitches[0]);
+ return -EINVAL;
+ }
+
+ /* FIXME <= Gen4 stride limits are a bit unclear */
+ if (mode_cmd->pitches[0] > 32768) {
+ DRM_DEBUG("pitch (%d) must be less than 32768\n",
+ mode_cmd->pitches[0]);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ mode_cmd->pitches[0] != obj->stride) {
+ DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], obj->stride);
+ return -EINVAL;
+ }
+
+ /* Reject formats not supported by any plane early. */
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ if (INTEL_INFO(dev)->gen > 3) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
+ if (INTEL_INFO(dev)->gen < 5) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
+
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0)
+ return -EINVAL;
+
+ ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+
+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ intel_fb->obj = obj;
+ return 0;
+}
+
+struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+ mode_cmd->handles[0]));
+ if (&obj->base == NULL)
+ return ERR_PTR(-ENOENT);
+
+ return intel_framebuffer_create(dev, mode_cmd, obj);
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+ .fb_create = intel_user_framebuffer_create,
+ .output_poll_changed = intel_fb_output_poll_changed,
+};
+
+/* Set up chip specific display functions */
+void
+intel_init_display(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ /* We always want a DPMS function */
+ if (IS_HASWELL(dev)) {
+ dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_enable = ironlake_crtc_enable;
+ dev_priv->display.crtc_disable = ironlake_crtc_disable;
+ dev_priv->display.off = ironlake_crtc_off;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else {
+ dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display.off = i9xx_crtc_off;
+ dev_priv->display.update_plane = i9xx_update_plane;
+ }
+
+ /* Returns the core display clock speed */
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ valleyview_get_display_clock_speed;
+ else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ dev_priv->display.get_display_clock_speed =
+ i945_get_display_clock_speed;
+ else if (IS_I915G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915_get_display_clock_speed;
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+ dev_priv->display.get_display_clock_speed =
+ i9xx_misc_get_display_clock_speed;
+ else if (IS_I915GM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915gm_get_display_clock_speed;
+ else if (IS_I865G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i865_get_display_clock_speed;
+ else if (IS_I85X(dev))
+ dev_priv->display.get_display_clock_speed =
+ i855_get_display_clock_speed;
+ else /* 852, 830 */
+ dev_priv->display.get_display_clock_speed =
+ i830_get_display_clock_speed;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_GEN5(dev)) {
+ dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_GEN6(dev)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+ dev_priv->display.write_eld = haswell_write_eld;
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.write_eld = g4x_write_eld;
+ }
+
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ dev_priv->display.queue_flip = intel_gen2_queue_flip;
+ break;
+
+ case 3:
+ dev_priv->display.queue_flip = intel_gen3_queue_flip;
+ break;
+
+ case 4:
+ case 5:
+ dev_priv->display.queue_flip = intel_gen4_queue_flip;
+ break;
+
+ case 6:
+ dev_priv->display.queue_flip = intel_gen6_queue_flip;
+ break;
+ case 7:
+ dev_priv->display.queue_flip = intel_gen7_queue_flip;
+ break;
+ }
+}
+
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times. This quirk makes sure that's the case for
+ * affected systems.
+ */
+void
+quirk_pipea_force(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+ DRM_INFO("applying pipe a force quirk\n");
+}
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+void
+quirk_ssc_force_disable(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+void
+quirk_invert_brightness(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_device *dev);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_device *dev);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+int
+intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ printf("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+#define PCI_ANY_ID (u_int16_t) (~0U)
+
+static struct intel_quirk intel_quirks[] = {
+ /* HP Mini needs pipe A force quirk (LP: #322104) */
+ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+ /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+ { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+ /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+ /* 830/845 need to leave pipe A & dpll A up */
+ { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+};
+
+void
+intel_init_quirks(struct drm_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (dev->pci_device == q->device &&
+ (dev->pci_subvendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (dev->pci_subdevice == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(dev);
+ }
+}
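+
+/*
+ * Editor's note: PCI_ANY_ID entries act as wildcards in the match
+ * above, so the { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }
+ * entry applies to every 845G regardless of subsystem vendor/device.
+ */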
+
+/* Disable the VGA plane that we never use */
+void
+i915_disable_vga(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u8 sr1;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+#if 0
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+#endif
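+ /* SR01 is the VGA sequencer clocking mode register; setting
+ * bit 5 turns the screen off. */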
+ outb(VGA_SR_INDEX, SR01);
+ sr1 = inb(VGA_SR_DATA);
+ outb(VGA_SR_DATA, sr1 | 1<<5);
+#if 0
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+#endif
+ DELAY(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
+void
+intel_modeset_init_hw(struct drm_device *dev)
+{
+ /* We attempt to initialize the necessary power wells early during
+ * initialization, so the subsystems that expect power to be enabled
+ * can work.
+ */
+ intel_init_power_wells(dev);
+
+ intel_prepare_ddi(dev);
+
+ intel_init_clock_gating(dev);
+
+ DRM_LOCK();
+ intel_enable_gt_powersave(dev);
+ DRM_UNLOCK();
+}
+
+void
+intel_modeset_init(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int i, ret;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ dev->mode_config.funcs = &intel_mode_funcs;
+
+ intel_init_quirks(dev);
+
+ intel_init_pm(dev);
+
+ intel_init_display(dev);
+
+ if (IS_GEN2(dev)) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else if (IS_GEN3(dev)) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ }
+#ifdef notyet
+ dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
+#else
+ printf("%s todo set fb base\n", __func__);
+#endif
+
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
+ dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
+
+ for (i = 0; i < dev_priv->num_pipe; i++) {
+ intel_crtc_init(dev, i);
+ ret = intel_plane_init(dev, i);
+ if (ret)
+ DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
+ }
+
+ intel_cpu_pll_init(dev);
+ intel_pch_pll_init(dev);
+
+ /* Just disable it once at startup */
+ i915_disable_vga(dev);
+ intel_setup_outputs(dev);
+}
+
+void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ connector->encoder->connectors_active = false;
+ connector->encoder->base.crtc = NULL;
+}
+
+void
+intel_enable_pipe_a(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector *crt = NULL;
+ struct intel_load_detect_pipe load_detect_temp;
+
+ /* We can't just switch on pipe A; we need to set things up with a
+ * proper mode and output configuration. As a gross hack, enable pipe A
+ * by enabling the load detect pipe once. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+ crt = &connector->base;
+ break;
+ }
+ }
+
+ if (!crt)
+ return;
+
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
+}
+
+bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+ struct inteldrm_softc *dev_priv = crtc->base.dev->dev_private;
+ u32 reg, val;
+
+ if (dev_priv->num_pipe == 1)
+ return true;
+
+ reg = DSPCNTR(!crtc->plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) &&
+ (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+ return false;
+
+ return true;
+}
+
+void
+intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 reg;
+
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ reg = PIPECONF(crtc->cpu_transcoder);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+ /* We need to sanitize the plane -> pipe mapping first because this will
+ * disable the crtc (and hence change the state) if it is wrong. Note
+ * that gen4+ has a fixed plane -> pipe mapping. */
+ if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ struct intel_connector *connector;
+ bool plane;
+
+ DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+ crtc->base.base.id);
+
+ /* Pipe has the wrong plane attached and the plane is active.
+ * Temporarily change the plane mapping and disable everything
+ * ... */
+ plane = crtc->plane;
+ crtc->plane = !plane;
+ dev_priv->display.crtc_disable(&crtc->base);
+ crtc->plane = plane;
+
+ /* ... and break all links. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->base.crtc != &crtc->base)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+
+ WARN_ON(crtc->active);
+ crtc->base.enabled = false;
+ }
+
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ crtc->pipe == PIPE_A && !crtc->active) {
+ /* BIOS forgot to enable pipe A, this mostly happens after
+ * resume. Force-enable the pipe to fix this; in the update_dpms
+ * call below we restore the pipe to the right state, but leave
+ * the required bits on. */
+ intel_enable_pipe_a(dev);
+ }
+
+ /* Adjust the state of the output pipe according to whether we
+ * have active connectors/encoders. */
+ intel_crtc_update_dpms(&crtc->base);
+
+ if (crtc->active != crtc->base.enabled) {
+ struct intel_encoder *encoder;
+
+ /* This can happen either due to bugs in the get_hw_state
+ * functions or because the pipe is force-enabled due to the
+ * pipe A quirk. */
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+ crtc->base.base.id,
+ crtc->base.enabled ? "enabled" : "disabled",
+ crtc->active ? "enabled" : "disabled");
+
+ crtc->base.enabled = crtc->active;
+
+ /* Because we only establish the connector -> encoder ->
+ * crtc links if something is active, this means the
+ * crtc is now deactivated. Break the links. connector
+ * -> encoder links are only established when things are
+ * actually up, hence no need to break them. */
+ WARN_ON(crtc->active);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ WARN_ON(encoder->connectors_active);
+ encoder->base.crtc = NULL;
+ }
+ }
+}
+
+void
+intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct intel_connector *connector;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* We need to check both for a crtc link (meaning that the
+ * encoder is active and trying to read from a pipe) and the
+ * pipe itself being active. */
+ bool has_active_crtc = encoder->base.crtc &&
+ to_intel_crtc(encoder->base.crtc)->active;
+
+ if (encoder->connectors_active && !has_active_crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ /* Connector is active, but has no active pipe. This is
+ * fallout from our resume register restoring. Disable
+ * the encoder manually again. */
+ if (encoder->base.crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+ encoder->disable(encoder);
+ }
+
+ /* Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+ }
+ /* Enabled encoders without active connectors will be fixed in
+ * the crtc fixup. */
+}
+
+void
+i915_redisable_vga(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+ }
+}
+
+/* Scan out the current hw modeset state, sanitize it and map it into the drm
+ * and i915 state tracking structures. */
+void
+intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ u32 tmp;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ if (IS_HASWELL(dev)) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc->cpu_transcoder = TRANSCODER_EDP;
+
+ DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+ pipe_name(pipe));
+ }
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
+ if (tmp & PIPECONF_ENABLE)
+ crtc->active = true;
+ else
+ crtc->active = false;
+
+ crtc->base.enabled = crtc->active;
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+ crtc->base.base.id,
+ crtc->active ? "enabled" : "disabled");
+ }
+
+ if (IS_HASWELL(dev))
+ intel_ddi_setup_hw_pll_state(dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ encoder->base.crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ encoder->connectors_active = false;
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ encoder->base.crtc ? "enabled" : "disabled",
+ pipe);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->get_hw_state(connector)) {
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+ connector->encoder->connectors_active = true;
+ connector->base.encoder = &connector->encoder->base;
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ connector->base.encoder ? "enabled" : "disabled");
+ }
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ intel_sanitize_encoder(encoder);
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_sanitize_crtc(crtc);
+ }
+
+ if (force_restore) {
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_set_mode(&crtc->base, &crtc->base.mode,
+ crtc->base.x, crtc->base.y, crtc->base.fb);
+ }
+
+ i915_redisable_vga(dev);
+ } else {
+ intel_modeset_update_staged_output_state(dev);
+ }
+
+ intel_modeset_check_state(dev);
+
+ drm_mode_config_reset(dev);
+}
+
+void
+intel_modeset_gem_init(struct drm_device *dev)
+{
+ intel_modeset_init_hw(dev);
+
+ intel_setup_overlay(dev);
+
+ intel_modeset_setup_hw_state(dev, false);
+}
+
+void
+intel_modeset_cleanup(struct drm_device *dev)
+{
+// struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+
+ drm_kms_helper_poll_fini(dev);
+ DRM_LOCK();
+
+#ifdef notyet
+ intel_unregister_dsm_handler();
+#endif
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Skip inactive CRTCs */
+ if (!crtc->fb)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ intel_increase_pllclock(crtc);
+ }
+
+ intel_disable_fbc(dev);
+
+ intel_disable_gt_powersave(dev);
+
+ ironlake_teardown_rc6(dev);
+
+ if (IS_VALLEYVIEW(dev))
+ vlv_init_dpio(dev);
+
+ DRM_UNLOCK();
+
+ /* Disable the irq before mode object teardown, for the irq might
+ * enqueue unpin/hotplug work. */
+ drm_irq_uninstall(dev);
+#ifdef notyet
+ cancel_work_sync(&dev_priv->hotplug_work);
+ cancel_work_sync(&dev_priv->rps.work);
+#endif
+
+ /* flush any delayed tasks or pending work */
+#ifdef notyet
+ flush_scheduled_work();
+#endif
+
+ drm_mode_config_cleanup(dev);
+}
+
+/*
+ * Return which encoder is currently attached to the connector.
+ */
+struct drm_encoder *
+intel_best_encoder(struct drm_connector *connector)
+{
+ return &intel_attached_encoder(connector)->base;
+}
+
+void
+intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
+
+/*
+ * set vga decode state - true == enable VGA decode
+ */
+int
+intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+{
+ printf("%s stub\n", __func__);
+ return EINVAL;
+#ifdef notyet
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u16 gmch_ctrl;
+
+ pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ if (state)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+ pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+ return 0;
+#endif
+}
diff --git a/sys/dev/pci/drm/i915/intel_dp.c b/sys/dev/pci/drm/i915/intel_dp.c
new file mode 100644
index 00000000000..71929c8b2b6
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_dp.c
@@ -0,0 +1,3085 @@
+/* $OpenBSD: intel_dp.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_crtc_helper.h>
+#include <dev/pci/drm/drm_edid.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+struct intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+bool is_edp(struct intel_dp *);
+bool is_pch_edp(struct intel_dp *);
+bool is_cpu_edp(struct intel_dp *);
+struct intel_dp *enc_to_intel_dp(struct drm_encoder *);
+struct intel_dp *intel_attached_dp(struct drm_connector *);
+int intel_dp_max_lane_count(struct intel_dp *);
+int intel_dp_max_link_bw(struct intel_dp *);
+int intel_dp_link_clock(uint8_t);
+int intel_dp_link_required(int, int);
+int intel_dp_max_data_rate(int, int);
+bool intel_dp_adjust_dithering(struct intel_dp *,
+ struct drm_display_mode *, bool);
+int intel_dp_mode_valid(struct drm_connector *, struct drm_display_mode *);
+uint32_t pack_aux(uint8_t *, int);
+void unpack_aux(uint32_t, uint8_t *, int);
+int intel_hrawclk(struct drm_device *);
+bool ironlake_edp_have_panel_power(struct intel_dp *);
+bool ironlake_edp_have_panel_vdd(struct intel_dp *);
+void intel_dp_check_edp(struct intel_dp *);
+int intel_dp_aux_ch(struct intel_dp *, uint8_t *, int, uint8_t *, int);
+int intel_dp_aux_native_write(struct intel_dp *, uint16_t, uint8_t *,
+ int);
+int intel_dp_aux_native_write_1(struct intel_dp *, uint16_t, uint8_t);
+int intel_dp_aux_native_read(struct intel_dp *, uint16_t, uint8_t *, int);
+int intel_dp_i2c_init(struct intel_dp *, struct intel_connector *,
+ const char *);
+bool intel_dp_mode_fixup(struct drm_encoder *, const struct drm_display_mode *,
+ struct drm_display_mode *);
+void intel_reduce_ratio(uint32_t *, uint32_t *);
+void intel_dp_compute_m_n(int, int, int, int, struct intel_dp_m_n *);
+void intel_dp_set_m_n(struct drm_crtc *, struct drm_display_mode *,
+ struct drm_display_mode *);
+void intel_dp_mode_set(struct drm_encoder *, struct drm_display_mode *,
+ struct drm_display_mode *);
+void ironlake_wait_panel_status(struct intel_dp *, u32, u32);
+void ironlake_wait_panel_on(struct intel_dp *);
+void ironlake_wait_panel_off(struct intel_dp *);
+void ironlake_wait_panel_power_cycle(struct intel_dp *);
+u32 ironlake_get_pp_control(struct inteldrm_softc *);
+void ironlake_edp_panel_vdd_on(struct intel_dp *);
+void ironlake_panel_vdd_off_sync(struct intel_dp *);
+void ironlake_edp_panel_vdd_off(struct intel_dp *, bool);
+void ironlake_edp_panel_on(struct intel_dp *);
+void ironlake_edp_panel_off(struct intel_dp *);
+void ironlake_edp_backlight_on(struct intel_dp *);
+void ironlake_edp_backlight_off(struct intel_dp *);
+void ironlake_edp_pll_on(struct intel_dp *);
+void ironlake_edp_pll_off(struct intel_dp *);
+void intel_dp_sink_dpms(struct intel_dp *, int);
+void intel_dp_prepare(struct drm_encoder *);
+void intel_dp_commit(struct drm_encoder *);
+void intel_dp_start_link_train(struct intel_dp *);
+void intel_dp_complete_link_train(struct intel_dp *);
+void intel_dp_dpms(struct drm_encoder *, int);
+bool intel_dp_aux_native_read_retry(struct intel_dp *, uint16_t, uint8_t *,
+ int);
+bool intel_dp_get_link_status(struct intel_dp *,
+ uint8_t link_status[DP_LINK_STATUS_SIZE]);
+uint8_t intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int);
+uint8_t intel_get_adjust_request_voltage(uint8_t adjust_request[2], int);
+uint8_t intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], int);
+uint8_t intel_dp_voltage_max(struct intel_dp *);
+uint8_t intel_dp_pre_emphasis_max(struct intel_dp *, uint8_t);
+void intel_get_adjust_train(struct intel_dp *,
+ uint8_t link_status[DP_LINK_STATUS_SIZE]);
+uint32_t intel_dp_signal_levels(uint8_t);
+uint32_t intel_gen6_edp_signal_levels(uint8_t);
+uint32_t intel_gen7_edp_signal_levels(uint8_t);
+uint8_t intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int);
+bool intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int);
+bool intel_channel_eq_ok(struct intel_dp *,
+ uint8_t link_status[DP_LINK_STATUS_SIZE]);
+bool intel_dp_set_link_train(struct intel_dp *, uint32_t, uint8_t);
+void intel_dp_link_down(struct intel_dp *);
+bool intel_dp_get_dpcd(struct intel_dp *);
+bool intel_dp_get_sink_irq(struct intel_dp *, u8 *);
+void intel_dp_handle_test_request(struct intel_dp *);
+void intel_dp_check_link_status(struct intel_dp *);
+enum drm_connector_status intel_dp_detect_dpcd(struct intel_dp *);
+enum drm_connector_status ironlake_dp_detect(struct intel_dp *);
+enum drm_connector_status g4x_dp_detect(struct intel_dp *);
+struct edid *intel_dp_get_edid(struct drm_connector *,
+ struct i2c_controller *);
+int intel_dp_get_edid_modes(struct drm_connector *,
+ struct i2c_controller *);
+enum drm_connector_status intel_dp_detect(struct drm_connector *, bool);
+int intel_dp_get_modes(struct drm_connector *);
+bool intel_dp_detect_audio(struct drm_connector *);
+int intel_dp_set_property(struct drm_connector *, struct drm_property *,
+ uint64_t);
+void intel_dp_destroy(struct drm_connector *);
+void intel_dp_encoder_destroy(struct drm_encoder *);
+void intel_dp_hot_plug(struct intel_encoder *);
+void intel_dp_add_properties(struct intel_dp *, struct drm_connector *);
+struct drm_device *intel_dp_to_dev(struct intel_dp *);
+bool intel_dp_get_hw_state(struct intel_encoder *, enum pipe *);
+void intel_disable_dp(struct intel_encoder *);
+void intel_post_disable_dp(struct intel_encoder *);
+void intel_enable_dp(struct intel_encoder *);
+void intel_pre_enable_dp(struct intel_encoder *);
+uint32_t intel_dp_signal_levels_hsw(uint8_t);
+void intel_dp_probe_oui(struct intel_dp *);
+void intel_dp_init_panel_power_sequencer(struct drm_device *,
+ struct intel_dp *, struct edp_power_seq *);
+void intel_dp_init_panel_power_sequencer_registers(struct drm_device *,
+ struct intel_dp *, struct edp_power_seq *);
+void ironlake_panel_vdd_tick(void *);
+void ironlake_panel_vdd_work(void *, void *);
+int intel_dp_i2c_aux_ch(struct i2c_controller *, int, uint8_t, uint8_t *);
+
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+bool
+is_edp(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+}
+
+/**
+ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a PCH DP port attached
+ * to an eDP panel, false otherwise. Helpful for determining whether we
+ * may need FDI resources for a given DP output or not.
+ */
+bool
+is_pch_edp(struct intel_dp *intel_dp)
+{
+ return intel_dp->is_pch_edp;
+}
+
+/**
+ * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a CPU eDP port.
+ */
+bool
+is_cpu_edp(struct intel_dp *intel_dp)
+{
+ return is_edp(intel_dp) && !is_pch_edp(intel_dp);
+}
+
+struct drm_device *
+intel_dp_to_dev(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.base.dev;
+}
+
+struct intel_dp *
+intel_attached_dp(struct drm_connector *connector)
+{
+ return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+}
+
+/**
+ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
+ * @encoder: DRM encoder
+ *
+ * Return true if @encoder corresponds to a PCH attached eDP panel. Needed
+ * by intel_display.c.
+ */
+bool
+intel_encoder_is_pch_edp(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp;
+
+ if (!encoder)
+ return false;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ return is_pch_edp(intel_dp);
+}
+
+void
+intel_edp_link_config(struct intel_encoder *intel_encoder,
+ int *lane_num, int *link_bw)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ *lane_num = intel_dp->lane_count;
+ *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+}
+
+int
+intel_edp_target_clock(struct intel_encoder *intel_encoder,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+
+ if (intel_connector->panel.fixed_mode)
+ return intel_connector->panel.fixed_mode->clock;
+ else
+ return mode->clock;
+}
+
+int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
+{
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
+
+int
+intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
+/*
+ * The units on the numbers in the next two are... bizarre. Examples will
+ * make it clearer; this one parallels an example in the eDP spec.
+ *
+ * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
+ *
+ * 270000 * 1 * 8 / 10 == 216000
+ *
+ * The actual data capacity of that configuration is 2.16Gbit/s, so the
+ * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
+ * or equivalently, kilopixels per second - so for 1680x1050R it'd be
+ * 119000. At 18bpp that's 2142000 kilobits per second.
+ *
+ * Thus the strange-looking division by 10 in intel_dp_link_required, to
+ * get the result in decakilobits instead of kilobits.
+ */
+
+int
+intel_dp_link_required(int pixel_clock, int bpp)
+{
+ return (pixel_clock * bpp + 9) / 10;
+}
+
+int
+intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+ return (max_link_clock * max_lanes * 8) / 10;
+}
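+
+/*
+ * Worked example (editor's illustration, using the numbers from the
+ * comment above): for 1680x1050R, mode->clock is 119000. At 18bpp,
+ * intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10
+ * == 214200, which fits within intel_dp_max_data_rate(270000, 1)
+ * == 216000 for a single 2.7GHz lane.
+ */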
+
+bool
+intel_dp_adjust_dithering(struct intel_dp *intel_dp,
+ struct drm_display_mode *mode,
+ bool adjust_mode)
+{
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+ int max_rate, mode_rate;
+
+ mode_rate = intel_dp_link_required(mode->clock, 24);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(mode->clock, 18);
+ if (mode_rate > max_rate)
+ return false;
+
+ if (adjust_mode)
+ mode->private_flags
+ |= INTEL_MODE_DP_FORCE_6BPC;
+
+ return true;
+ }
+
+ return true;
+}
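+
+/*
+ * Editor's illustration: with two 1.62GHz lanes the link carries
+ * intel_dp_max_data_rate(162000, 2) == 259200. The 1680x1050R mode
+ * above needs 285600 at 24bpp, which doesn't fit, but only 214200 at
+ * 18bpp, so the function tags the mode INTEL_MODE_DP_FORCE_6BPC
+ * (when adjust_mode is set) and reports it as usable.
+ */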
+
+int
+intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+
+ if (is_edp(intel_dp) && fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ if (!intel_dp_adjust_dithering(intel_dp, mode, false))
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
+
+uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
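+
+/*
+ * Editor's illustration: the AUX data registers are big-endian, so
+ * pack_aux() of the bytes { 0x12, 0x34, 0x56 } yields 0x12345600,
+ * and unpack_aux(0x12345600, dst, 3) recovers the same three bytes.
+ */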
+
+/* hrawclock is 1/4 the FSB frequency */
+int
+intel_hrawclk(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t clkcfg;
+
+ /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+ if (IS_VALLEYVIEW(dev))
+ return 200;
+
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100;
+ case CLKCFG_FSB_533:
+ return 133;
+ case CLKCFG_FSB_667:
+ return 166;
+ case CLKCFG_FSB_800:
+ return 200;
+ case CLKCFG_FSB_1067:
+ return 266;
+ case CLKCFG_FSB_1333:
+ return 333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400;
+ default:
+ return 133;
+ }
+}
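+
+/*
+ * Editor's illustration: with a 1067MHz FSB strap (CLKCFG_FSB_1067)
+ * intel_hrawclk() returns 266, i.e. roughly 1067/4 MHz, matching the
+ * "1/4 the FSB frequency" rule above.
+ */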
+
+bool
+ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
+}
+
+bool
+ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
+}
+
+void
+intel_dp_check_edp(struct intel_dp *intel_dp)
+{
+#ifdef DRMDEBUG
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+#endif
+
+ if (!is_edp(intel_dp))
+ return;
+ if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+ WARN(1, "eDP powered off while attempting aux channel communication.\n");
+ DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
+ }
+}
+
+int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ uint32_t output_reg = intel_dp->output_reg;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try, precharge;
+
+ if (IS_HASWELL(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ ch_ctl = DPA_AUX_CH_CTL;
+ ch_data = DPA_AUX_CH_DATA1;
+ break;
+ case PORT_B:
+ ch_ctl = PCH_DPB_AUX_CH_CTL;
+ ch_data = PCH_DPB_AUX_CH_DATA1;
+ break;
+ case PORT_C:
+ ch_ctl = PCH_DPC_AUX_CH_CTL;
+ ch_data = PCH_DPC_AUX_CH_DATA1;
+ break;
+ case PORT_D:
+ ch_ctl = PCH_DPD_AUX_CH_CTL;
+ ch_data = PCH_DPD_AUX_CH_DATA1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ intel_dp_check_edp(intel_dp);
+ /* The AUX clock divider is based on the hrawclk, and the AUX bit
+ * clock should run at about 2MHz. So take the hrawclk value,
+ * divide by 2, and use that.
+ *
+ * Note that PCH attached eDP panels should use a 125MHz input
+ * clock divider.
+ */
+ if (is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev))
+ aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+ else if (IS_VALLEYVIEW(dev))
+ aux_clock_divider = 100;
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
+ else
+ aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+ } else if (HAS_PCH_SPLIT(dev))
+ aux_clock_divider = howmany(intel_pch_rawclk(dev), 2);
+ else
+ aux_clock_divider = intel_hrawclk(dev) / 2;
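+ /* Editor's illustration: on a part with an 800MHz FSB, hrawclk
+ * is 200MHz, so the divider becomes 100 and the AUX channel
+ * ticks at 200MHz / 100 = the desired 2MHz. */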
+
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ drm_msleep(1, "915ach");
+ }
+
+ if (try == 3) {
+ WARN(1, "dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
+ return -EBUSY;
+ }
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ for (;;) {
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ DELAY(100);
+ }
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Timeouts occur when the device isn't connected, so they're
+ * "normal" -- don't fill the kernel log with these */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
+
+ return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+int
+intel_dp_aux_native_write(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ intel_dp_check_edp(intel_dp);
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ DELAY(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
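+
+/*
+ * Editor's illustration: a native write of two bytes to DPCD address
+ * 0x0102 builds the header msg = { AUX_NATIVE_WRITE << 4, 0x01, 0x02,
+ * 0x01 } (length field is bytes - 1) followed by the two payload
+ * bytes, for msg_bytes == 6.
+ */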
+
+/* Write a single byte to the aux channel in native mode */
+int
+intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t byte)
+{
+ return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+int
+intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ intel_dp_check_edp(intel_dp);
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ DELAY(100);
+ else
+ return -EIO;
+ }
+}
+
+int
+intel_dp_i2c_aux_ch(struct i2c_controller *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->ic_cookie;
+ struct intel_dp *intel_dp = container_of(adapter,
+ struct intel_dp,
+ adapter);
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ unsigned retry;
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ intel_dp_check_edp(intel_dp);
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (retry = 0; retry < 5; retry++) {
+ ret = intel_dp_aux_ch(intel_dp,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+
+ switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return EIO;
+ case AUX_NATIVE_REPLY_DEFER:
+ DELAY(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return EIO;
+ }
+
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_i2c nack\n");
+ return EIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_i2c defer\n");
+ DELAY(100);
+ break;
+ default:
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+ return EIO;
+ }
+ }
+
+ DRM_ERROR("too many retries, giving up\n");
+ return EIO;
+}
+
+int
+intel_dp_i2c_init(struct intel_dp *intel_dp,
+ struct intel_connector *intel_connector, const char *name)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("i2c_init %s\n", name);
+ intel_dp->algo.running = false;
+ intel_dp->algo.address = 0;
+ intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
+ intel_dp->algo.adapter = &intel_dp->adapter;
+
+ memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
+#if 0
+ intel_dp->adapter.owner = THIS_MODULE;
+ intel_dp->adapter.class = I2C_CLASS_DDC;
+ strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+ intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+#endif
+ intel_dp->adapter.ic_cookie = &intel_dp->algo;
+#if 0
+ intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+#endif
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return ret;
+}
+
+bool
+intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ int lane_count, clock;
+ int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp, mode_rate;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
+ mode, adjusted_mode);
+ }
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
+ DRM_DEBUG_KMS("DP link computation with max lane count %i "
+ "max bw %02x pixel clock %iKHz\n",
+ max_lane_count, bws[max_clock], adjusted_mode->clock);
+
+ if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
+ return false;
+
+ bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+ mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
+
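+	/*
+	 * Walk link clocks and lane counts from cheapest to most capable
+	 * and pick the first configuration whose bandwidth covers the
+	 * mode's data rate.
+	 */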
+ for (clock = 0; clock <= max_clock; clock++) {
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+
+ if (mode_rate <= link_avail) {
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("DP link bw %02x lane "
+ "count %d clock %d bpp %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock, bpp);
+ DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+ mode_rate, link_avail);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
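+/*
+ * The M/N register fields are 24 bits wide; halve both terms until they
+ * fit, keeping the ratio approximately intact.
+ */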
+void
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
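+/*
+ * Data M/N is the ratio of the stream's byte rate (pixel_clock * bpp / 8)
+ * to the total link capacity (link_clock * nlanes); link M/N is simply
+ * pixel_clock / link_clock.
+ */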
+void
+intel_dp_compute_m_n(int bpp,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct intel_dp_m_n *m_n)
+{
+ m_n->tu = 64;
+ m_n->gmch_m = (pixel_clock * bpp) >> 3;
+ m_n->gmch_n = link_clock * nlanes;
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
+
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int lane_count = 4;
+ struct intel_dp_m_n m_n;
+ int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ /*
+ * Find the lane count in the intel_encoder private
+ */
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ {
+ lane_count = intel_dp->lane_count;
+ break;
+ }
+ }
+
+	/*
+	 * Compute the GMCH and Link ratios from the pipe's post-LUT bpp
+	 * and the link parameters negotiated during mode fixup.
+	 */
+ intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+ } else if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
+ I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+ } else if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+ } else {
+ I915_WRITE(PIPE_GMCH_DATA_M(pipe),
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+ I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+ }
+}
+
+void
+intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
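+	/*
+	 * The buffer layout mirrors DPCD registers 0x100-0x108, so link
+	 * training can write the whole block in a single AUX transfer.
+	 */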
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+}
+
+void
+intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /*
+ * There are four kinds of DP registers:
+ *
+ * IBX PCH
+ * SNB CPU
+ * IVB CPU
+ * CPT PCH
+ *
+ * IBX PCH and CPU are the same for almost everything,
+ * except that the CPU DP PLL is configured in this
+ * register
+ *
+ * CPT PCH is quite different, having many bits moved
+ * to the TRANS_DP_CTL register instead. That
+ * configuration happens (oddly) in ironlake_pch_enable
+ */
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+
+ /* Handle DP bits in common between all three register formats */
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ intel_dp->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ intel_dp->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_dp_init_link_config(intel_dp);
+
+ /* Split out the IBX/CPU vs CPT settings */
+
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
+
+		/* don't skip the PLL frequency setting required for eDP */
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (intel_crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+
+ if (is_cpu_edp(intel_dp)) {
+			/* don't skip the PLL frequency setting required for eDP */
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ }
+ } else {
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ }
+}
+
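+/*
+ * Mask/value pairs describing the PCH_PP_STATUS states the panel power
+ * sequencer must reach before we may proceed.
+ */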
+#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+void
+ironlake_wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int retries;
+
+ DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
+
+ for (retries = 5000; retries > 0; retries--) {
+ if ((I915_READ(PCH_PP_STATUS) & mask) == value)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
+}
+
+void
+ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power on\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+void
+ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power off time\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+void
+ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power cycle\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+
+/* Read the current pp_control value and fold in the panel unlock key,
+ * so that subsequent writes of this value to PCH_PP_CONTROL are
+ * honoured even if the register was locked.
+ */
+
+u32
+ironlake_get_pp_control(struct inteldrm_softc *dev_priv)
+{
+ u32 control = I915_READ(PCH_PP_CONTROL);
+
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ return control;
+}
+
+void
+ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+ DRM_DEBUG_KMS("Turn eDP VDD on\n");
+
+ WARN(intel_dp->want_panel_vdd,
+ "eDP VDD already requested on\n");
+
+ intel_dp->want_panel_vdd = true;
+
+ if (ironlake_edp_have_panel_vdd(intel_dp)) {
+ DRM_DEBUG_KMS("eDP VDD already on\n");
+ return;
+ }
+
+ if (!ironlake_edp_have_panel_power(intel_dp))
+ ironlake_wait_panel_power_cycle(intel_dp);
+
+ pp = ironlake_get_pp_control(dev_priv);
+ pp |= EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+
+ /*
+ * If the panel wasn't on, delay before accessing aux channel
+ */
+ if (!ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP was not running\n");
+ drm_msleep(intel_dp->panel_power_up_delay, "915edpon");
+ }
+}
+
+void
+ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+ pp = ironlake_get_pp_control(dev_priv);
+ pp &= ~EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+
+ drm_msleep(intel_dp->panel_power_down_delay, "915vddo");
+ }
+}
+
+void
+ironlake_panel_vdd_tick(void *arg)
+{
+ struct intel_dp *intel_dp = arg;
+
+ workq_queue_task(NULL, &intel_dp->panel_vdd_task, 0,
+ ironlake_panel_vdd_work, intel_dp, NULL);
+}
+
+void
+ironlake_panel_vdd_work(void *arg1, void *arg2)
+{
+ struct intel_dp *intel_dp = arg1;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ rw_enter_write(&dev->mode_config.rwl);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ rw_exit_write(&dev->mode_config.rwl);
+}
+
+void
+ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
+	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on\n");
+
+ intel_dp->want_panel_vdd = false;
+
+ if (sync) {
+ ironlake_panel_vdd_off_sync(intel_dp);
+ } else {
+ /*
+ * Queue the timer to fire a long
+ * time from now (relative to the power down delay)
+ * to keep the panel power up across a sequence of operations
+ */
+ timeout_add_msec(&intel_dp->panel_vdd_to, intel_dp->panel_power_cycle_delay * 5);
+ }
+}
+
+void
+ironlake_edp_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP power on\n");
+
+ if (ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP power already on\n");
+ return;
+ }
+
+ ironlake_wait_panel_power_cycle(intel_dp);
+
+ pp = ironlake_get_pp_control(dev_priv);
+ if (IS_GEN5(dev)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
+
+ pp |= POWER_TARGET_ON;
+ if (!IS_GEN5(dev))
+ pp |= PANEL_POWER_RESET;
+
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ ironlake_wait_panel_on(intel_dp);
+
+ if (IS_GEN5(dev)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
+}
+
+void
+ironlake_edp_panel_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP power off\n");
+
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
+ pp = ironlake_get_pp_control(dev_priv);
+ /* We need to switch off panel power _and_ force vdd, for otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ intel_dp->want_panel_vdd = false;
+
+ ironlake_wait_panel_off(intel_dp);
+}
+
+void
+ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ drm_msleep(intel_dp->backlight_on_delay, "915ebo");
+ pp = ironlake_get_pp_control(dev_priv);
+ pp |= EDP_BLC_ENABLE;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ intel_panel_enable_backlight(dev, pipe);
+}
+
+void
+ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ intel_panel_disable_backlight(dev);
+
+ DRM_DEBUG_KMS("\n");
+ pp = ironlake_get_pp_control(dev_priv);
+ pp &= ~EDP_BLC_ENABLE;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ drm_msleep(intel_dp->backlight_off_delay, "915bo1");
+}
+
+void
+ironlake_edp_pll_on(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+ WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
+ POSTING_READ(DP_A);
+ DELAY(200);
+}
+
+void
+ironlake_edp_pll_off(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
+ dpa_ctl = I915_READ(DP_A);
+ WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+ "dp pll off, should be on\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+	 * re-training will fail). */
+ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
+ DELAY(200);
+}
+
+/* If the sink supports it, try to set the power state appropriately */
+void
+intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+ /*
+		 * When turning on, the sink may take up to 1ms to wake,
+		 * so retry the write a few times with a 1ms sleep between.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_write_1(intel_dp,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ drm_msleep(1, "915dps");
+ }
+ }
+}
+
+bool
+intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(intel_dp->output_reg);
+
+ if (!(tmp & DP_PORT_EN))
+ return false;
+
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ *pipe = PORT_TO_PIPE(tmp);
+ } else {
+ u32 trans_sel;
+ u32 trans_dp;
+ int i;
+
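+		/*
+		 * On CPT the pipe<->port mapping lives in the transcoder's
+		 * TRANS_DP_CTL port select field, so scan every pipe for
+		 * one driving this port.
+		 */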
+ switch (intel_dp->output_reg) {
+ case PCH_DP_B:
+ trans_sel = TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ trans_sel = TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ trans_sel = TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ return true;
+ }
+
+ for_each_pipe(i) {
+ trans_dp = I915_READ(TRANS_DP_CTL(i));
+ if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+ *pipe = i;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+ intel_dp->output_reg);
+ }
+
+ return true;
+}
+
+void
+intel_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ /* Make sure the panel is off before trying to change the mode. But also
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ ironlake_edp_panel_off(intel_dp);
+
+	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
+ if (!is_cpu_edp(intel_dp))
+ intel_dp_link_down(intel_dp);
+}
+
+void
+intel_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (is_cpu_edp(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
+ }
+}
+
+void
+intel_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (WARN_ON(dp_reg & DP_PORT_EN))
+ return;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_dp_complete_link_train(intel_dp);
+ ironlake_edp_backlight_on(intel_dp);
+}
+
+void
+intel_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(intel_dp);
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_read(intel_dp, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ drm_msleep(1, "915dpl");
+ }
+
+ return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ return intel_dp_aux_native_read_retry(intel_dp,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
+}
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+/*
+ * These are source-specific values; current Intel hardware supports
+ * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
+ */
+
+uint8_t
+intel_dp_voltage_max(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (IS_HASWELL(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ }
+}
+
+void
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
+
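+	/*
+	 * Each lane may request its own drive settings; take the highest
+	 * request across all lanes, then clamp to what the source
+	 * hardware supports.
+	 */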
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ voltage_max = intel_dp_voltage_max(intel_dp);
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ intel_dp->train_set[lane] = v | p;
+}
+
+uint32_t
+intel_dp_signal_levels(uint8_t train_set)
+{
+ uint32_t signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
+/* Gen6's DP voltage swing and pre-emphasis control */
+uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ }
+}
+
+/* Gen7's DP voltage swing and pre-emphasis control */
+uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+uint32_t
+intel_dp_signal_levels_hsw(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+ return DDI_BUF_EMP_400MV_9_5DB_HSW;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_HSW;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_HSW;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ }
+}
+
+bool
+intel_dp_set_link_train(struct intel_dp *intel_dp,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+ uint32_t temp;
+ int retries;
+
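+	/*
+	 * On Haswell the training pattern lives in DP_TP_CTL rather than
+	 * the port register, and leaving training requires waiting for
+	 * the idle pattern to complete before selecting normal output.
+	 */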
+ if (IS_HASWELL(dev)) {
+ temp = I915_READ(DP_TP_CTL(port));
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ for (retries = 100; retries > 0; retries--) {
+ if (I915_READ(DP_TP_STATUS(port)) &
+ DP_TP_STATUS_IDLE_DONE)
+ break;
+ DELAY(100);
+ }
+ if (retries == 0)
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ }
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ } else if (HAS_PCH_CPT(dev) &&
+ (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
+
+ } else {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ }
+ }
+
+ I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ POSTING_READ(intel_dp->output_reg);
+
+ intel_dp_aux_native_write_1(intel_dp,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+ DP_TRAINING_PATTERN_DISABLE) {
+ ret = intel_dp_aux_native_write(intel_dp,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
+ return false;
+ }
+
+ return true;
+}
+
+/* Enable corresponding port and start training pattern 1 */
+void
+intel_dp_start_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+ struct drm_device *dev = encoder->dev;
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ int voltage_tries, loop_tries;
+ uint32_t DP = intel_dp->DP;
+
+ if (IS_HASWELL(dev))
+ intel_ddi_prepare_link_retrain(encoder);
+
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
+
+ DP |= DP_PORT_EN;
+
+ memset(intel_dp->train_set, 0, 4);
+ voltage = 0xff;
+ voltage_tries = 0;
+ loop_tries = 0;
+ clock_recovery = false;
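+	/*
+	 * Clock recovery: transmit training pattern 1, raising the drive
+	 * settings as the sink requests, until the sink reports lock or
+	 * we exhaust five tries at one voltage and five full restarts.
+	 */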
+ for (;;) {
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ uint32_t signal_levels;
+
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(
+ intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
+ signal_levels);
+
+ /* Set training pattern 1 */
+ if (!intel_dp_set_link_train(intel_dp, DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE))
+ break;
+
+ drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
+ break;
+ }
+
+ if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("clock recovery OK\n");
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == intel_dp->lane_count && voltage_tries == 5) {
+ ++loop_tries;
+ if (loop_tries == 5) {
+ DRM_DEBUG_KMS("too many full retries, give up\n");
+ break;
+ }
+ memset(intel_dp->train_set, 0, 4);
+ voltage_tries = 0;
+ continue;
+ }
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++voltage_tries;
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("too many voltage retries, give up\n");
+ break;
+ }
+ } else
+ voltage_tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp, link_status);
+ }
+
+ intel_dp->DP = DP;
+}
+
+void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ bool channel_eq = false;
+ int tries, cr_tries;
+ uint32_t DP = intel_dp->DP;
+
+ /* channel equalization */
+ tries = 0;
+ cr_tries = 0;
+ channel_eq = false;
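+	/*
+	 * Channel equalization: transmit training pattern 2, adjusting
+	 * drive settings as requested; if the clock drops out of lock or
+	 * five attempts fail, fall back to clock recovery.
+	 */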
+ for (;;) {
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ uint32_t signal_levels;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ if (cr_tries > 5) {
+ DRM_ERROR("failed to train DP, aborting\n");
+ intel_dp_link_down(intel_dp);
+ break;
+ }
+
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ /* channel eq pattern */
+ if (!intel_dp_set_link_train(intel_dp, DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE))
+ break;
+
+ drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+ if (!intel_dp_get_link_status(intel_dp, link_status))
+ break;
+
+ /* Make sure clock is still ok */
+ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ intel_dp_start_link_train(intel_dp);
+ cr_tries++;
+ continue;
+ }
+
+ if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times, then try clock recovery if that fails */
+ if (tries > 5) {
+ intel_dp_link_down(intel_dp);
+ intel_dp_start_link_train(intel_dp);
+ tries = 0;
+ cr_tries++;
+ continue;
+ }
+
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp, link_status);
+ ++tries;
+ }
+
+ if (channel_eq)
+ DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
+
+ intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
+}
+
+void
+intel_dp_link_down(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t DP = intel_dp->DP;
+
+ /*
+ * DDI code has a strict mode set sequence and we should try to respect
+ * it, otherwise we might hang the machine in many different ways. So we
+ * really should be disabling the port only on a complete crtc_disable
+ * sequence. This function is just called under two conditions on DDI
+ * code:
+	 * - Link train failed while doing crtc_enable, and in this case we
+	 *   really should respect the mode set sequence and wait for a
+	 *   crtc_disable.
+	 * - Someone turned the monitor off and intel_dp_check_link_status
+	 *   called us. We don't need to disable the whole port in this case, so
+ * when someone turns the monitor on again,
+ * intel_ddi_prepare_link_retrain will take care of redoing the link
+ * train.
+ */
+ if (IS_HASWELL(dev))
+ return;
+
+ if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ }
+ POSTING_READ(intel_dp->output_reg);
+
+ drm_msleep(17, "915dlo");
+
+ if (HAS_PCH_IBX(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A.
+ *
+ * Combine this with another hardware workaround:
+ * transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+
+ /* Changes to enable or select take place the vblank
+ * after being written.
+ */
+ if (crtc == NULL) {
+ /* We can arrive here never having been attached
+ * to a CRTC, for instance, due to inheriting
+ * random state from the BIOS.
+ *
+ * If the pipe is not running, play safe and
+ * wait for the clocks to stabilise before
+ * continuing.
+ */
+ POSTING_READ(intel_dp->output_reg);
+ drm_msleep(50, "915dla");
+ } else
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ }
+
+ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+ drm_msleep(intel_dp->panel_power_down_delay, "915ldo");
+}
+
+bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
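+	/*
+	 * Read the receiver capability block at DPCD address 0, then, for
+	 * branch devices that report per-port info, the downstream port
+	 * descriptors.
+	 */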
+ if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
+ sizeof(intel_dp->dpcd)) == 0)
+ return false; /* aux transfer failed */
+
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+ return false; /* DPCD not present */
+
+ if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT))
+ return true; /* native DP sink */
+
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
+ return true; /* no per-port downstream info */
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
+ intel_dp->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS) == 0)
+ return false; /* downstream port status fetch failed */
+
+ return true;
+}
+
+void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+ u8 buf[3];
+
+ if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+}
+
+bool
+intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ int ret;
+
+ ret = intel_dp_aux_native_read_retry(intel_dp,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector, 1);
+ if (!ret)
+ return false;
+
+ return true;
+}
+
+void
+intel_dp_handle_test_request(struct intel_dp *intel_dp)
+{
+ /* NAK by default */
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ * 1. Read DPCD
+ * 2. Configure link according to Receiver Capabilities
+ * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ * 4. Check link status on receipt of hot-plug interrupt
+ */
+
+void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ u8 sink_irq_vector;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_encoder->connectors_active)
+ return;
+
+ if (WARN_ON(!intel_encoder->base.crtc))
+ return;
+
+ /* Try to read receiver status if the link appears to be up */
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
+ /* Now read the DPCD to see if it's actually running */
+ if (!intel_dp_get_dpcd(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
+ /* Try to read the source of the interrupt */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ /* Clear interrupt source */
+ intel_dp_aux_native_write_1(intel_dp,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector);
+
+ if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+ if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+ DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+ }
+
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ drm_get_encoder_name(&intel_encoder->base));
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
+}
+
+/* XXX this is probably wrong for multiple downstream ports */
+enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+ uint8_t *dpcd = intel_dp->dpcd;
+ bool hpd;
+ uint8_t type;
+
+ if (!intel_dp_get_dpcd(intel_dp))
+ return connector_status_disconnected;
+
+ /* if there's no downstream port, we're done */
+ if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ return connector_status_connected;
+
+ /* If we're HPD-aware, SINK_COUNT changes dynamically */
+ hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
+ if (hpd) {
+ uint8_t reg;
+ if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
+ &reg, 1))
+ return connector_status_unknown;
+ return DP_GET_SINK_COUNT(reg) ? connector_status_connected
+ : connector_status_disconnected;
+ }
+
+ /* If no HPD, poke DDC gently */
+ if (drm_probe_ddc(&intel_dp->adapter))
+ return connector_status_connected;
+
+ /* Well we tried, say unknown for unreliable port types */
+ type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+ if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
+ return connector_status_unknown;
+
+ /* Anything else is out of spec, warn and ignore */
+ DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+ return connector_status_disconnected;
+}
+
+enum drm_connector_status
+ironlake_dp_detect(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ enum drm_connector_status status;
+
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ status = intel_panel_detect(dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
+
+ return intel_dp_detect_dpcd(intel_dp);
+}
+
+enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t bit;
+
+ switch (intel_dp->output_reg) {
+ case DP_B:
+ bit = DPB_HOTPLUG_LIVE_STATUS;
+ break;
+ case DP_C:
+ bit = DPC_HOTPLUG_LIVE_STATUS;
+ break;
+ case DP_D:
+ bit = DPD_HOTPLUG_LIVE_STATUS;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
+ return connector_status_disconnected;
+
+ return intel_dp_detect_dpcd(intel_dp);
+}
+
+struct edid *
+intel_dp_get_edid(struct drm_connector *connector, struct i2c_controller *adapter)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ struct edid *edid;
+ int size;
+
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return NULL;
+
+ size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
+ edid = malloc(size, M_DRM, M_WAITOK);
+ if (!edid)
+ return NULL;
+
+ memcpy(edid, intel_connector->edid, size);
+ return edid;
+ }
+
+ return drm_get_edid(connector, adapter);
+}
+
+int
+intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_controller *adapter)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return 0;
+
+ return intel_connector_update_modes(connector,
+ intel_connector->edid);
+ }
+
+ return intel_ddc_get_modes(connector, adapter);
+}
+
+
+/**
+ * Detect whether a DP (or eDP) sink is connected, via the platform's
+ * hotplug status and DPCD probing.
+ *
+ * \return the drm_connector_status for the port.
+ */
+enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status status;
+ struct edid *edid = NULL;
+#ifdef notyet
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
+#endif
+
+ intel_dp->has_audio = false;
+
+ if (HAS_PCH_SPLIT(dev))
+ status = ironlake_dp_detect(intel_dp);
+ else
+ status = g4x_dp_detect(intel_dp);
+
+#ifdef notyet
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+#endif
+
+ if (status != connector_status_connected)
+ return status;
+
+ intel_dp_probe_oui(intel_dp);
+
+ if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
+ intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
+ } else {
+ edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ free(edid, M_DRM);
+ }
+ }
+
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ return connector_status_connected;
+}
+
+int
+intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+	/* We should parse the EDID data and find out if it has an audio sink. */
+
+ ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
+ if (ret)
+ return ret;
+
+ /* if eDP has no EDID, fall back to fixed mode */
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev,
+ intel_connector->panel.fixed_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+ free(edid, M_DRM);
+ }
+
+ return has_audio;
+}
+
+int
+intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_dp_detect_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (has_audio == intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_dp->color_range)
+ return 0;
+
+ intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+ if (is_edp(intel_dp) &&
+ property == connector->dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val) {
+ /* the eDP scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = val;
+
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_encoder->base.crtc) {
+ struct drm_crtc *crtc = intel_encoder->base.crtc;
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+
+ return 0;
+}
+
+void
+intel_dp_destroy(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ free(intel_connector->edid, M_DRM);
+
+ if (is_edp(intel_dp)) {
+ intel_panel_destroy_backlight(dev);
+ intel_panel_fini(&intel_connector->panel);
+ }
+
+#ifdef notyet
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, M_DRM);
+}
+
+void
+intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+#ifdef notyet
+ i2c_del_adapter(&intel_dp->adapter);
+#endif
+ drm_encoder_cleanup(encoder);
+ if (is_edp(intel_dp)) {
+ timeout_del(&intel_dp->panel_vdd_to);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ }
+ free(intel_dig_port, M_DRM);
+}
+
+static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ .mode_fixup = intel_dp_mode_fixup,
+ .mode_set = intel_dp_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_set_property,
+ .destroy = intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .destroy = intel_dp_encoder_destroy,
+};
+
+void
+intel_dp_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ intel_dp_check_link_status(intel_dp);
+}
+
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ return intel_dp->output_reg;
+ }
+
+ return -1;
+}
+
+/* check the VBT to see whether the eDP is on DP-D port */
+bool
+intel_dpd_is_edp(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+
+ if (p_child->dvo_port == PORT_IDPD &&
+ p_child->device_type == DEVICE_TYPE_eDP)
+ return true;
+ }
+ return false;
+}
+
+void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+
+ if (is_edp(intel_dp)) {
+ drm_mode_create_scaling_mode_property(connector->dev);
+ drm_object_attach_property(
+ &connector->base,
+ connector->dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
+
+void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct edp_power_seq cur, vbt, spec, final;
+ u32 pp_on, pp_off, pp_div, pp;
+
+ /* Workaround: Need to write PP_CONTROL with the unlock key as
+ * the very first thing. */
+ pp = ironlake_get_pp_control(dev_priv);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->edp.pps;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it with 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
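+/* The register fields are in units of 100us; convert to ms, rounding up. */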
+#define get_delay(field) (howmany(final.field, 10))
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ if (out)
+ *out = final;
+}
+
+void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *seq)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 pp_on, pp_off, pp_div;
+
+ /* And finally store the new values in the power sequencer. */
+ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+	/* Compute the divisor for the pp clock; simply match the Bspec
+	 * formula. */
+ pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
+ << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (howmany(seq->t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
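+	/* With a 125 MHz rawclk, for instance, the reference divider above
+	 * works out to (100 * 125) / 2 - 1 == 6249, dividing the raw clock
+	 * down to the 100usec timebase the delay fields are programmed in. */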
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (is_cpu_edp(intel_dp))
+ pp_on |= PANEL_POWER_PORT_DP_A;
+ else
+ pp_on |= PANEL_POWER_PORT_DP_D;
+ }
+
+ I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
+ I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
+ I915_WRITE(PCH_PP_DIVISOR, pp_div);
+
+ DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ I915_READ(PCH_PP_ON_DELAYS),
+ I915_READ(PCH_PP_OFF_DELAYS),
+ I915_READ(PCH_PP_DIVISOR));
+}
+
+void
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edp_power_seq power_seq = { 0 };
+ enum port port = intel_dig_port->port;
+ const char *name = NULL;
+ int type;
+
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->attached_connector = intel_connector;
+
+ if (HAS_PCH_SPLIT(dev) && port == PORT_D)
+ if (intel_dpd_is_edp(dev))
+ intel_dp->is_pch_edp = true;
+
+	/*
+	 * FIXME: We need to initialize built-in panels before external panels.
+	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup.
+	 */
+ if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else if (port == PORT_A || is_pch_edp(intel_dp)) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else {
+ /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+ * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+ * rewrite it.
+ */
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ }
+
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ timeout_set(&intel_dp->panel_vdd_to, ironlake_panel_vdd_tick, intel_dp);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+#ifdef notyet
+ drm_sysfs_connector_add(connector);
+#endif
+
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ /* Set up the DDC bus. */
+ switch (port) {
+ case PORT_A:
+ name = "DPDDC-A";
+ break;
+ case PORT_B:
+ dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+ name = "DPDDC-B";
+ break;
+ case PORT_C:
+ dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+ name = "DPDDC-C";
+ break;
+ case PORT_D:
+ dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+ name = "DPDDC-D";
+ break;
+ default:
+ WARN(1, "Invalid port %c\n", port_name(port));
+ break;
+ }
+
+ if (is_edp(intel_dp))
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
+	/* Cache DPCD and EDID for eDP. */
+ if (is_edp(intel_dp)) {
+ bool ret;
+ struct drm_display_mode *scan;
+ struct edid *edid;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ret = intel_dp_get_dpcd(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (ret) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+ dev_priv->no_aux_handshake =
+ intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+ DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+ } else {
+ /* if this fails, presume the device is a ghost */
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ intel_dp_encoder_destroy(&intel_encoder->base);
+ intel_dp_destroy(connector);
+ return;
+ }
+
+		/* We now know it's not a ghost; init power sequence regs. */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ } else {
+ free(edid, M_DRM);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ intel_connector->edid = edid;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+
+ /* fallback to VBT if available for eDP */
+ if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode)
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ }
+
+ if (is_edp(intel_dp)) {
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
+ }
+
+ intel_dp_add_properties(intel_dp, connector);
+
+	/* For the G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be
+	 * written with 0xd. Failure to do so will result in spurious
+	 * interrupts being generated on the port when a cable is not
+	 * attached.
+	 */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = malloc(sizeof(struct intel_digital_port), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = malloc(sizeof(struct intel_connector), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(intel_dig_port, M_DRM);
+ return;
+ }
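+
+	/* With M_WAITOK the allocations above sleep instead of failing, so
+	 * the NULL checks are purely defensive. */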
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+
+ intel_dig_port->port = port;
+ intel_dig_port->dp.output_reg = output_reg;
+
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+ intel_dp_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/sys/dev/pci/drm/i915/intel_drv.h b/sys/dev/pci/drm/i915/intel_drv.h
new file mode 100644
index 00000000000..5b2b7b4b5fe
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_drv.h
@@ -0,0 +1,646 @@
+/* $OpenBSD: intel_drv.h,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <dev/i2c/i2cvar.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_crtc_helper.h>
+#include <dev/pci/drm/drm_fb_helper.h>
+#include <dev/pci/drm/drm_dp_helper.h>
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtc in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* these are outputs from the chip - integrated only;
+   external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_DISPLAYPORT 7
+#define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+/* drm_display_mode->private_flags */
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
+/* This flag must be set by the encoder's mode_fixup if it changes the crtc
+ * timings in the mode to prevent the crtc fixup from overwriting them.
+ * Currently only lvds needs that. */
+#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
+
+static inline void
+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
+
+static inline int
+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
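+
+/* Since INTEL_MODE_PIXEL_MULTIPLIER_SHIFT is 0, the plain OR in the setter
+ * above stores the multiplier directly in the low nibble of private_flags;
+ * a multiplier of 2, for instance, doubles mode->clock and sets bit 1. */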
+
+struct intel_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_i915_gem_object *obj;
+};
+
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
+ struct drm_display_mode *our_mode;
+};
+
+struct intel_encoder {
+ struct drm_encoder base;
+ /*
+ * The new crtc this encoder will be driven from. Only differs from
+ * base->crtc while a modeset is in progress.
+ */
+ struct intel_crtc *new_crtc;
+
+ int type;
+ bool needs_tv_clock;
+	/*
+	 * Intel hw has only one MUX where encoders could be cloned, hence a
+	 * simple flag is enough to compute the possible_clones mask.
+	 */
+ bool cloneable;
+ bool connectors_active;
+ void (*hot_plug)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
+ void (*enable)(struct intel_encoder *);
+ void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
+	/* Read out the current hw state of this encoder, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the
+	 * pipe it is connected to in the pipe parameter. */
+ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
+ int crtc_mask;
+};
+
+struct intel_panel {
+ struct drm_display_mode *fixed_mode;
+ int fitting_mode;
+};
+
+struct intel_connector {
+ struct drm_connector base;
+ /*
+ * The fixed encoder this connector is connected to.
+ */
+ struct intel_encoder *encoder;
+
+	/*
+	 * The new encoder this connector will be driven by. Only differs from
+	 * encoder while a modeset is in progress.
+	 */
+ struct intel_encoder *new_encoder;
+
+	/* Reads out the current hw state, returning true if the connector is
+	 * enabled and active (i.e. dpms ON state). */
+ bool (*get_hw_state)(struct intel_connector *);
+
+ /* Panel info for eDP and LVDS */
+ struct intel_panel panel;
+
+ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ struct edid *edid;
+};
+
+struct intel_crtc {
+ struct drm_crtc base;
+ enum pipe pipe;
+ enum plane plane;
+ enum transcoder cpu_transcoder;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+	/*
+	 * Whether the crtc and the connected output pipeline are active. Implies
+	 * that crtc->enabled is set, i.e. the current mode configuration has
+	 * some outputs connected to this crtc.
+	 */
+ bool active;
+ bool primary_disabled; /* is the crtc obscured by a plane? */
+ bool lowfreq_avail;
+ struct intel_overlay *overlay;
+ struct intel_unpin_work *unpin_work;
+ int fdi_lanes;
+
+ atomic_t unpin_work_count;
+
+	/* Display surface base address adjustment for pageflips. Note that on
+	 * gen4+ this only adjusts up to a tile; offsets within a tile are
+	 * handled in the hw itself (with the TILEOFF register). */
+ unsigned long dspaddr_offset;
+
+ struct drm_i915_gem_object *cursor_bo;
+ uint32_t cursor_addr;
+ int16_t cursor_x, cursor_y;
+ int16_t cursor_width, cursor_height;
+ bool cursor_visible;
+ unsigned int bpp;
+
+ /* We can share PLLs across outputs if the timings match */
+ struct intel_pch_pll *pch_pll;
+ uint32_t ddi_pll_sel;
+};
+
+struct intel_plane {
+ struct drm_plane base;
+ enum pipe pipe;
+ struct drm_i915_gem_object *obj;
+ bool can_scale;
+ int max_downscale;
+ u32 lut_r[1024], lut_g[1024], lut_b[1024];
+ void (*update_plane)(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h);
+ void (*disable_plane)(struct drm_plane *plane);
+ int (*update_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+ void (*get_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+};
+
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+ int is_desktop;
+ int is_ddr3;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
+};
+
+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+
+#define DIP_HEADER_SIZE 5
+
+#define DIP_TYPE_AVI 0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI 13
+#define DIP_AVI_PR_1 0
+#define DIP_AVI_PR_2 1
+
+#define DIP_TYPE_SPD 0x83
+#define DIP_VERSION_SPD 0x1
+#define DIP_LEN_SPD 25
+#define DIP_SPD_UNKNOWN 0
+#define DIP_SPD_DSTB 0x1
+#define DIP_SPD_DVDP 0x2
+#define DIP_SPD_DVHS 0x3
+#define DIP_SPD_HDDVR 0x4
+#define DIP_SPD_DVC 0x5
+#define DIP_SPD_DSC 0x6
+#define DIP_SPD_VCD 0x7
+#define DIP_SPD_GAME 0x8
+#define DIP_SPD_PC 0x9
+#define DIP_SPD_BD 0xa
+#define DIP_SPD_SCD 0xb
+
+struct dip_infoframe {
+ uint8_t type; /* HB0 */
+ uint8_t ver; /* HB1 */
+ uint8_t len; /* HB2 - body len, not including checksum */
+ uint8_t ecc; /* Header ECC */
+ uint8_t checksum; /* PB0 */
+ union {
+ struct {
+ /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
+ uint8_t Y_A_B_S;
+ /* PB2 - C 7:6, M 5:4, R 3:0 */
+ uint8_t C_M_R;
+ /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
+ uint8_t ITC_EC_Q_SC;
+ /* PB4 - VIC 6:0 */
+ uint8_t VIC;
+ /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
+ uint8_t YQ_CN_PR;
+ /* PB6 to PB13 */
+ uint16_t top_bar_end;
+ uint16_t bottom_bar_start;
+ uint16_t left_bar_end;
+ uint16_t right_bar_start;
+ } __attribute__ ((packed)) avi;
+ struct {
+ uint8_t vn[8];
+ uint8_t pd[16];
+ uint8_t sdi;
+ } __attribute__ ((packed)) spd;
+ uint8_t payload[27];
+ } __attribute__ ((packed)) body;
+} __attribute__((packed));
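+
+/* Per CEA-861, the PB0 checksum is chosen so that all header and body bytes
+ * of the infoframe sum to zero modulo 256; intel_dip_infoframe_csum(),
+ * declared below, fills it in. */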
+
+struct intel_hdmi {
+ u32 sdvox_reg;
+ int ddc_bus;
+ uint32_t color_range;
+ bool has_hdmi_sink;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
+ void (*set_infoframes)(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
+};
+
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ uint32_t color_range;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ struct i2c_controller adapter;
+ struct i2c_algo_dp_aux_data algo;
+ bool is_pch_edp;
+ uint8_t train_set[4];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct workq_task panel_vdd_task;
+ struct timeout panel_vdd_to;
+ bool want_panel_vdd;
+ struct intel_connector *attached_connector;
+};
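+
+/* The panel_vdd_task/panel_vdd_to pair stands in for the delayed work item
+ * the Linux driver uses to drop VDD after a grace period; the timeout
+ * handler is ironlake_panel_vdd_tick() in intel_dp.c. */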
+
+struct intel_digital_port {
+ struct intel_encoder base;
+ enum port port;
+ u32 port_reversal;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
+};
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return dev_priv->plane_to_crtc_mapping[plane];
+}
+
+struct intel_unpin_work {
+ struct workq_task task;
+ struct drm_crtc *crtc;
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+ atomic_t pending;
+#define INTEL_FLIP_INACTIVE 0
+#define INTEL_FLIP_PENDING 1
+#define INTEL_FLIP_COMPLETE 2
+ bool enable_stall_check;
+};
+
+struct intel_fbc_work {
+ struct workq_task task;
+ struct timeout to;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int interval;
+};
+
+int intel_pch_rawclk(struct drm_device *dev);
+
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_controller *adapter);
+
+extern void intel_attach_force_audio_property(struct drm_connector *connector);
+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern void intel_crt_init(struct drm_device *dev);
+extern void intel_hdmi_init(struct drm_device *dev,
+ int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
+ bool is_sdvob);
+extern void intel_dvo_init(struct drm_device *dev);
+extern void intel_tv_init(struct drm_device *dev);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_idle(struct drm_device *dev);
+extern bool intel_lvds_init(struct drm_device *dev);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+ enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
+extern int intel_edp_target_clock(struct intel_encoder *,
+ struct drm_display_mode *mode);
+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern void intel_flush_display_plane(drm_i915_private_t *dev_priv,
+ enum plane plane);
+
+/* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
+extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
+extern void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe);
+extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern void intel_panel_destroy_backlight(struct drm_device *dev);
+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+struct intel_set_config {
+ struct drm_encoder **save_connector_encoders;
+ struct drm_crtc **save_encoder_crtcs;
+
+ bool fb_changed;
+ bool mode_changed;
+};
+
+extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_noop(struct drm_encoder *encoder);
+extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
+
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+ return to_intel_connector(connector)->encoder;
+}
+
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+ return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+ return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
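+
+/* These container_of() helpers recover the embedding intel_digital_port
+ * from any of its members, so e.g. dp_to_dig_port(intel_dp)->port yields
+ * the port an intel_dp sits on without storing a back pointer. */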
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+
+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(drm_i915_private_t *dev_priv,
+ enum pipe pipe);
+extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+
+struct intel_load_detect_pipe {
+ struct drm_framebuffer *release_fb;
+ bool load_detect_temp;
+ int dpms_mode;
+};
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old);
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old);
+
+extern void intelfb_restore(void);
+extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
+extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+
+extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
+extern void intel_fb_restore_mode(struct drm_device *dev);
+
+extern void assert_pipe(drm_i915_private_t *dev_priv, enum pipe pipe,
+ bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
+
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void intel_update_watermarks(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width,
+ int pixel_size);
+extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
+
+extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
+
+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern u32 intel_dpio_read(drm_i915_private_t *dev_priv, int reg);
+
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(drm_i915_private_t *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+extern void intel_init_power_wells(struct drm_device *dev);
+extern void intel_enable_gt_powersave(struct drm_device *dev);
+extern void intel_disable_gt_powersave(struct drm_device *dev);
+extern void gen6_gt_check_fifodbg(drm_i915_private_t *dev_priv);
+extern void ironlake_teardown_rc6(struct drm_device *dev);
+
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe);
+extern int intel_ddi_get_cdclk_freq(drm_i915_private_t *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(drm_i915_private_t *dev_priv,
+ enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+
+#endif /* __INTEL_DRV_H__ */
diff --git a/sys/dev/pci/drm/i915/intel_dvo.c b/sys/dev/pci/drm/i915/intel_dvo.c
new file mode 100644
index 00000000000..1121144beb4
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_dvo.c
@@ -0,0 +1,559 @@
+/* $OpenBSD: intel_dvo.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "dvo.h"
+
+struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *);
+struct intel_dvo *intel_attached_dvo(struct drm_connector *);
+bool intel_dvo_connector_get_hw_state(struct intel_connector *);
+bool intel_dvo_get_hw_state(struct intel_encoder *, enum pipe *);
+void intel_disable_dvo(struct intel_encoder *);
+void intel_enable_dvo(struct intel_encoder *);
+void intel_dvo_dpms(struct drm_connector *, int);
+int intel_dvo_mode_valid(struct drm_connector *, struct drm_display_mode *);
+bool intel_dvo_mode_fixup(struct drm_encoder *, const struct drm_display_mode *,
+ struct drm_display_mode *);
+void intel_dvo_mode_set(struct drm_encoder *, struct drm_display_mode *,
+ struct drm_display_mode *);
+enum drm_connector_status intel_dvo_detect(struct drm_connector *, bool);
+int intel_dvo_get_modes(struct drm_connector *);
+void intel_dvo_destroy(struct drm_connector *);
+void intel_dvo_enc_destroy(struct drm_encoder *);
+struct drm_display_mode *intel_dvo_get_current_mode(struct drm_connector *);
+
+#define SIL164_ADDR 0x38
+#define CH7xxx_ADDR 0x76
+#define TFP410_ADDR 0x38
+#define NS2501_ADDR 0x38
+
+static const struct intel_dvo_device intel_dvo_devices[] = {
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "sil164",
+ .dvo_reg = DVOC,
+ .slave_addr = SIL164_ADDR,
+ .dev_ops = &sil164_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ch7xxx",
+ .dvo_reg = DVOC,
+ .slave_addr = CH7xxx_ADDR,
+ .dev_ops = &ch7xxx_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ivch",
+ .dvo_reg = DVOA,
+ .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+ .dev_ops = &ivch_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "tfp410",
+ .dvo_reg = DVOC,
+ .slave_addr = TFP410_ADDR,
+ .dev_ops = &tfp410_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ch7017",
+ .dvo_reg = DVOC,
+ .slave_addr = 0x75,
+ .gpio = GMBUS_PORT_DPB,
+ .dev_ops = &ch7017_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ns2501",
+ .dvo_reg = DVOC,
+ .slave_addr = NS2501_ADDR,
+ .dev_ops = &ns2501_ops,
+ }
+};
+
+struct intel_dvo {
+ struct intel_encoder base;
+
+ struct intel_dvo_device dev;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
+};
+
+struct intel_dvo *
+enc_to_intel_dvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dvo, base.base);
+}
+
+struct intel_dvo *
+intel_attached_dvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dvo, base);
+}
+
+bool
+intel_dvo_connector_get_hw_state(struct intel_connector *connector)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+ return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+bool
+intel_dvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+void
+intel_disable_dvo(struct intel_encoder *encoder)
+{
+ drm_i915_private_t *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+ I915_READ(dvo_reg);
+}
+
+void
+intel_enable_dvo(struct intel_encoder *encoder)
+{
+ drm_i915_private_t *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+void
+intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_crtc *crtc;
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_dvo->base.base.crtc;
+ if (!crtc) {
+ intel_dvo->base.connectors_active = false;
+ return;
+ }
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ intel_dvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+ } else {
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+ intel_dvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
+}
+
+int
+intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* XXX: Validate clock range */
+
+ if (intel_dvo->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
+}
+
+bool
+intel_dvo_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+	/* If we have timings from the BIOS for the panel, put them into
+	 * the adjusted mode. The CRTC will be set up for this mode,
+	 * with the panel scaling set up to source from the H/VDisplay
+	 * of the original mode.
+	 */
+ if (intel_dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
+ C(hdisplay);
+ C(hsync_start);
+ C(hsync_end);
+ C(htotal);
+ C(vdisplay);
+ C(vsync_start);
+ C(vsync_end);
+ C(vtotal);
+ C(clock);
+#undef C
+ }
+
+ if (intel_dvo->dev.dev_ops->mode_fixup)
+ return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+
+ return true;
+}
+
+void
+intel_dvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ int pipe = intel_crtc->pipe;
+ u32 dvo_val;
+ u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
+ int dpll_reg = DPLL(pipe);
+
+ switch (dvo_reg) {
+ case DVOA:
+ default:
+ dvo_srcdim_reg = DVOA_SRCDIM;
+ break;
+ case DVOB:
+ dvo_srcdim_reg = DVOB_SRCDIM;
+ break;
+ case DVOC:
+ dvo_srcdim_reg = DVOC_SRCDIM;
+ break;
+ }
+
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+
+ /* Save the data order, since I don't know what it should be set to. */
+ dvo_val = I915_READ(dvo_reg) &
+ (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+ dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
+ DVO_BLANK_ACTIVE_HIGH;
+
+ if (pipe == 1)
+ dvo_val |= DVO_PIPE_B_SELECT;
+ dvo_val |= DVO_PIPE_STALL;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
+
+ I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
+
+ /*I915_WRITE(DVOB_SRCDIM,
+ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+ (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
+ I915_WRITE(dvo_srcdim_reg,
+ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+ (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+ /*I915_WRITE(DVOB, dvo_val);*/
+ I915_WRITE(dvo_reg, dvo_val);
+}
+
+/**
+ * Detect the output connection on our DVO device by delegating to the
+ * DVO chip driver's detect hook; the force argument is unused.
+ */
+enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+}
+
+int
+intel_dvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ drm_i915_private_t *dev_priv = connector->dev->dev_private;
+
+ /* We should probably have an i2c driver get_modes function for those
+ * devices which will have a fixed set of modes determined by the chip
+ * (TV-out, for example), but for now with just TMDS and LVDS,
+ * that's not the case.
+ */
+ intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
+ if (!list_empty(&connector->probed_modes))
+ return 1;
+
+ if (intel_dvo->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+void
+intel_dvo_destroy(struct drm_connector *connector)
+{
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, M_DRM);
+}
+
+static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
+ .mode_fixup = intel_dvo_mode_fixup,
+ .mode_set = intel_dvo_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ .dpms = intel_dvo_dpms,
+ .detect = intel_dvo_detect,
+ .destroy = intel_dvo_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+ .mode_valid = intel_dvo_mode_valid,
+ .get_modes = intel_dvo_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+void
+intel_dvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+ if (intel_dvo->dev.dev_ops->destroy)
+ intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+ free(intel_dvo->panel_fixed_mode, M_DRM);
+
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
+ .destroy = intel_dvo_enc_destroy,
+};
+
+/**
+ * Attempts to get a fixed panel timing for LVDS (currently only the i830).
+ *
+ * Other chips with DVO LVDS will need to extend this to deal with the LVDS
+ * chip being on DVOB/C and having multiple pipes.
+ */
+struct drm_display_mode *
+intel_dvo_get_current_mode(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+ struct drm_display_mode *mode = NULL;
+
+ /* If the DVO port is active, that'll be the LVDS, so we can pull out
+ * its timings to get how the BIOS set up the panel.
+ */
+ if (dvo_val & DVO_ENABLE) {
+ struct drm_crtc *crtc;
+ int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc) {
+ mode = intel_crtc_mode_get(dev, crtc);
+ if (mode) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ }
+ }
+ }
+
+ return mode;
+}
+
+void
+intel_dvo_init(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_dvo *intel_dvo;
+ struct intel_connector *intel_connector;
+ int i;
+ int encoder_type = DRM_MODE_ENCODER_NONE;
+
+ intel_dvo = malloc(sizeof(struct intel_dvo), M_DRM, M_WAITOK | M_ZERO);
+ if (!intel_dvo)
+ return;
+
+ intel_connector = malloc(sizeof(struct intel_connector), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(intel_dvo, M_DRM);
+ return;
+ }
+
+ intel_encoder = &intel_dvo->base;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type);
+
+ intel_encoder->disable = intel_disable_dvo;
+ intel_encoder->enable = intel_enable_dvo;
+ intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+ intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
+ /* Now, try to find a controller */
+ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ struct drm_connector *connector = &intel_connector->base;
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_controller *i2c;
+ int gpio;
+
+ /* Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+ * in the spec.
+ */
+ if (intel_gmbus_is_port_valid(dvo->gpio))
+ gpio = dvo->gpio;
+ else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+ gpio = GMBUS_PORT_SSC;
+ else
+ gpio = GMBUS_PORT_DPB;
+
+ /* Set up the I2C bus necessary for the chip we're probing.
+ * It appears that everything is on GPIOE except for panels
+ * on i830 laptops, which are on GPIOB (DVOA).
+ */
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+
+ intel_dvo->dev = *dvo;
+ if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+ continue;
+
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ intel_encoder->cloneable = true;
+ drm_connector_init(dev, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII);
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case INTEL_DVO_CHIP_LVDS:
+ intel_encoder->cloneable = false;
+ drm_connector_init(dev, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ break;
+ }
+
+ drm_connector_helper_add(connector,
+ &intel_dvo_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_dvo_helper_funcs);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+ /* For our LVDS chipsets, we should hopefully be able
+ * to dig the fixed panel mode out of the BIOS data.
+ * However, it's in a different format from the BIOS
+ * data on chipsets with integrated LVDS (stored in AIM
+ * headers, likely), so for now, just get the current
+ * mode being output through DVO.
+ */
+ intel_dvo->panel_fixed_mode =
+ intel_dvo_get_current_mode(connector);
+ intel_dvo->panel_wants_dither = true;
+ }
+
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+ return;
+ }
+
+ drm_encoder_cleanup(&intel_encoder->base);
+ free(intel_dvo, M_DRM);
+ free(intel_connector, M_DRM);
+}
diff --git a/sys/dev/pci/drm/i915/intel_fb.c b/sys/dev/pci/drm/i915/intel_fb.c
new file mode 100644
index 00000000000..77e48dfe1b0
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_fb.c
@@ -0,0 +1,309 @@
+/* $OpenBSD: intel_fb.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_fb_helper.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+int intelfb_create(struct intel_fbdev *,
+ struct drm_fb_helper_surface_size *);
+int intel_fb_find_or_create_single(struct drm_fb_helper *,
+ struct drm_fb_helper_surface_size *);
+void intel_fbdev_destroy(struct drm_device *, struct intel_fbdev *);
+
+int
+intelfb_create(struct intel_fbdev *ifbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = ifbdev->helper.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+#if 0
+ struct fb_info *info;
+#endif
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_i915_gem_object *obj;
+ int size, ret;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) /
+ 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = roundup2(size, PAGE_SIZE);
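+	/* For a 1024x768 32bpp surface, for instance, this works out to a
+	 * pitch of roundup2(1024 * 4, 64) == 4096 bytes and a size of
+	 * 4096 * 768 bytes, already a multiple of PAGE_SIZE. */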
+ obj = i915_gem_alloc_object(dev, size);
+ if (!obj) {
+ DRM_ERROR("failed to allocate framebuffer\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ DRM_LOCK();
+
+	/* Flush everything out; we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+ if (ret) {
+ DRM_ERROR("failed to pin fb: %d\n", ret);
+ goto out_unref;
+ }
+
+#if 0
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ info->par = ifbdev;
+#endif
+
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+ if (ret)
+ goto out_unpin;
+
+ fb = &ifbdev->ifb.base;
+
+ ifbdev->helper.fb = fb;
+#if 0
+ ifbdev->helper.fbdev = info;
+
+ strlcpy(info->fix.id, "inteldrmfb", sizeof(info->fix.id));
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &intelfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].size =
+ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+ info->fix.smem_len = size;
+
+ info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
+ if (!info->screen_base) {
+ ret = -ENOSPC;
+ goto out_unpin;
+ }
+ info->screen_size = size;
+
+// memset(info->screen_base, 0, size);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+#else
+{
+ struct rasops_info *ri = &dev_priv->ro;
+ bus_space_handle_t bsh;
+ int err;
+
+ err = agp_map_subregion(dev_priv->agph, obj->gtt_offset, size, &bsh);
+ if (err) {
+ ret = -err;
+ goto out_unpin;
+ }
+
+ ri->ri_bits = bus_space_vaddr(dev->bst, bsh);
+ ri->ri_depth = fb->bits_per_pixel;
+ ri->ri_stride = fb->pitches[0];
+ ri->ri_width = sizes->fb_width;
+ ri->ri_height = sizes->fb_height;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ ri->ri_rnum = 8;
+ ri->ri_rpos = 16;
+ ri->ri_gnum = 8;
+ ri->ri_gpos = 8;
+ ri->ri_bnum = 8;
+ ri->ri_bpos = 0;
+ break;
+ }
+}
+#endif
+
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ fb->width, fb->height,
+ obj->gtt_offset, obj);
+
+ DRM_UNLOCK();
+#if 1
+ printf("skipping call to vga_switcheroo_client_fb_set\n");
+#else
+ vga_switcheroo_client_fb_set(dev->pdev, info);
+#endif
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(obj);
+out_unref:
+ drm_gem_object_unreference(&obj->base);
+ DRM_UNLOCK();
+out:
+ return ret;
+}
+
+int
+intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = intelfb_create(ifbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
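+
+/* The return value follows the drm_fb_helper fb_probe convention: 1 when a
+ * new framebuffer was allocated, 0 when an existing one is reused, and a
+ * negative errno on failure. */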
+
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intel_fb_find_or_create_single,
+};
+
+void
+intel_fbdev_destroy(struct drm_device *dev, struct intel_fbdev *ifbdev)
+{
+#if 0
+ struct fb_info *info;
+#endif
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
+
+#if 0
+ if (ifbdev->helper.fbdev) {
+ info = ifbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ iounmap(info->screen_base);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+#endif
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj) {
+ drm_gem_object_unreference(&ifb->obj->base);
+ ifb->obj = NULL;
+ }
+}
+
+int
+intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int ret;
+
+ ifbdev = malloc(sizeof(struct intel_fbdev), M_DRM,
+ M_WAITOK | M_ZERO);
+
+ dev_priv->fbdev = ifbdev;
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, &ifbdev->helper,
+ dev_priv->num_pipe,
+ INTELFB_CONN_LIMIT);
+ if (ret) {
+ free(ifbdev, M_DRM);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+ drm_fb_helper_initial_config(&ifbdev->helper, 32);
+ return 0;
+}
+
+void
+intel_fbdev_fini(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ free(dev_priv->fbdev, M_DRM);
+ dev_priv->fbdev = NULL;
+}
+
+void
+intel_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
+
+void
+intel_fb_restore_mode(struct drm_device *dev)
+{
+ int ret;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+
+ /* Be sure to shut off any planes that may be active */
+ list_for_each_entry(plane, &config->plane_list, head)
+ plane->funcs->disable_plane(plane);
+
+ rw_exit_write(&dev->mode_config.rwl);
+}
diff --git a/sys/dev/pci/drm/i915/intel_hdmi.c b/sys/dev/pci/drm/i915/intel_hdmi.c
new file mode 100644
index 00000000000..ef9ae4f08d8
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_hdmi.c
@@ -0,0 +1,1156 @@
+/* $OpenBSD: intel_hdmi.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_edid.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *);
+struct intel_hdmi *intel_attached_hdmi(struct drm_connector *);
+u32 intel_infoframe_index(struct dip_infoframe *);
+u32 intel_infoframe_flags(struct dip_infoframe *);
+void i9xx_write_infoframe(struct drm_encoder *, struct dip_infoframe *);
+void ironlake_write_infoframe(struct drm_encoder *, struct dip_infoframe *);
+void intel_set_infoframe(struct drm_encoder *, struct dip_infoframe *);
+void intel_hdmi_set_avi_infoframe(struct drm_encoder *,
+ struct drm_display_mode *);
+void intel_hdmi_set_spd_infoframe(struct drm_encoder *);
+void intel_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *,
+ struct drm_display_mode *);
+void intel_hdmi_dpms(struct drm_encoder *, int);
+int intel_hdmi_mode_valid(struct drm_connector *,
+ struct drm_display_mode *);
+bool intel_hdmi_mode_fixup(struct drm_encoder *, const struct drm_display_mode *,
+ struct drm_display_mode *);
+enum drm_connector_status intel_hdmi_detect(struct drm_connector *,
+ bool);
+int intel_hdmi_get_modes(struct drm_connector *);
+bool intel_hdmi_detect_audio(struct drm_connector *);
+int intel_hdmi_set_property(struct drm_connector *, struct drm_property *,
+ uint64_t);
+void intel_hdmi_destroy(struct drm_connector *);
+void intel_hdmi_add_properties(struct intel_hdmi *, struct drm_connector *);
+struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *);
+void assert_hdmi_port_disabled(struct intel_hdmi *);
+u32 g4x_infoframe_index(struct dip_infoframe *);
+u32 g4x_infoframe_enable(struct dip_infoframe *);
+u32 hsw_infoframe_enable(struct dip_infoframe *);
+u32 hsw_infoframe_data_reg(struct dip_infoframe *, enum pipe);
+void g4x_write_infoframe(struct drm_encoder *, struct dip_infoframe *);
+void ibx_write_infoframe(struct drm_encoder *, struct dip_infoframe *);
+void cpt_write_infoframe(struct drm_encoder *, struct dip_infoframe *);
+void vlv_write_infoframe(struct drm_encoder *, struct dip_infoframe *);
+void hsw_write_infoframe(struct drm_encoder *, struct dip_infoframe *);
+void g4x_set_infoframes(struct drm_encoder *, struct drm_display_mode *);
+void ibx_set_infoframes(struct drm_encoder *, struct drm_display_mode *);
+void cpt_set_infoframes(struct drm_encoder *, struct drm_display_mode *);
+void vlv_set_infoframes(struct drm_encoder *, struct drm_display_mode *);
+void hsw_set_infoframes(struct drm_encoder *, struct drm_display_mode *);
+bool intel_hdmi_get_hw_state(struct intel_encoder *, enum pipe *);
+void intel_enable_hdmi(struct intel_encoder *);
+void intel_disable_hdmi(struct intel_encoder *);
+bool g4x_hdmi_connected(struct intel_hdmi *);
+
+
+struct drm_device *
+intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+ return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
+void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t enabled_bits;
+
+ enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+ WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
+}
+
+struct intel_hdmi *
+enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->hdmi;
+}
+
+struct intel_hdmi *
+intel_attached_hdmi(struct drm_connector *connector)
+{
+ return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+}
+
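+/*
+ * The infoframe checksum byte is chosen so that every header and payload
+ * byte, the checksum itself included, sums to zero modulo 256; zeroing
+ * the field before summing below is what makes 0x100 - sum satisfy that
+ * invariant.
+ */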
+void
+intel_dip_infoframe_csum(struct dip_infoframe *frame)
+{
+ uint8_t *data = (uint8_t *)frame;
+ uint8_t sum = 0;
+ unsigned i;
+
+ frame->checksum = 0;
+ frame->ecc = 0;
+
+ for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
+ sum += data[i];
+
+ frame->checksum = 0x100 - sum;
+}
+
+u32
+g4x_infoframe_index(struct dip_infoframe *frame)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_SELECT_AVI;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_SELECT_SPD;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+
+u32
+g4x_infoframe_enable(struct dip_infoframe *frame)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+
+u32
+hsw_infoframe_enable(struct dip_infoframe *frame)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI_HSW;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD_HSW;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+
+u32
+hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return HSW_TVIDEO_DIP_AVI_DATA(pipe);
+ case DIP_TYPE_SPD:
+ return HSW_TVIDEO_DIP_SPD_DATA(pipe);
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+
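+/*
+ * All the pre-DDI infoframe writers below follow the same sequence:
+ * select the frame type, disable it, stream the packed frame into the
+ * data register (padding out the full hardware buffer so the ECC is
+ * computed over known bytes), then re-enable it to be sent every VSync.
+ */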
+void
+g4x_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 val = I915_READ(VIDEO_DIP_CTL);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, val);
+
+	DRM_WRITEMEMORYBARRIER();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
+	DRM_WRITEMEMORYBARRIER();
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(VIDEO_DIP_CTL, val);
+ POSTING_READ(VIDEO_DIP_CTL);
+}
+
+void
+ibx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+
+ I915_WRITE(reg, val);
+
+ DRM_WRITEMEMORYBARRIER();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ DRM_WRITEMEMORYBARRIER();
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+void
+cpt_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ /* The DIP control register spec says that we need to update the AVI
+ * infoframe without clearing its enable bit */
+ if (frame->type != DIP_TYPE_AVI)
+ val &= ~g4x_infoframe_enable(frame);
+
+ I915_WRITE(reg, val);
+
+ DRM_WRITEMEMORYBARRIER();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ DRM_WRITEMEMORYBARRIER();
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+void
+vlv_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+
+ I915_WRITE(reg, val);
+
+ DRM_WRITEMEMORYBARRIER();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ DRM_WRITEMEMORYBARRIER();
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
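+/*
+ * Haswell moves the DIP buffers to per-pipe, per-frame-type data
+ * registers, so the writer below indexes data_reg by byte offset instead
+ * of feeding a single FIFO register, and there is no frequency field to
+ * program.
+ */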
+void
+hsw_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+ unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(ctl_reg);
+
+ if (data_reg == 0)
+ return;
+
+ val &= ~hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+
+ DRM_WRITEMEMORYBARRIER();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(data_reg + i, *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
+ DRM_WRITEMEMORYBARRIER();
+
+ val |= hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+ POSTING_READ(ctl_reg);
+}
+
+void
+intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_dip_infoframe_csum(frame);
+ intel_hdmi->write_infoframe(encoder, frame);
+}
+
+void
+intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
+ avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+
+ intel_set_infoframe(encoder, &avi_if);
+}
+
+void
+intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe spd_if;
+
+ memset(&spd_if, 0, sizeof(spd_if));
+ spd_if.type = DIP_TYPE_SPD;
+ spd_if.ver = DIP_VERSION_SPD;
+ spd_if.len = DIP_LEN_SPD;
+ strlcpy(spd_if.body.spd.vn, "Intel", sizeof(spd_if.body.spd.vn));
+ strlcpy(spd_if.body.spd.pd, "Integrated gfx", sizeof(spd_if.body.spd.pd));
+ spd_if.body.spd.sdi = DIP_SPD_PC;
+
+ intel_set_infoframe(encoder, &spd_if);
+}
+
+void
+g4x_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct inteldrm_softc *dev_priv = encoder->dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VIDEO_DIP_CTL;
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* If the registers were not initialized yet, they might be zeroes,
+ * which means we're selecting the AVI DIP and we're setting its
+	 * frequency to "once". This seems to really confuse the HW and make
+	 * things stop working (the register spec says the AVI always needs to
+	 * be sent every VSync). So here we avoid writing to the register more
+	 * than we need and also explicitly select the AVI DIP and explicitly
+	 * set its frequency to every VSync. Avoiding writing it twice seems to
+ * be enough to solve the problem, but being defensive shouldn't hurt us
+ * either. */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_hdmi->sdvox_reg) {
+ case SDVOB:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case SDVOC:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~VIDEO_DIP_ENABLE_VENDOR;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void
+ibx_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct inteldrm_softc *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_hdmi->sdvox_reg) {
+ case HDMIB:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case HDMIC:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ case HDMID:
+ port = VIDEO_DIP_PORT_D;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void
+cpt_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct inteldrm_softc *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ /* Set both together, unset both together: see the spec. */
+ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void
+vlv_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct inteldrm_softc *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void
+hsw_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct inteldrm_softc *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ I915_WRITE(reg, 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void
+intel_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 sdvox;
+
+ sdvox = SDVO_ENCODING_HDMI;
+ if (!HAS_PCH_SPLIT(dev))
+ sdvox |= intel_hdmi->color_range;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+
+ if (intel_crtc->bpp > 24)
+ sdvox |= COLOR_FORMAT_12bpc;
+ else
+ sdvox |= COLOR_FORMAT_8bpc;
+
+ /* Required on CPT */
+ if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+ sdvox |= HDMI_MODE_SELECT;
+
+ if (intel_hdmi->has_audio) {
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ sdvox |= SDVO_AUDIO_ENABLE;
+ sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+ else if (intel_crtc->pipe == PIPE_B)
+ sdvox |= SDVO_PIPE_B_SELECT;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
+}
+
+bool
+intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_hdmi->sdvox_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+void
+intel_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
+ if (intel_hdmi->has_audio)
+ enable_bits |= SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround for IBX: the disable path parks the port on
+	 * transcoder A, so restore the intended transcoder select here. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ enable_bits |= SDVO_PIPE_B_SELECT;
+ }
+
+	/* HW workaround: the enable bit has to be toggled off and on for
+	 * 12bpc, but we do it unconditionally since that has proved more
+	 * stable in testing.
+	 */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+
+ temp |= enable_bits;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround: write this twice, as the first write can otherwise
+	 * get masked.
+	 */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+}
+
+void
+intel_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround for IBX: we need to move the port to transcoder A
+	 * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ drm_msleep(50, "915dhd");
+ }
+ }
+
+	/* HW workaround: the enable bit has to be toggled off and on for
+	 * 12bpc, but we do it unconditionally since that has proved more
+	 * stable in testing.
+	 */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+
+ temp &= ~enable_bits;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround: write this twice, as the first write can otherwise
+	 * get masked.
+	 */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+}
+
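+/*
+ * A single TMDS link tops out at a 165 MHz pixel clock, hence the upper
+ * bound; implausibly slow dotclocks are rejected as well.
+ */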
+int
+intel_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ return MODE_OK;
+}
+
+bool
+intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+bool
+g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t bit;
+
+ switch (intel_hdmi->sdvox_reg) {
+ case SDVOB:
+ bit = HDMIB_HOTPLUG_LIVE_STATUS;
+ break;
+ case SDVOC:
+ bit = HDMIC_HOTPLUG_LIVE_STATUS;
+ break;
+ default:
+ bit = 0;
+ break;
+ }
+
+ return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
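+/*
+ * Detection is EDID driven: fetch the EDID over the port's DDC bus and,
+ * for digital sinks, use it to tell HDMI from plain DVI and to see
+ * whether the sink accepts audio.  On G4X the hotplug live-status bits
+ * are checked first so the slow DDC probe is skipped when nothing is
+ * attached.
+ */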
+enum drm_connector_status
+intel_hdmi_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
+ return status;
+
+ intel_hdmi->has_hdmi_sink = false;
+ intel_hdmi->has_audio = false;
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
+
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ }
+ free(edid, M_DRM);
+ }
+
+ if (status == connector_status_connected) {
+ if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+ intel_hdmi->has_audio =
+ (intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ }
+
+ return status;
+}
+
+int
+intel_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+
+ /* We should parse the EDID data and find out if it's an HDMI sink so
+ * we can send audio to it.
+ */
+
+ return intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
+}
+
+bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+ free(edid, M_DRM);
+ }
+
+ return has_audio;
+}
+
+int
+intel_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ enum hdmi_force_audio i = val;
+ bool has_audio;
+
+ if (i == intel_hdmi->force_audio)
+ return 0;
+
+ intel_hdmi->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_hdmi_detect_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (i == HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink = 0;
+
+ intel_hdmi->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_hdmi->color_range)
+ return 0;
+
+ intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_dig_port->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+
+ return 0;
+}
+
+void
+intel_hdmi_destroy(struct drm_connector *connector)
+{
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, M_DRM);
+}
+
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
+ .mode_fixup = intel_hdmi_mode_fixup,
+ .mode_set = intel_hdmi_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_hdmi_set_property,
+ .destroy = intel_hdmi_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
+ .get_modes = intel_hdmi_get_modes,
+ .mode_valid = intel_hdmi_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+}
+
+void
+intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+
+ drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ switch (port) {
+ case PORT_B:
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ break;
+ case PORT_C:
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ break;
+ case PORT_D:
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ break;
+ case PORT_A:
+ /* Internal port only for eDP. */
+ default:
+ BUG();
+ }
+
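+	/*
+	 * Pick the infoframe helpers matching this generation's video DIP
+	 * register layout: one shared register set on G4X, per-transcoder
+	 * sets on IBX/CPT, and distinct layouts again on VLV and HSW.
+	 */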
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_hdmi->write_infoframe = g4x_write_infoframe;
+ intel_hdmi->set_infoframes = g4x_set_infoframes;
+ } else if (IS_VALLEYVIEW(dev)) {
+ intel_hdmi->write_infoframe = vlv_write_infoframe;
+ intel_hdmi->set_infoframes = vlv_set_infoframes;
+ } else if (IS_HASWELL(dev)) {
+ intel_hdmi->write_infoframe = hsw_write_infoframe;
+ intel_hdmi->set_infoframes = hsw_set_infoframes;
+ } else if (HAS_PCH_IBX(dev)) {
+ intel_hdmi->write_infoframe = ibx_write_infoframe;
+ intel_hdmi->set_infoframes = ibx_set_infoframes;
+ } else {
+ intel_hdmi->write_infoframe = cpt_write_infoframe;
+ intel_hdmi->set_infoframes = cpt_set_infoframes;
+ }
+
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_hdmi_add_properties(intel_hdmi, connector);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
+
+void
+intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = malloc(sizeof(struct intel_digital_port), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = malloc(sizeof(struct intel_connector), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(intel_dig_port, M_DRM);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+
+ intel_dig_port->port = port;
+ intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+ intel_dig_port->dp.output_reg = 0;
+
+ intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/sys/dev/pci/drm/i915/intel_i2c.c b/sys/dev/pci/drm/i915/intel_i2c.c
new file mode 100644
index 00000000000..7f8a4ef68e6
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_i2c.c
@@ -0,0 +1,220 @@
+/* $OpenBSD: intel_i2c.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright (c) 2012 Mark Kettenis <kettenis@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#include <dev/i2c/i2cvar.h>
+
+int gmbus_i2c_acquire_bus(void *, int);
+void gmbus_i2c_release_bus(void *, int);
+int gmbus_i2c_exec(void *, i2c_op_t, i2c_addr_t, const void *, size_t,
+ void *buf, size_t, int);
+void i915_i2c_probe(struct inteldrm_softc *);
+
+int
+gmbus_i2c_acquire_bus(void *cookie, int flags)
+{
+ struct gmbus_port *gp = cookie;
+ struct inteldrm_softc *dev_priv = gp->dev_priv;
+
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0,
+ GMBUS_RATE_100KHZ | gp->port);
+
+ return (0);
+}
+
+void
+gmbus_i2c_release_bus(void *cookie, int flags)
+{
+ struct gmbus_port *gp = cookie;
+ struct inteldrm_softc *dev_priv = gp->dev_priv;
+
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
+}
+
+int
+gmbus_i2c_exec(void *cookie, i2c_op_t op, i2c_addr_t addr,
+ const void *cmdbuf, size_t cmdlen, void *buf, size_t len, int flags)
+{
+ struct gmbus_port *gp = cookie;
+ struct inteldrm_softc *dev_priv = gp->dev_priv;
+ uint32_t reg, st, val;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ uint8_t *b;
+ int i, retries;
+ uint16_t rem = len;
+ int bus_err = 0;
+
+ if (cmdlen > 1)
+ return (EOPNOTSUPP);
+
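+	/*
+	 * GMBUS3 carries four message bytes per access, packed
+	 * little-endian: byte 0 lands in bits 7:0, byte 1 in bits 15:8,
+	 * and so on, so writing the bytes 0xaa 0xbb loads 0x0000bbaa.
+	 */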
+ if (I2C_OP_WRITE_P(op)) {
+ val = 0;
+
+ b = buf;
+ for (i = 0; i < 4 && rem > 0; i++, rem--) {
+ val |= *b++ << (8 * i);
+ }
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ }
+
+ reg = 0;
+ if (len > 0)
+ reg |= GMBUS_CYCLE_WAIT;
+ if (I2C_OP_STOP_P(op))
+ reg |= GMBUS_CYCLE_STOP;
+ if (I2C_OP_READ_P(op)) {
+ reg |= GMBUS_SLAVE_READ;
+ if (cmdlen > 0)
+ reg |= GMBUS_CYCLE_INDEX;
+ b = (void *)cmdbuf;
+ if (cmdlen > 0)
+ reg |= (b[0] << GMBUS_SLAVE_INDEX_SHIFT);
+ }
+ if (I2C_OP_WRITE_P(op))
+ reg |= GMBUS_SLAVE_WRITE;
+ reg |= (addr << GMBUS_SLAVE_ADDR_SHIFT);
+ reg |= (len << GMBUS_BYTE_COUNT_SHIFT);
+ I915_WRITE(GMBUS1 + reg_offset, reg | GMBUS_SW_RDY);
+
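+	/*
+	 * Drain the data register four bytes at a time, polling GMBUS2
+	 * for HW_RDY (a word is available) or SATOER (slave NAK / bus
+	 * error) for up to 50ms before each access.
+	 */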
+ if (I2C_OP_READ_P(op)) {
+ b = buf;
+ while (len > 0) {
+ for (retries = 50; retries > 0; retries--) {
+ st = I915_READ(GMBUS2 + reg_offset);
+ if (st & (GMBUS_SATOER | GMBUS_HW_RDY))
+ break;
+ DELAY(1000);
+ }
+ if (st & GMBUS_SATOER) {
+ bus_err = 1;
+ goto out;
+ }
+ if ((st & GMBUS_HW_RDY) == 0)
+ return (ETIMEDOUT);
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ for (i = 0; i < 4 && len > 0; i++, len--) {
+ *b++ = val & 0xff;
+ val >>= 8;
+ }
+ }
+ }
+
+	if (I2C_OP_WRITE_P(op)) {
+		while (rem > 0) {
+			/* wait for the controller to drain the last word */
+			for (retries = 50; retries > 0; retries--) {
+				st = I915_READ(GMBUS2 + reg_offset);
+				if (st & GMBUS_HW_RDY)
+					break;
+				DELAY(1000);
+			}
+			if ((st & GMBUS_HW_RDY) == 0)
+				return (ETIMEDOUT);
+			val = 0;
+			for (i = 0; i < 4 && rem > 0; i++, rem--) {
+				val |= *b++ << (8 * i);
+			}
+			I915_WRITE(GMBUS3 + reg_offset, val);
+		}
+	}
+
+out:
+ for (retries = 10; retries > 0; retries--) {
+ st = I915_READ(GMBUS2 + reg_offset);
+ if ((st & GMBUS_ACTIVE) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (st & GMBUS_ACTIVE)
+ return (ETIMEDOUT);
+
+ /* after the bus is idle clear the bus error */
+ if (bus_err) {
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ I915_WRITE(GMBUS1 + reg_offset, 0);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
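+/*
+ * Debug/bring-up helper: read the first EDID block (i2c address 0x50)
+ * over the panel's GMBUS pin pair and dump it to the console.
+ */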
+void
+i915_i2c_probe(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ struct gmbus_port gp;
+ struct i2c_controller ic;
+ uint8_t buf[128];
+ uint8_t cmd;
+ int err, i;
+
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else
+ dev_priv->gpio_mmio_base = 0;
+
+ gp.dev_priv = dev_priv;
+ gp.port = GMBUS_PORT_PANEL;
+
+ ic.ic_cookie = &gp;
+ ic.ic_acquire_bus = gmbus_i2c_acquire_bus;
+ ic.ic_release_bus = gmbus_i2c_release_bus;
+ ic.ic_exec = gmbus_i2c_exec;
+
+ bzero(buf, sizeof(buf));
+ iic_acquire_bus(&ic, 0);
+ cmd = 0;
+ err = iic_exec(&ic, I2C_OP_READ_WITH_STOP, 0x50, &cmd, 1, buf, 128, 0);
+ if (err)
+ printf("err %d\n", err);
+ iic_release_bus(&ic, 0);
+ for (i = 0; i < sizeof(buf); i++)
+ printf(" 0x%02x", buf[i]);
+ printf("\n");
+}
+
+int
+intel_setup_gmbus(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ int i;
+
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else
+ dev_priv->gpio_mmio_base = 0;
+
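+	/*
+	 * Hardware pin pairs are numbered from 1 (0 means disabled), so
+	 * gmbus[0] carries pin pair 1; intel_gmbus_get_adapter() undoes
+	 * the off-by-one when mapping a port back to an index.
+	 */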
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+ bus->gp.dev_priv = dev_priv;
+ bus->gp.port = i + 1;
+
+ bus->controller.ic_cookie = &bus->gp;
+ bus->controller.ic_acquire_bus = gmbus_i2c_acquire_bus;
+ bus->controller.ic_release_bus = gmbus_i2c_release_bus;
+ bus->controller.ic_exec = gmbus_i2c_exec;
+ }
+
+ return (0);
+}
+
+struct i2c_controller *
+intel_gmbus_get_adapter(drm_i915_private_t *dev_priv, unsigned port)
+{
+ /* -1 to map pin pair to gmbus index */
+ return (intel_gmbus_is_port_valid(port)) ?
+ &dev_priv->gmbus[port - 1].controller : NULL;
+}
diff --git a/sys/dev/pci/drm/i915/intel_lvds.c b/sys/dev/pci/drm/i915/intel_lvds.c
new file mode 100644
index 00000000000..3dc1e126154
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_lvds.c
@@ -0,0 +1,1207 @@
+/* $OpenBSD: intel_lvds.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_edid.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_connector {
+ struct intel_connector base;
+
+#ifdef notyet
+ struct notifier_block lid_notifier;
+#endif
+};
+
+struct intel_lvds_encoder {
+ struct intel_encoder base;
+
+ u32 pfit_control;
+ u32 pfit_pgm_ratios;
+ bool pfit_dirty;
+
+ struct intel_lvds_connector *attached_connector;
+};
+
+struct intel_lvds *to_intel_lvds(struct drm_encoder *);
+struct intel_lvds *intel_attached_lvds(struct drm_connector *);
+void intel_enable_lvds(struct intel_encoder *);
+void intel_disable_lvds(struct intel_encoder *);
+void intel_lvds_dpms(struct drm_encoder *, int);
+int intel_lvds_mode_valid(struct drm_connector *,
+ struct drm_display_mode *);
+void centre_horizontally(struct drm_display_mode *, int);
+void centre_vertically(struct drm_display_mode *, int);
+bool intel_lvds_mode_fixup(struct drm_encoder *,
+ const struct drm_display_mode *, struct drm_display_mode *);
+void intel_lvds_prepare(struct drm_encoder *);
+void intel_lvds_commit(struct drm_encoder *);
+void intel_lvds_mode_set(struct drm_encoder *, struct drm_display_mode *,
+ struct drm_display_mode *);
+enum drm_connector_status intel_lvds_detect(struct drm_connector *,
+ bool);
+int intel_lvds_get_modes(struct drm_connector *);
+int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *);
+void intel_lvds_destroy(struct drm_connector *);
+int intel_lvds_set_property(struct drm_connector *,
+ struct drm_property *, uint64_t);
+int intel_no_lvds_dmi_callback(const struct dmi_system_id *);
+void intel_find_lvds_downclock(struct drm_device *,
+ struct drm_display_mode *, struct drm_connector *);
+bool lvds_is_present_in_vbt(struct drm_device *, u8 *);
+bool intel_lvds_supported(struct drm_device *);
+struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *);
+struct intel_lvds_connector *to_lvds_connector(struct drm_connector *);
+bool intel_lvds_get_hw_state(struct intel_encoder *, enum pipe *);
+
+
+struct intel_lvds_encoder *
+to_lvds_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_lvds_encoder, base.base);
+}
+
+struct intel_lvds_connector *
+to_lvds_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct intel_lvds_connector, base.base);
+}
+
+bool
+intel_lvds_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 lvds_reg, tmp;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_reg = PCH_LVDS;
+ } else {
+ lvds_reg = LVDS;
+ }
+
+ tmp = I915_READ(lvds_reg);
+
+ if (!(tmp & LVDS_PORT_EN))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+void
+intel_enable_lvds(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 ctl_reg, lvds_reg, stat_reg;
+ int retries;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ stat_reg = PCH_PP_STATUS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ stat_reg = PP_STATUS;
+ }
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+
+ if (lvds_encoder->pfit_dirty) {
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to
+ * register description and PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ lvds_encoder->pfit_control,
+ lvds_encoder->pfit_pgm_ratios);
+
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+ lvds_encoder->pfit_dirty = false;
+ }
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_reg);
+ for (retries = 1000; retries > 0; retries--) {
+ if ((I915_READ(stat_reg) & PP_ON) != 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("timed out waiting for panel to power on\n");
+
+ intel_panel_enable_backlight(dev, intel_crtc->pipe);
+}
+
+void
+intel_disable_lvds(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 ctl_reg, lvds_reg, stat_reg;
+ int retries;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ stat_reg = PCH_PP_STATUS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ stat_reg = PP_STATUS;
+ }
+
+ intel_panel_disable_backlight(dev);
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ for (retries = 1000; retries > 0; retries--) {
+ if ((I915_READ(stat_reg) & PP_ON) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
+ if (lvds_encoder->pfit_control) {
+ I915_WRITE(PFIT_CONTROL, 0);
+ lvds_encoder->pfit_dirty = true;
+ }
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
+}
+
+int
+intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+
+ return MODE_OK;
+}
+
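+/*
+ * Center a mode inside the panel's native timings by splitting the spare
+ * pixels into equal borders while keeping the sync and blank widths
+ * unchanged; an 800 pixel wide mode on a 1024 pixel panel, for example,
+ * gets a 112 pixel border on each side.
+ */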
+void
+centre_horizontally(struct drm_display_mode *mode,
+ int width)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the hsync and hblank widths constant */
+ sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->hdisplay - width + 1) / 2;
+ border += border & 1; /* make the border even */
+
+ mode->crtc_hdisplay = width;
+ mode->crtc_hblank_start = width + border;
+ mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
+
+ mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+ mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+
+ mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
+}
+
+void
+centre_vertically(struct drm_display_mode *mode,
+ int height)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the vsync and vblank widths constant */
+ sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->vdisplay - height + 1) / 2;
+
+ mode->crtc_vdisplay = height;
+ mode->crtc_vblank_start = height + border;
+ mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
+
+ mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+ mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+
+ mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
+}
+
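+/*
+ * Fixed-point helper for the pre-965 programmed-ratio path: the scale
+ * factor is expressed in units of 1/4096 (ACCURACY = 12 bits) to avoid
+ * floating point.  For example, panel_fitter_scaling(1024, 1280) yields
+ * 1024 * 4096 / 1280 = 3276 (truncated), roughly 0.8 in 12-bit fixed
+ * point.
+ */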
+static inline u32
+panel_fitter_scaling(u32 source, u32 target)
+{
+ /*
+ * Floating point operation is not supported. So the FACTOR
+ * is defined, which can avoid the floating point computation
+ * when calculating the panel ratio.
+ */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+ u32 ratio = source * FACTOR / target;
+ return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
+
+bool
+intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+ struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ int pipe;
+
+ /* Should never happen!! */
+ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ DRM_ERROR("Can't support LVDS on pipe A\n");
+ return false;
+ }
+
+ if (intel_encoder_check_is_cloned(&lvds_encoder->base))
+ return false;
+
+ /*
+ * We have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
+ mode, adjusted_mode);
+ return true;
+ }
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay)
+ goto out;
+
+ /* 965+ wants fuzzy fitting */
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY);
+
+ /*
+ * Enable automatic panel scaling for non-native modes so that they fill
+ * the screen. Should be enabled before the pipe is enabled, according
+ * to register description and PRM.
+ * Change the value here to see the borders for debugging
+ */
+ for_each_pipe(pipe)
+ I915_WRITE(BCLRPAT(pipe), 0);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ switch (intel_connector->panel.fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ centre_horizontally(adjusted_mode, mode->hdisplay);
+ centre_vertically(adjusted_mode, mode->vdisplay);
+ border = LVDS_BORDER_ENABLE;
+ break;
+
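+	/*
+	 * The aspect comparisons below cross-multiply the two ratios,
+	 * panel_w * mode_h versus mode_w * panel_h, so a panel wider than
+	 * the requested mode pillarboxes and a taller one letterboxes,
+	 * with no division needed.
+	 */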
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+
+ /* 965+ is easy, it does everything in hw */
+ if (scaled_width > scaled_height)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
+ else if (scaled_width < scaled_height)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != mode->hdisplay)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+ } else {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->vdisplay != adjusted_mode->vdisplay) {
+ u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->hdisplay != adjusted_mode->hdisplay) {
+ u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else
+				/* Aspect ratios match; let the hw scale in both directions */
+ pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ break;
+
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ if (mode->vdisplay != adjusted_mode->vdisplay ||
+ mode->hdisplay != adjusted_mode->hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ /* If not enabling scaling, be consistent and always use 0. */
+ if ((pfit_control & PFIT_ENABLE) == 0) {
+ pfit_control = 0;
+ pfit_pgm_ratios = 0;
+ }
+
+ /* Make sure pre-965 set dither correctly */
+ if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ if (pfit_control != lvds_encoder->pfit_control ||
+ pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+ lvds_encoder->pfit_control = pfit_control;
+ lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+ lvds_encoder->pfit_dirty = true;
+ }
+ dev_priv->lvds_border_bits = border;
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+void
+intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
+ * connected and closed means disconnected. We also send hotplug events as
+ * needed, using lid status notification from the input layer.
+ */
+enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status status;
+
+ status = intel_panel_detect(dev);
+ if (status != connector_status_unknown)
+ return status;
+
+ return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+int
+intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+
+ /* use cached edid if we have one */
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ return drm_add_edid_modes(connector, lvds_connector->base.edid);
+
+ mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+ if (mode == NULL)
+ return 0;
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+int
+intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+ printf("Skipping forced modeset for %s\n", id->ident);
+ return 1;
+}
+
+/* The GPU hangs up on these systems if modeset is performed on LID open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+ {
+ .callback = intel_no_modeset_on_lid_dmi_callback,
+ .ident = "Toshiba Tecra A11",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
+#ifdef notyet
+/*
+ * Lid events. Note the use of 'modeset_on_lid':
+ * - we set it on lid close, and reset it on open
+ * - we use it as a "only once" bit (ie we ignore
+ * duplicate events where it was already properly
+ * set/reset)
+ * - the suspend/resume paths will also set it to
+ * zero, since they restore the mode ("lid open").
+ */
+int
+intel_lid_notify(struct notifier_block *nb, unsigned long val,
+ void *unused)
+{
+ struct intel_lvds_connector *lvds_connector =
+ container_of(nb, struct intel_lvds_connector, lid_notifier);
+ struct drm_connector *connector = &lvds_connector->base.base;
+ struct drm_device *dev = connector->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return NOTIFY_OK;
+
+ /*
+	 * check and update the status of the LVDS connector after receiving
+	 * the lid notification event.
+ */
+ connector->status = connector->funcs->detect(connector, false);
+
+ /* Don't force modeset on machines where it causes a GPU lockup */
+ if (dmi_check_system(intel_no_modeset_on_lid))
+ return NOTIFY_OK;
+ if (!acpi_lid_open()) {
+ dev_priv->modeset_on_lid = 1;
+ return NOTIFY_OK;
+ }
+
+ if (!dev_priv->modeset_on_lid)
+ return NOTIFY_OK;
+
+ dev_priv->modeset_on_lid = 0;
+
+ rw_enter_write(&dev->mode_config.rwl);
+ intel_modeset_setup_hw_state(dev, true);
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return NOTIFY_OK;
+}
+#endif
+
+/**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void
+intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct intel_lvds_connector *lvds_connector =
+ to_lvds_connector(connector);
+
+#ifdef notyet
+ if (lvds_connector->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+#endif
+
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ free(lvds_connector->base.edid, M_DRM);
+
+ intel_panel_destroy_backlight(connector->dev);
+ intel_panel_fini(&lvds_connector->base.panel);
+
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, M_DRM);
+}
+
+int
+intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct drm_crtc *crtc;
+
+ if (value == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == value) {
+ /* the LVDS scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = value;
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
+ if (crtc && crtc->enabled) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
+ .mode_fixup = intel_lvds_mode_fixup,
+ .mode_set = intel_lvds_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+ .get_modes = intel_lvds_get_modes,
+ .mode_valid = intel_lvds_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_lvds_set_property,
+ .destroy = intel_lvds_destroy,
+};
+
+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+int
+intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+{
+ printf("Skipping LVDS initialization for %s\n", id->ident);
+ return 1;
+}
+
+/* These systems claim to have LVDS, but really don't */
+static const struct dmi_system_id intel_no_lvds[] = {
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Apple Mac Mini (Core series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Apple Mac Mini (Core 2 series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI IM-945GSE-A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell Studio Hybrid",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell OptiPlex FX170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC MP915",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Aopen i945GTt-VFA",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus EeeBox PC EB1007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI Wind Box DC500",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Supermicro X7SPA-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
+/**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+void
+intel_find_lvds_downclock(struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_display_mode *scan;
+ int temp_downclock;
+
+ temp_downclock = fixed_mode->clock;
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+ * If a probed mode has the same timings as the fixed panel
+ * mode but a lower pixel clock (i.e. a lower refresh rate),
+ * we have found a reduced downclock for the LVDS. In that
+ * case we can program different FPx0/1 dividers to select
+ * dynamically between the low and high frequencies.
+ */
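+ /*
+ * (Illustrative numbers only: a panel whose EDID carries its
+ * native timings at both 71000kHz and 59000kHz would yield
+ * 59000 as the downclock here.)
+ */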
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+ * We already found a downclock, but keep
+ * looking for an even lower one.
+ */
+ temp_downclock = scan->clock;
+ }
+ }
+ }
+ if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+ "Normal clock %dKhz, downclock %dKhz\n",
+ fixed_mode->clock, temp_downclock);
+ }
+}
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * Returns true if it is present and false if it is not.
+ * If no child dev was parsed from the VBT, assume the LVDS is present.
+ */
+bool
+lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (intel_gmbus_is_port_valid(child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
+bool
+intel_lvds_supported(struct drm_device *dev)
+{
+ /* With the introduction of the PCH we gained a dedicated
+ * LVDS presence pin, use it. */
+ if (HAS_PCH_SPLIT(dev))
+ return true;
+
+ /* Otherwise LVDS was only attached to mobile products,
+ * except for the inglorious 830gm */
+ return IS_MOBILE(dev) && !IS_I830(dev);
+}
+
+/**
+ * intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+bool
+intel_lvds_init(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_lvds_encoder *lvds_encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_lvds_connector *lvds_connector;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edid *edid;
+ struct drm_crtc *crtc;
+ u32 lvds;
+ int pipe;
+ u8 pin;
+
+ if (!intel_lvds_supported(dev))
+ return false;
+
+ /* Skip init on machines we know falsely report LVDS */
+ if (dmi_check_system(intel_no_lvds))
+ return false;
+
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return false;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+ return false;
+ if (dev_priv->edp.support) {
+ DRM_DEBUG_KMS("disable LVDS for eDP support\n");
+ return false;
+ }
+ }
+
+ lvds_encoder = malloc(sizeof(struct intel_lvds_encoder), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!lvds_encoder)
+ return false;
+
+ lvds_connector = malloc(sizeof(struct intel_lvds_connector), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!lvds_connector) {
+ free(lvds_encoder, M_DRM);
+ return false;
+ }
+
+ lvds_encoder->attached_connector = lvds_connector;
+
+ if (!HAS_PCH_SPLIT(dev)) {
+ lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
+ }
+
+ intel_encoder = &lvds_encoder->base;
+ encoder = &intel_encoder->base;
+ intel_connector = &lvds_connector->base;
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->disable = intel_disable_lvds;
+ intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ intel_encoder->cloneable = false;
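+ /* LVDS can only be driven by certain pipes: all three on PCH
+ * platforms, pipes A and B on gen4, and only pipe B before that */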
+ if (HAS_PCH_SPLIT(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ else if (IS_GEN4(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ else
+ intel_encoder->crtc_mask = (1 << 1);
+
+ drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* create the scaling mode property */
+ drm_mode_create_scaling_mode_property(dev);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ } else {
+ free(edid, M_DRM);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ lvds_connector->base.edid = edid;
+
+ if (IS_ERR_OR_NULL(edid)) {
+ /*
+ * Didn't get an EDID, so set wide sync ranges to ensure all
+ * modes are handed to valid_mode for checking.
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ }
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ DRM_DEBUG_KMS("using preferred mode from EDID: ");
+ drm_mode_debug_printmodeline(scan);
+
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ if (fixed_mode) {
+ intel_find_lvds_downclock(dev, fixed_mode,
+ connector);
+ goto out;
+ }
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? */
+ if (dev_priv->lfp_lvds_vbt_mode) {
+ DRM_DEBUG_KMS("using mode from VBT: ");
+ drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode) {
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ goto out;
+ }
+ }
+
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+
+ /* Ironlake: FIXME: if we still have no mode, don't try the pipe mode for now */
+ if (HAS_PCH_SPLIT(dev))
+ goto failed;
+
+ lvds = I915_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ goto out;
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!fixed_mode)
+ goto failed;
+
+out:
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else {
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ }
+#ifdef notyet
+ lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
+ DRM_DEBUG_KMS("lid notifier registration failed\n");
+ lvds_connector->lid_notifier.notifier_call = NULL;
+ }
+ drm_sysfs_connector_add(connector);
+#endif
+
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
+
+ return true;
+
+failed:
+ DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+ drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
+ if (fixed_mode)
+ drm_mode_destroy(dev, fixed_mode);
+ free(lvds_encoder, M_DRM);
+ free(lvds_connector, M_DRM);
+ return false;
+}
diff --git a/sys/dev/pci/drm/i915/intel_modes.c b/sys/dev/pci/drm/i915/intel_modes.c
new file mode 100644
index 00000000000..ea6121c019f
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_modes.c
@@ -0,0 +1,128 @@
+/* $OpenBSD: intel_modes.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <dev/pci/drm/drm_edid.h>
+#include <dev/pci/drm/drm_crtc.h>
+
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int
+intel_connector_update_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int ret;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ return ret;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: i2c adapter
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int
+intel_ddc_get_modes(struct drm_connector *connector, struct i2c_controller *adapter)
+{
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ free(edid, M_DRM);
+
+ return ret;
+}
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+ { HDMI_AUDIO_OFF_DVI, "force-dvi" },
+ { HDMI_AUDIO_OFF, "off" },
+ { HDMI_AUDIO_AUTO, "auto" },
+ { HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->force_audio_property;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, 0,
+ "audio",
+ force_audio_names,
+ nitems(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->force_audio_property = prop;
+ }
+ drm_object_attach_property(&connector->base, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+ { 0, "Full" },
+ { 1, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ nitems(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop, 0);
+}
diff --git a/sys/dev/pci/drm/i915/intel_opregion.c b/sys/dev/pci/drm/i915/intel_opregion.c
new file mode 100644
index 00000000000..9255bdfe1e7
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_opregion.c
@@ -0,0 +1,570 @@
+/* $OpenBSD: intel_opregion.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#include <dev/isa/isareg.h>
+#include <dev/isa/isavar.h>
+#include <dev/acpi/acpivar.h>
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET 0x200
+#define OPREGION_ASLE_OFFSET 0x300
+#define OPREGION_VBT_OFFSET 0x400
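+/* The opregion is one physically contiguous block; the mailboxes sit at
+ * the fixed offsets above and the VBT typically occupies the remainder. */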
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI (1<<0)
+#define MBOX_SWSCI (1<<1)
+#define MBOX_ASLE (1<<2)
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __attribute__((packed));
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
+} __attribute__((packed));
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+ u32 scic; /* SWSCI command|status|data */
+ u32 parm; /* command parameters */
+ u32 dslp; /* driver sleep time-out */
+ u8 rsvd[244];
+} __attribute__((packed));
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u8 rsvd[102];
+} __attribute__((packed));
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM (1 << 0)
+#define ASLE_SET_BACKLIGHT (1 << 1)
+#define ASLE_SET_PFIT (1 << 2)
+#define ASLE_SET_PWM_FREQ (1 << 3)
+#define ASLE_REQ_MSK 0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID (1<<31)
+
+#define ACPI_OTHER_OUTPUT (0<<8)
+#define ACPI_VGA_OUTPUT (1<<8)
+#define ACPI_TV_OUTPUT (2<<8)
+#define ACPI_DIGITAL_OUTPUT (3<<8)
+#define ACPI_LVDS_OUTPUT (4<<8)
+
+#ifdef CONFIG_ACPI
+u32
+asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 max;
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ max = intel_panel_get_max_backlight(dev);
+ intel_panel_set_backlight(dev, bclp * max / 255);
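+ /* echo the brightness back to the BIOS as a percentage (0x64 == 100) */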
+ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
+
+u32
+asle_set_als_illum(struct drm_device *dev, u32 alsi)
+{
+ /* alsi is the current ALS reading in lux. 0 indicates below sensor
+ range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+ return 0;
+}
+
+u32
+asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ if (pfmb & ASLE_PFMB_PWM_VALID) {
+ u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
+ blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
+ pwm = pwm >> 9;
+ /* FIXME - what do we do with the PWM? */
+ }
+ return 0;
+}
+
+u32
+asle_set_pfit(struct drm_device *dev, u32 pfit)
+{
+ /* Panel fitting is currently controlled by the X code, so this is a
+ noop until modesetting support works fully */
+ if (!(pfit & ASLE_PFIT_VALID))
+ return ASLE_PFIT_FAILED;
+ return 0;
+}
+
+void
+intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+
+ if (!asle_req) {
+ DRM_DEBUG("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_ALS_ILLUM)
+ asle_stat |= asle_set_als_illum(dev, asle->alsi);
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+ if (asle_req & ASLE_SET_PFIT)
+ asle_stat |= asle_set_pfit(dev, asle->pfit);
+
+ if (asle_req & ASLE_SET_PWM_FREQ)
+ asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+
+ asle->aslc = asle_stat;
+}
+
+void
+intel_opregion_gse_intr(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+
+ if (!asle_req) {
+ DRM_DEBUG("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_ALS_ILLUM) {
+ DRM_DEBUG("Illum is not supported\n");
+ asle_stat |= ASLE_ALS_ILLUM_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+ if (asle_req & ASLE_SET_PFIT) {
+ DRM_DEBUG("Pfit is not supported\n");
+ asle_stat |= ASLE_PFIT_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_PWM_FREQ) {
+ DRM_DEBUG("PWM freq is not supported\n");
+ asle_stat |= ASLE_PWM_FREQ_FAILED;
+ }
+
+ asle->aslc = asle_stat;
+}
+
+#define ASLE_ALS_EN (1<<0)
+#define ASLE_BLC_EN (1<<1)
+#define ASLE_PFIT_EN (1<<2)
+#define ASLE_PFMB_EN (1<<3)
+
+void
+intel_opregion_enable_asle(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+
+ if (asle) {
+ if (IS_MOBILE(dev))
+ intel_enable_asle(dev);
+
+ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+ ASLE_PFMB_EN;
+ asle->ardy = 1;
+ }
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID (1<<1)
+#define ACPI_EV_DOCK (1<<2)
+
+static struct intel_opregion *system_opregion;
+
+int
+intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ /* The only video events relevant to opregion are 0x80. These indicate
+ either a docking event, lid switch or display switch request. In
+ Linux, these are handled by the dock, button and video drivers.
+ */
+ struct opregion_acpi *acpi;
+ struct acpi_bus_event *event = data;
+ int ret = NOTIFY_OK;
+
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ if (!system_opregion)
+ return NOTIFY_DONE;
+
+ acpi = system_opregion->acpi;
+
+ if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ ret = NOTIFY_BAD;
+
+ acpi->csts = 0;
+
+ return ret;
+}
+
+static struct notifier_block intel_opregion_notifier = {
+ .notifier_call = intel_opregion_video_event,
+};
+
+/*
+ * Initialise the DIDL field in opregion. This passes a list of devices to
+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
+ * (version 3)
+ */
+
+void
+intel_didl_outputs(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ struct drm_connector *connector;
+ acpi_handle handle;
+ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+ unsigned long long device_id;
+ acpi_status status;
+ int i = 0;
+
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ return;
+
+ if (acpi_is_video_device(acpi_dev))
+ acpi_video_bus = acpi_dev;
+ else {
+ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+ if (acpi_is_video_device(acpi_cdev)) {
+ acpi_video_bus = acpi_cdev;
+ break;
+ }
+ }
+ }
+
+ if (!acpi_video_bus) {
+ printk(KERN_WARNING "No ACPI video bus found\n");
+ return;
+ }
+
+ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+ if (i >= 8) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "More than 8 outputs detected\n");
+ return;
+ }
+ status =
+ acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
+ if (ACPI_SUCCESS(status)) {
+ if (!device_id)
+ goto blind_set;
+ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ i++;
+ }
+ }
+
+end:
+ /* If fewer than 8 outputs, the list must be null terminated */
+ if (i < 8)
+ opregion->acpi->didl[i] = 0;
+ return;
+
+blind_set:
+ i = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ int output_type = ACPI_OTHER_OUTPUT;
+ if (i >= 8) {
+ device_printf(dev->device,
+ "More than 8 outputs detected\n");
+ return;
+ }
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ output_type = ACPI_VGA_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ output_type = ACPI_TV_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ output_type = ACPI_DIGITAL_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ output_type = ACPI_LVDS_OUTPUT;
+ break;
+ }
+ opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+ i++;
+ }
+ goto end;
+}
+
+void
+intel_opregion_init(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_didl_outputs(dev);
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
+ * We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ if (opregion->asle)
+ intel_opregion_enable_asle(dev);
+}
+
+void
+intel_opregion_fini(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+#else
+int
+intel_opregion_init(struct drm_device *dev)
+{
+
+ return (0);
+}
+
+void
+intel_opregion_fini(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv;
+ struct intel_opregion *opregion;
+
+ dev_priv = dev->dev_private;
+ opregion = &dev_priv->opregion;
+
+ if (opregion->header == NULL)
+ return;
+
+ bus_space_unmap(dev_priv->bst, dev_priv->opregion_ioh, OPREGION_SIZE);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+#endif
+
+int
+intel_opregion_setup(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ void *base;
+ u32 asls, mboxes;
+ int err = 0;
+
+ asls = pci_conf_read(dev_priv->pc, dev_priv->tag, PCI_ASLS);
+ DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+ if (asls == 0) {
+ DRM_DEBUG("ACPI OpRegion not supported!\n");
+ return -ENOTSUP;
+ }
+
+ if (bus_space_map(dev_priv->bst, asls, OPREGION_SIZE,
+ BUS_SPACE_MAP_LINEAR, &dev_priv->opregion_ioh)) {
+ DRM_DEBUG("could not map opregion!\n");
+ return -ENOMEM;
+ }
+ base = bus_space_vaddr(dev_priv->bst, dev_priv->opregion_ioh);
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ DRM_DEBUG("opregion signature mismatch\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+ opregion->header = (struct opregion_header *)base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+ opregion->lid_state = (u32 *)(base + ACPI_CLID);
+
+ mboxes = opregion->header->mboxes;
+ if (mboxes & MBOX_ACPI) {
+ DRM_DEBUG("Public ACPI methods supported\n");
+ opregion->acpi = (struct opregion_acpi *)(base +
+ OPREGION_ACPI_OFFSET);
+ }
+
+ if (mboxes & MBOX_SWSCI) {
+ DRM_DEBUG("SWSCI supported\n");
+ opregion->swsci = (struct opregion_swsci *)(base +
+ OPREGION_SWSCI_OFFSET);
+ }
+ if (mboxes & MBOX_ASLE) {
+ DRM_DEBUG("ASLE supported\n");
+ opregion->asle = (struct opregion_asle *)(base +
+ OPREGION_ASLE_OFFSET);
+ }
+
+ return 0;
+
+err_out:
+ bus_space_unmap(dev_priv->bst, dev_priv->opregion_ioh, OPREGION_SIZE);
+ return err;
+}
diff --git a/sys/dev/pci/drm/i915/intel_overlay.c b/sys/dev/pci/drm/i915/intel_overlay.c
new file mode 100644
index 00000000000..312a7e37293
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_overlay.c
@@ -0,0 +1,1631 @@
+/* $OpenBSD: intel_overlay.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/* Limits for overlay size. According to the Intel docs the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But xorg assumes 2048 for both height and
+ * width, so use the minimum of the two. */
+#define IMAGE_MAX_WIDTH 2048
+#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY 1024
+#define IMAGE_MAX_HEIGHT_LEGACY 1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE (0x1<<19)
+#define OCMD_MIRROR_MASK (0x3<<17)
+#define OCMD_MIRROR_MODE (0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
+#define OCMD_MIRROR_VERTICAL (0x2<<17)
+#define OCMD_MIRROR_BOTH (0x3<<17)
+#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED (0x8<<10)
+#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR (0xc<<10)
+#define OCMD_YUV_422_PLANAR (0xd<<10)
+#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
+#define OCMD_BUF_TYPE_FRAME (0x0<<5)
+#define OCMD_BUF_TYPE_FIELD (0x1<<5)
+#define OCMD_TEST_MODE (0x1<<4)
+#define OCMD_BUFFER_SELECT (0x3<<2)
+#define OCMD_BUFFER0 (0x0<<2)
+#define OCMD_BUFFER1 (0x1<<2)
+#define OCMD_FIELD_SELECT (0x1<<2)
+#define OCMD_FIELD0 (0x0<<1)
+#define OCMD_FIELD1 (0x1<<1)
+#define OCMD_ENABLE (0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK (0x1<<18)
+#define OCONF_PIPE_A (0x0<<18)
+#define OCONF_PIPE_B (0x1<<18)
+#define OCONF_GAMMA2_ENABLE (0x1<<16)
+#define OCONF_CSC_MODE_BT601 (0x0<<5)
+#define OCONF_CSC_MODE_BT709 (0x1<<5)
+#define OCONF_CSC_BYPASS (0x1<<4)
+#define OCONF_CC_OUT_8BIT (0x1<<3)
+#define OCONF_TEST_MODE (0x1<<2)
+#define OCONF_THREE_LINE_BUFFER (0x1<<0)
+#define OCONF_TWO_LINE_BUFFER (0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE (0x1<<31)
+#define CLK_RGB24_MASK 0x0
+#define CLK_RGB16_MASK 0x070307
+#define CLK_RGB15_MASK 0x070707
+#define CLK_RGB8I_MASK 0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+ (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+ (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
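+/* These expand a 5:6:5 (or 5:5:5) framebuffer colorkey to 8:8:8 by
+ * shifting each channel into the top bits of its byte; the low bits
+ * are left as zero. */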
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS 5
+#define N_VERT_Y_TAPS 3
+#define N_HORIZ_UV_TAPS 3
+#define N_VERT_UV_TAPS 3
+#define N_PHASES 17
+#define MAX_TAPS 5
+
+/* memory buffered overlay registers */
+struct overlay_registers {
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ /* flip handling */
+ uint32_t last_flip_req;
+ void (*flip_tail)(struct intel_overlay *);
+};
+
+struct put_image_params {
+ int format;
+ short dst_x;
+ short dst_y;
+ short dst_w;
+ short dst_h;
+ short src_w;
+ short src_scan_h;
+ short src_scan_w;
+ short src_h;
+ short stride_Y;
+ short stride_UV;
+ int offset_Y;
+ int offset_U;
+ int offset_V;
+};
+
+struct overlay_registers *intel_overlay_map_regs(struct intel_overlay *);
+void intel_overlay_unmap_regs(struct intel_overlay *,
+ struct overlay_registers *);
+void intel_overlay_off_tail(struct intel_overlay *);
+int intel_overlay_off(struct intel_overlay *);
+int intel_overlay_recover_from_interrupt(struct intel_overlay *);
+int intel_overlay_release_old_vid(struct intel_overlay *);
+int packed_depth_bytes(u32);
+int packed_width_bytes(u32, short);
+int uv_hsubsampling(u32);
+int uv_vsubsampling(u32);
+int intel_overlay_do_put_image(struct intel_overlay *,
+ struct drm_i915_gem_object *, struct put_image_params *);
+int check_overlay_possible_on_crtc(struct intel_overlay *,
+ struct intel_crtc *);
+void update_pfit_vscale_ratio(struct intel_overlay *);
+int check_overlay_dst(struct intel_overlay *,
+ struct drm_intel_overlay_put_image *);
+int check_overlay_scaling(struct put_image_params *);
+int check_overlay_src(struct drm_device *,
+ struct drm_intel_overlay_put_image *,
+ struct drm_i915_gem_object *);
+int intel_panel_fitter_pipe(struct drm_device *);
+void update_reg_attrs(struct intel_overlay *, struct overlay_registers *);
+bool check_gamma_bounds(u32, u32);
+bool check_gamma5_errata(u32);
+int check_gamma(struct drm_intel_overlay_attrs *);
+void update_polyphase_filter(struct overlay_registers *);
+bool update_scaling_factors(struct intel_overlay *,
+ struct overlay_registers *, struct put_image_params *);
+void update_colorkey(struct intel_overlay *, struct overlay_registers *);
+u32 overlay_cmd_reg(struct put_image_params *);
+u32 max_u32(u32, u32);
+int intel_overlay_on(struct intel_overlay *);
+u32 calc_swidthsw(struct drm_device *, u32, u32);
+int intel_overlay_continue(struct intel_overlay *, bool);
+int intel_overlay_do_wait_request(struct intel_overlay *,
+ void (*)(struct intel_overlay *));
+int i830_activate_pipe_a(struct drm_device *);
+void i830_deactivate_pipe_a(struct drm_device *);
+void intel_overlay_release_old_vid_tail(struct intel_overlay *);
+
+struct overlay_registers *
+intel_overlay_map_regs(struct intel_overlay *overlay)
+{
+// drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = (struct overlay_registers *)overlay->reg_bo->phys_obj->handle->kva;
+ else {
+#ifdef notyet
+ regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
+#else
+ printf("%s partial stub\n", __func__);
+ return (NULL);
+#endif
+ }
+
+ return regs;
+}
+
+void
+intel_overlay_unmap_regs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+#ifdef notyet
+ io_mapping_unmap(regs);
+#else
+ printf("%s partial stub\n", __func__);
+#endif
+}
+
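+/*
+ * Queue a request on the render ring and block until it completes,
+ * remembering the tail hook so an interrupted wait can be finished
+ * later by intel_overlay_recover_from_interrupt().
+ */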
+int
+intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ void (*tail)(struct intel_overlay *))
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ BUG_ON(overlay->last_flip_req);
+ ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+ if (ret)
+ return ret;
+
+ overlay->flip_tail = tail;
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ if (ret)
+ return ret;
+ i915_gem_retire_requests(dev_priv);
+
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* overlay needs to be disabled in the OCMD reg */
+int
+intel_overlay_on(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ BUG_ON(overlay->active);
+ overlay->active = 1;
+
+ WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return intel_overlay_do_wait_request(overlay, NULL);
+}
+
+/* overlay needs to be enabled in OCMD reg */
+int
+intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ u32 flip_addr = overlay->flip_addr;
+ u32 tmp;
+ int ret;
+
+ BUG_ON(!overlay->active);
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ /* check for underruns */
+ tmp = I915_READ(DOVSTA);
+ if (tmp & (1 << 17))
+ DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_advance(ring);
+
+ return i915_add_request(ring, NULL, &overlay->last_flip_req);
+}
+
+void
+intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+{
+ struct drm_i915_gem_object *obj = overlay->old_vid_bo;
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ overlay->old_vid_bo = NULL;
+}
+
+void
+intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_i915_gem_object *obj = overlay->vid_bo;
+
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ overlay->vid_bo = NULL;
+
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+int
+intel_overlay_off(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ u32 flip_addr = overlay->flip_addr;
+ int ret;
+
+ BUG_ON(!overlay->active);
+
+ /* According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases */
+ flip_addr |= OFC_UPDATE;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ /* wait for overlay to go idle */
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ /* turn overlay off */
+ if (IS_I830(dev)) {
+ /* Workaround: Don't disable the overlay fully, since otherwise
+ * it dies on the next OVERLAY_ON cmd. */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ } else {
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ }
+ intel_ring_advance(ring);
+
+ return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+}
+
+/* Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and to make forward progress. */
+int
+intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ if (overlay->last_flip_req == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ if (ret)
+ return ret;
+ i915_gem_retire_requests(dev_priv);
+
+ if (overlay->flip_tail)
+ overlay->flip_tail(overlay);
+
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs
+ */
+int
+intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ /* Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
+ if (!overlay->old_vid_bo)
+ return 0;
+
+ if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ /* synchronous slowpath */
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_overlay_do_wait_request(overlay,
+ intel_overlay_release_old_vid_tail);
+ if (ret)
+ return ret;
+ }
+
+ intel_overlay_release_old_vid_tail(overlay);
+ return 0;
+}
+
+int
+packed_depth_bytes(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
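+ /* YUY2-style packing: four bytes carry two pixels */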
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+packed_width_bytes(u32 format, short width)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+uv_hsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+uv_vsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
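+/*
+ * Compute the SWIDTHSW field from a surface offset and width: the number
+ * of fetch units spanned by [offset, offset + width), where a fetch is
+ * 32 bytes on gen2 and 64 bytes on later parts (hence the mask/shift
+ * pairs below), folded into the hardware's encoding. A surface that
+ * straddles a fetch boundary costs one extra unit, which is why the
+ * offset matters and not just the width.
+ */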
+u32
+calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+ u32 mask, shift, ret;
+ if (IS_GEN2(dev)) {
+ mask = 0x1f;
+ shift = 5;
+ } else {
+ mask = 0x3f;
+ shift = 6;
+ }
+ ret = ((offset + width + mask) >> shift) - (offset >> shift);
+ if (!IS_GEN2(dev))
+ ret <<= 1;
+ ret -= 1;
+ return ret << 2;
+}
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+ 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+ 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+ 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+ 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+ 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+ 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+ 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+ 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+ 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+ 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+ 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+ 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+ 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+ 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+ 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+ 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+ 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+ 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+ 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+ 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+ 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+ 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+ 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+ 0x3000, 0x0800, 0x3000
+};
+
+void
+update_polyphase_filter(struct overlay_registers *regs)
+{
+ memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+}
+
+bool
+update_scaling_factors(struct intel_overlay *overlay,
+ struct overlay_registers *regs,
+ struct put_image_params *params)
+{
+ /* fixed point with a 12 bit shift */
+ u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+ bool scale_changed = false;
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+
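+ /*
+ * Illustrative example: scanning a 1280-wide source onto a
+ * 640-wide destination gives
+ * xscale = ((1280 - 1) << 12) / 640 = 8185, i.e. about 2.0
+ * with 12 fractional bits.
+ */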
+ if (params->dst_w > 1)
+ xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+ /(params->dst_w);
+ else
+ xscale = 1 << FP_SHIFT;
+
+ if (params->dst_h > 1)
+ yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+ /(params->dst_h);
+ else
+ yscale = 1 << FP_SHIFT;
+
+ /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+ /* make the Y scale an exact multiple of the UV scale */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
+ /*} else {
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
+
+ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ scale_changed = true;
+ overlay->old_xscale = xscale;
+ overlay->old_yscale = yscale;
+
+ regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3));
+
+ regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3));
+
+ regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)));
+
+ if (scale_changed)
+ update_polyphase_filter(regs);
+
+ return scale_changed;
+}
+
+void
+update_colorkey(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ u32 key = overlay->color_key;
+
+ switch (overlay->crtc->base.fb->bits_per_pixel) {
+ case 8:
+ regs->DCLRKV = 0;
+ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ break;
+
+ case 16:
+ if (overlay->crtc->base.fb->depth == 15) {
+ regs->DCLRKV = RGB15_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ } else {
+ regs->DCLRKV = RGB16_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ }
+ break;
+
+ case 24:
+ case 32:
+ regs->DCLRKV = key;
+ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ break;
+ }
+}
+
+u32
+overlay_cmd_reg(struct put_image_params *params)
+{
+ u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
+ }
+ } else { /* YUV packed */
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
+ }
+
+ switch (params->format & I915_OVERLAY_SWAP_MASK) {
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
+ }
+ }
+
+ return cmd;
+}
+
+u32
+max_u32(u32 a, u32 b)
+{
+ return (a > b ? a : b);
+}
+
+int
+intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_i915_gem_object *new_bo,
+ struct put_image_params *params)
+{
+ int ret, tmp_width;
+ struct overlay_registers *regs;
+ bool scale_changed = false;
+ struct drm_device *dev = overlay->dev;
+ u32 swidth, swidthsw, sheight, ostride;
+
+#ifdef notyet
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+#endif
+ rw_assert_wrlock(&dev->mode_config.rwl);
+ BUG_ON(!overlay);
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
+ goto out_unpin;
+
+ if (!overlay->active) {
+ u32 oconfig;
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ oconfig = OCONF_CC_OUT_8BIT;
+ if (IS_GEN4(overlay->dev))
+ oconfig |= OCONF_CSC_MODE_BT709;
+ oconfig |= overlay->crtc->pipe == 0 ?
+ OCONF_PIPE_A : OCONF_PIPE_B;
+ regs->OCONFIG = oconfig;
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_on(overlay);
+ if (ret != 0)
+ goto out_unpin;
+ }
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
+ regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+
+ if (params->format & I915_OVERLAY_YUV_PACKED)
+ tmp_width = packed_width_bytes(params->format, params->src_w);
+ else
+ tmp_width = params->src_w;
+
+ swidth = params->src_w;
+ swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ sheight = params->src_h;
+ regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
+ ostride = params->stride_Y;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+ u32 tmp_U, tmp_V;
+ swidth |= (params->src_w/uv_hscale) << 16;
+ tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ params->src_w/uv_hscale);
+ tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ params->src_w/uv_hscale);
+ swidthsw |= max_u32(tmp_U, tmp_V) << 16;
+ sheight |= (params->src_h/uv_vscale) << 16;
+ regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+ regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
+ ostride |= params->stride_UV << 16;
+ }
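+
+ /*
+ * Packing example (illustrative): a 640-wide YUV420 source has
+ * uv_hscale == 2, so swidth becomes 640 | (320 << 16) = 0x01400280 --
+ * the Y width in the low half and the subsampled UV width in the
+ * high half.
+ */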
+
+ regs->SWIDTH = swidth;
+ regs->SWIDTHSW = swidthsw;
+ regs->SHEIGHT = sheight;
+ regs->OSTRIDE = ostride;
+
+ scale_changed = update_scaling_factors(overlay, regs, params);
+
+ update_colorkey(overlay, regs);
+
+ regs->OCMD = overlay_cmd_reg(params);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_continue(overlay, scale_changed);
+ if (ret)
+ goto out_unpin;
+
+ overlay->old_vid_bo = overlay->vid_bo;
+ overlay->vid_bo = new_bo;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(new_bo);
+ return ret;
+}
+
+int
+intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+ struct overlay_registers *regs;
+ struct drm_device *dev = overlay->dev;
+ int ret;
+
+#ifdef notyet
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+#endif
+ rw_assert_wrlock(&dev->mode_config.rwl);
+
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ return ret;
+
+ if (!overlay->active)
+ return 0;
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ regs = intel_overlay_map_regs(overlay);
+ regs->OCMD = 0;
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_off(overlay);
+ if (ret != 0)
+ return ret;
+
+ intel_overlay_off_tail(overlay);
+ return 0;
+}
+
+int
+check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ struct intel_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+
+ if (!crtc->active)
+ return -EINVAL;
+
+ /* can't use the overlay with double wide pipe */
+ if (INTEL_INFO(overlay->dev)->gen < 4 &&
+ (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+ return -EINVAL;
+
+ return 0;
+}
+
+void
+update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 ratio;
+
+ /* XXX: This is not the same logic as in the xorg driver, but more in
+ * line with the intel documentation for the i965
+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ } else {
+ if (pfit_control & VERT_AUTO_SCALE)
+ ratio = I915_READ(PFIT_AUTO_RATIOS);
+ else
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
+ }
+
+ overlay->pfit_vscale_ratio = ratio;
+}
+
+int
+check_overlay_dst(struct intel_overlay *overlay,
+ struct drm_intel_overlay_put_image *rec)
+{
+ struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+ if (rec->dst_x < mode->hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->hdisplay &&
+ rec->dst_y < mode->vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->vdisplay)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+int
+check_overlay_scaling(struct put_image_params *rec)
+{
+ u32 tmp;
+
+ /* downscaling limit is 8.0 */
+ tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+ tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+
+ return 0;
+}
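+
+/*
+ * The shift dance in check_overlay_scaling() computes floor(src/dst)
+ * from a 16.16 fixed-point quotient.  Illustrative numbers:
+ * src_scan_h = 4095, dst_h = 512 gives ((4095 << 16) / 512) >> 16 = 7
+ * and is accepted, while an exact 8:1 ratio yields 8 and is rejected,
+ * so the effective limit is a ratio strictly below 8.0.
+ */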
+
+int
+check_overlay_src(struct drm_device *dev,
+ struct drm_intel_overlay_put_image *rec,
+ struct drm_i915_gem_object *new_bo)
+{
+ int uv_hscale = uv_hsubsampling(rec->flags);
+ int uv_vscale = uv_vsubsampling(rec->flags);
+ u32 stride_mask;
+ int depth;
+ u32 tmp;
+
+ /* check src dimensions */
+ if (IS_845G(dev) || IS_I830(dev)) {
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ return -EINVAL;
+ } else {
+ if (rec->src_height > IMAGE_MAX_HEIGHT ||
+ rec->src_width > IMAGE_MAX_WIDTH)
+ return -EINVAL;
+ }
+
+ /* better safe than sorry, use 4 as the maximal subsampling ratio */
+ if (rec->src_height < N_VERT_Y_TAPS*4 ||
+ rec->src_width < N_HORIZ_Y_TAPS*4)
+ return -EINVAL;
+
+ /* check alignment constraints */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+
+ case I915_OVERLAY_YUV_PACKED:
+ if (uv_vscale != 1)
+ return -EINVAL;
+
+ depth = packed_depth_bytes(rec->flags);
+ if (depth < 0)
+ return depth;
+
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rec->src_width % uv_hscale)
+ return -EINVAL;
+
+ /* stride checking */
+ if (IS_I830(dev) || IS_845G(dev))
+ stride_mask = 255;
+ else
+ stride_mask = 63;
+
+ if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ return -EINVAL;
+ if (IS_GEN4(dev) && rec->stride_Y < 512)
+ return -EINVAL;
+
+ tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+ 4096 : 8192;
+ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+ return -EINVAL;
+
+ /* check buffer dimensions */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* always 4 Y values per depth pixels */
+ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y * rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+
+ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
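+
+/*
+ * Size math for the planar case above (illustrative): 640x480 YUV420
+ * with stride_Y = 640 and stride_UV = 320 needs 640 * 480 = 307200
+ * bytes of Y and 320 * (480 / 2) = 76800 bytes for each of U and V,
+ * and every offset + plane size must stay within new_bo->base.size.
+ */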
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+int
+intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /* i830 doesn't have a panel fitter */
+ if (IS_I830(dev))
+ return -1;
+
+ pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ if (IS_GEN4(dev))
+ return (pfit_control >> 29) & 0x3;
+
+ /* older chips can only use pipe 1 */
+ return 1;
+}
+
+int
+intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_put_image *put_image_rec = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *new_bo;
+ struct put_image_params *params;
+ int ret;
+
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+ rw_enter_write(&dev->mode_config.rwl);
+ DRM_LOCK();
+
+ ret = intel_overlay_switch_off(overlay);
+
+ DRM_UNLOCK();
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return ret;
+ }
+
+ params = malloc(sizeof(struct put_image_params), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!params)
+ return -ENOMEM;
+
+ drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!drmmode_obj) {
+ ret = -ENOENT;
+ goto out_free;
+ }
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+ new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle));
+ if (new_bo == NULL) {
+ ret = -ENOENT;
+ goto out_free;
+ }
+
+ rw_enter_write(&dev->mode_config.rwl);
+ DRM_LOCK();
+
+ if (new_bo->tiling_mode) {
+ DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->crtc != crtc) {
+ struct drm_display_mode *mode = &crtc->base.mode;
+ ret = intel_overlay_switch_off(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = check_overlay_possible_on_crtc(overlay, crtc);
+ if (ret != 0)
+ goto out_unlock;
+
+ overlay->crtc = crtc;
+ crtc->overlay = overlay;
+
+ /* line too wide, i.e. one-line-mode */
+ if (mode->hdisplay > 1024 &&
+ intel_panel_fitter_pipe(dev) == crtc->pipe) {
+ overlay->pfit_active = 1;
+ update_pfit_vscale_ratio(overlay);
+ } else
+ overlay->pfit_active = 0;
+ }
+
+ ret = check_overlay_dst(overlay, put_image_rec);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->pfit_active) {
+ params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+ overlay->pfit_vscale_ratio);
+ /* shifting right rounds downwards, so add 1 */
+ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+ overlay->pfit_vscale_ratio) + 1;
+ } else {
+ params->dst_y = put_image_rec->dst_y;
+ params->dst_h = put_image_rec->dst_height;
+ }
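+ /*
+ * Illustrative numbers for the pfit compensation above, assuming the
+ * ratio holds a 4.12 source/dest factor: a 768-line mode stretched to
+ * a 1024-line panel gives ratio = 0x0c00 (0.75), so dst_y = 300 maps
+ * to (300 << 12) / 0x0c00 = 400; dst_h gets the extra +1 to counter
+ * the downward rounding of the division.
+ */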
+ params->dst_x = put_image_rec->dst_x;
+ params->dst_w = put_image_rec->dst_width;
+
+ params->src_w = put_image_rec->src_width;
+ params->src_h = put_image_rec->src_height;
+ params->src_scan_w = put_image_rec->src_scan_width;
+ params->src_scan_h = put_image_rec->src_scan_height;
+ if (params->src_scan_h > params->src_h ||
+ params->src_scan_w > params->src_w) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = check_overlay_src(dev, put_image_rec, new_bo);
+ if (ret != 0)
+ goto out_unlock;
+ params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+ params->stride_Y = put_image_rec->stride_Y;
+ params->stride_UV = put_image_rec->stride_UV;
+ params->offset_Y = put_image_rec->offset_Y;
+ params->offset_U = put_image_rec->offset_U;
+ params->offset_V = put_image_rec->offset_V;
+
+ /* Check scaling after src size to prevent a divide-by-zero. */
+ ret = check_overlay_scaling(params);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ if (ret != 0)
+ goto out_unlock;
+
+ DRM_UNLOCK();
+ rw_exit_write(&dev->mode_config.rwl);
+
+ free(params, M_DRM);
+
+ return 0;
+
+out_unlock:
+ DRM_UNLOCK();
+ rw_exit_write(&dev->mode_config.rwl);
+ drm_gem_object_unreference_unlocked(&new_bo->base);
+out_free:
+ free(params, M_DRM);
+
+ return ret;
+}
+
+void
+update_reg_attrs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
+ regs->OCLRC1 = overlay->saturation;
+}
+
+bool
+check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+ int i;
+
+ if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+ return false;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ return false;
+ }
+
+ return true;
+}
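+
+/*
+ * Example (illustrative): gamma1 = 0x00202020 and gamma2 = 0x00404040
+ * pass, since every channel byte strictly increases, while gamma2 =
+ * 0x00402040 fails because the middle byte (0x20) does not grow.
+ */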
+
+bool
+check_gamma5_errata(u32 gamma5)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma5 >> i*8) & 0xff) == 0x80)
+ return false;
+ }
+
+ return true;
+}
+
+int
+check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+ if (!check_gamma_bounds(0, attrs->gamma0) ||
+ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ return -EINVAL;
+
+ if (!check_gamma5_errata(attrs->gamma5))
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_attrs *attrs = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct overlay_registers *regs;
+ int ret;
+
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ rw_enter_write(&dev->mode_config.rwl);
+ DRM_LOCK();
+
+ ret = -EINVAL;
+ if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+ attrs->color_key = overlay->color_key;
+ attrs->brightness = overlay->brightness;
+ attrs->contrast = overlay->contrast;
+ attrs->saturation = overlay->saturation;
+
+ if (!IS_GEN2(dev)) {
+ attrs->gamma0 = I915_READ(OGAMC0);
+ attrs->gamma1 = I915_READ(OGAMC1);
+ attrs->gamma2 = I915_READ(OGAMC2);
+ attrs->gamma3 = I915_READ(OGAMC3);
+ attrs->gamma4 = I915_READ(OGAMC4);
+ attrs->gamma5 = I915_READ(OGAMC5);
+ }
+ } else {
+ if (attrs->brightness < -128 || attrs->brightness > 127)
+ goto out_unlock;
+ if (attrs->contrast > 255)
+ goto out_unlock;
+ if (attrs->saturation > 1023)
+ goto out_unlock;
+
+ overlay->color_key = attrs->color_key;
+ overlay->brightness = attrs->brightness;
+ overlay->contrast = attrs->contrast;
+ overlay->saturation = attrs->saturation;
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+ if (IS_GEN2(dev))
+ goto out_unlock;
+
+ if (overlay->active) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = check_gamma(attrs);
+ if (ret)
+ goto out_unlock;
+
+ I915_WRITE(OGAMC0, attrs->gamma0);
+ I915_WRITE(OGAMC1, attrs->gamma1);
+ I915_WRITE(OGAMC2, attrs->gamma2);
+ I915_WRITE(OGAMC3, attrs->gamma3);
+ I915_WRITE(OGAMC4, attrs->gamma4);
+ I915_WRITE(OGAMC5, attrs->gamma5);
+ }
+ }
+
+ ret = 0;
+out_unlock:
+ DRM_UNLOCK();
+ rw_exit_write(&dev->mode_config.rwl);
+
+ return ret;
+}
+
+void
+intel_setup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_i915_gem_object *reg_bo;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!HAS_OVERLAY(dev))
+ return;
+
+ overlay = malloc(sizeof(struct intel_overlay), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!overlay)
+ return;
+
+ DRM_LOCK();
+ if (WARN_ON(dev_priv->overlay))
+ goto out_free;
+
+ overlay->dev = dev;
+
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (!reg_bo)
+ goto out_free;
+ overlay->reg_bo = reg_bo;
+
+ if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ ret = i915_gem_attach_phys_object(dev, reg_bo,
+ I915_GEM_PHYS_OVERLAY_REGS,
+ PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = reg_bo->phys_obj->handle->segs[0].ds_addr;
+ } else {
+ /* ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); */
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = reg_bo->gtt_offset;
+
+ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+ if (ret) {
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
+ }
+
+ /* init all values */
+ overlay->color_key = 0x0101fe;
+ overlay->brightness = -19;
+ overlay->contrast = 75;
+ overlay->saturation = 146;
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs)
+ goto out_unpin_bo;
+
+ memset(regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(regs);
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ dev_priv->overlay = overlay;
+ DRM_UNLOCK();
+ DRM_INFO("initialized overlay support\n");
+ return;
+
+out_unpin_bo:
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ i915_gem_object_unpin(reg_bo);
+out_free_bo:
+ drm_gem_object_unreference(&reg_bo->base);
+out_free:
+ DRM_UNLOCK();
+ free(overlay, M_DRM);
+ return;
+}
+
+void
+intel_cleanup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv->overlay)
+ return;
+
+ /* The BOs should already have been freed by the generic code.
+ * Furthermore, modesetting teardown happens beforehand, so the
+ * hardware should already be off. */
+ BUG_ON(dev_priv->overlay->active);
+
+ drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+ free(dev_priv->overlay, M_DRM);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_overlay_error_state {
+ struct overlay_registers regs;
+ unsigned long base;
+ u32 dovsta;
+ u32 isr;
+};
+
+struct overlay_registers *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ /* Cast to make sparse happy, but it's wc memory anyway, so
+ * equivalent to the wc io mapping on X86. */
+ regs = (struct overlay_registers *)
+ overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
+
+ return regs;
+}
+
+void
+intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(regs);
+}
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay_error_state *error;
+ struct overlay_registers *regs;
+
+ if (!overlay || !overlay->active)
+ return NULL;
+
+ error = malloc(sizeof(*error), M_DRM, M_NOWAIT);
+ if (error == NULL)
+ return NULL;
+
+ error->dovsta = I915_READ(DOVSTA);
+ error->isr = I915_READ(ISR);
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ error->base = overlay->reg_bo->gtt_offset;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto err;
+
+ memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+ intel_overlay_unmap_regs_atomic(overlay, regs);
+
+ return error;
+
+err:
+ free(error, M_DRM);
+ return NULL;
+}
+
+void
+intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
+{
+ seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+ error->dovsta, error->isr);
+ seq_printf(m, " Register file at 0x%08lx:\n",
+ error->base);
+
+#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
+ P(OBUF_0Y);
+ P(OBUF_1Y);
+ P(OBUF_0U);
+ P(OBUF_0V);
+ P(OBUF_1U);
+ P(OBUF_1V);
+ P(OSTRIDE);
+ P(YRGB_VPH);
+ P(UV_VPH);
+ P(HORZ_PH);
+ P(INIT_PHS);
+ P(DWINPOS);
+ P(DWINSZ);
+ P(SWIDTH);
+ P(SWIDTHSW);
+ P(SHEIGHT);
+ P(YRGBSCALE);
+ P(UVSCALE);
+ P(OCLRC0);
+ P(OCLRC1);
+ P(DCLRKV);
+ P(DCLRKM);
+ P(SCLRKVH);
+ P(SCLRKVL);
+ P(SCLRKEN);
+ P(OCONFIG);
+ P(OCMD);
+ P(OSTART_0Y);
+ P(OSTART_1Y);
+ P(OSTART_0U);
+ P(OSTART_0V);
+ P(OSTART_1U);
+ P(OSTART_1V);
+ P(OTILEOFF_0Y);
+ P(OTILEOFF_1Y);
+ P(OTILEOFF_0U);
+ P(OTILEOFF_0V);
+ P(OTILEOFF_1U);
+ P(OTILEOFF_1V);
+ P(FASTHSCALE);
+ P(UVSCALEV);
+#undef P
+}
+#endif
diff --git a/sys/dev/pci/drm/i915/intel_panel.c b/sys/dev/pci/drm/i915/intel_panel.c
new file mode 100644
index 00000000000..07e53e328bd
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_panel.c
@@ -0,0 +1,516 @@
+/* $OpenBSD: intel_panel.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <dev/pci/drm/drmP.h>
+#include "intel_drv.h"
+
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
+int is_backlight_combination_mode(struct drm_device *);
+u32 i915_read_blc_pwm_ctl(struct drm_device *);
+void intel_pch_panel_set_backlight(struct drm_device *, u32);
+void intel_panel_actually_set_backlight(struct drm_device *, u32);
+void intel_panel_init_backlight(struct drm_device *);
+u32 _intel_panel_get_max_backlight(struct drm_device *);
+u32 intel_panel_compute_brightness(struct drm_device *, u32);
+u32 intel_panel_get_backlight(struct drm_device *);
+
+void
+intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int x, y, width, height;
+
+ x = y = width = height = 0;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay)
+ goto done;
+
+ switch (fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+ x = (adjusted_mode->hdisplay - width + 1)/2;
+ y = (adjusted_mode->vdisplay - height + 1)/2;
+ break;
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ if (scaled_width > scaled_height) { /* pillar */
+ width = scaled_height / mode->vdisplay;
+ if (width & 1)
+ width++;
+ x = (adjusted_mode->hdisplay - width + 1) / 2;
+ y = 0;
+ height = adjusted_mode->vdisplay;
+ } else if (scaled_width < scaled_height) { /* letter */
+ height = scaled_width / mode->hdisplay;
+ if (height & 1)
+ height++;
+ y = (adjusted_mode->vdisplay - height + 1) / 2;
+ x = 0;
+ width = adjusted_mode->hdisplay;
+ } else {
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ }
+ }
+ break;
+
+ default:
+ case DRM_MODE_SCALE_FULLSCREEN:
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ break;
+ }
+
+done:
+ dev_priv->pch_pf_pos = (x << 16) | y;
+ dev_priv->pch_pf_size = (width << 16) | height;
+}
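+
+/*
+ * Illustrative numbers for the DRM_MODE_SCALE_ASPECT case: a 1024x768
+ * mode on a 1920x1080 panel gives scaled_width = 1920 * 768 = 1474560
+ * and scaled_height = 1024 * 1080 = 1105920; scaled_width is larger,
+ * so the image is pillarboxed with width = 1105920 / 768 = 1440,
+ * height = 1080 and x = (1920 - 1440 + 1) / 2 = 240.
+ */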
+
+int
+is_backlight_combination_mode(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+ if (IS_GEN2(dev))
+ return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+ return 0;
+}
+
+u32
+i915_read_blc_pwm_ctl(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 val;
+
+ /* Restore the CTL value if it was lost, e.g. after a GPU reset */
+
+ if (HAS_PCH_SPLIT(dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2);
+ if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL2 = val;
+ } else if (val == 0) {
+ val = dev_priv->regfile.saveBLC_PWM_CTL2;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val);
+ }
+ } else {
+ val = I915_READ(BLC_PWM_CTL);
+ if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL = val;
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 =
+ I915_READ(BLC_PWM_CTL2);
+ } else if (val == 0) {
+ val = dev_priv->regfile.saveBLC_PWM_CTL;
+ I915_WRITE(BLC_PWM_CTL, val);
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->regfile.saveBLC_PWM_CTL2);
+ }
+ }
+
+ return val;
+}
+
+u32
+_intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ u32 max;
+
+ max = i915_read_blc_pwm_ctl(dev);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ max >>= 16;
+ } else {
+ if (INTEL_INFO(dev)->gen < 4)
+ max >>= 17;
+ else
+ max >>= 16;
+
+ if (is_backlight_combination_mode(dev))
+ max *= 0xff;
+ }
+
+ return max;
+}
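+
+/*
+ * Illustrative read-out: with BLC_PWM_CTL = 0x12340000 on a non-PCH
+ * gen4 part, the max field is 0x12340000 >> 16 = 0x1234.  In
+ * combination mode the result is further multiplied by 0xff so that
+ * the legacy LBPC byte can be folded into the same scale.
+ */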
+
+u32
+intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ u32 max;
+
+ max = _intel_panel_get_max_backlight(dev);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+#ifdef notyet
+ pr_warn_once("fixme: max PWM is zero\n");
+#endif
+ return 1;
+ }
+
+ DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+ return max;
+}
+
+static int i915_panel_invert_brightness;
+/*
+ * -1 force normal, 0 machine defaults, 1 force inversion; please report
+ * PCI device ID, subsystem vendor and subsystem device ID to
+ * dri-devel@lists.freedesktop.org if your machine needs it.  It will
+ * then be included in an upcoming module version.
+ */
+u32
+intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (i915_panel_invert_brightness < 0)
+ return val;
+
+ if (i915_panel_invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
+ return intel_panel_get_max_backlight(dev) - val;
+
+ return val;
+}
+
+u32
+intel_panel_get_backlight(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (INTEL_INFO(dev)->gen < 4)
+ val >>= 1;
+
+ if (is_backlight_combination_mode(dev)) {
+ u8 lbpc;
+
+ lbpc = pci_conf_read(dev_priv->pc, dev_priv->tag,
+ PCI_LBPC) & 0xff;
+ val *= lbpc;
+ }
+ }
+
+ val = intel_panel_compute_brightness(dev, val);
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+}
+
+void
+intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+void
+intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ level = intel_panel_compute_brightness(dev, level);
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_pch_panel_set_backlight(dev, level);
+
+ if (is_backlight_combination_mode(dev)) {
+ u32 max = intel_panel_get_max_backlight(dev);
+ u8 lbpc;
+
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+ pci_conf_write(dev_priv->pc, dev_priv->tag, PCI_LBPC, lbpc);
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL);
+ if (INTEL_INFO(dev)->gen < 4)
+ level <<= 1;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
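+
+/*
+ * Combination-mode example (illustrative, raw PWM max of 100, so the
+ * combined max is 100 * 0xff = 25500): level = 12750 gives
+ * lbpc = 12750 * 0xfe / 25500 + 1 = 128 and a PWM duty of
+ * 12750 / 128 = 99, i.e. nearly full PWM with LBPC at about half
+ * scale, whose product is again roughly 50% brightness.
+ */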
+
+void
+intel_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ dev_priv->backlight_level = level;
+ if (dev_priv->backlight_enabled)
+ intel_panel_actually_set_backlight(dev, level);
+}
+
+void
+intel_panel_disable_backlight(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ dev_priv->backlight_enabled = false;
+ intel_panel_actually_set_backlight(dev, 0);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+ I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
+}
+
+void
+intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (dev_priv->backlight_level == 0)
+ dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+ tmp = I915_READ(reg);
+
+ /* Note that this can also get called through DPMS changes.
+ * We don't track the backlight DPMS state, hence check
+ * whether we have to do anything first. */
+ if (tmp & BLM_PWM_ENABLE)
+ goto set_level;
+
+ if (dev_priv->num_pipe == 3)
+ tmp &= ~BLM_PIPE_SELECT_IVB;
+ else
+ tmp &= ~BLM_PIPE_SELECT;
+
+ tmp |= BLM_PIPE(pipe);
+ tmp &= ~BLM_PWM_ENABLE;
+
+ I915_WRITE(reg, tmp);
+ POSTING_READ(reg);
+ I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp |= BLM_PCH_PWM_ENABLE;
+ tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
+
+set_level:
+ /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+ * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+ * registers are set.
+ */
+ dev_priv->backlight_enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+}
+
+void
+intel_panel_init_backlight(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+// struct inteldrm_softc *dev_priv = dev->dev_private;
+
+#if 0
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+#endif
+
+ switch (i915_panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+int
+intel_panel_update_status(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ intel_panel_set_backlight(dev, bd->props.brightness);
+ return 0;
+}
+
+int
+intel_panel_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ return dev_priv->backlight_level;
+}
+
+static const struct backlight_ops intel_panel_bl_ops = {
+ .update_status = intel_panel_update_status,
+ .get_brightness = intel_panel_get_brightness,
+};
+
+int
+intel_panel_setup_backlight(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct backlight_properties props;
+
+ intel_panel_init_backlight(dev);
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = _intel_panel_get_max_backlight(dev);
+ if (props.max_brightness == 0) {
+ DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
+ return -ENODEV;
+ }
+ dev_priv->backlight =
+ backlight_device_register("intel_backlight",
+ &connector->kdev, dev,
+ &intel_panel_bl_ops, &props);
+
+ if (IS_ERR(dev_priv->backlight)) {
+ DRM_ERROR("Failed to register backlight: %ld\n",
+ PTR_ERR(dev_priv->backlight));
+ dev_priv->backlight = NULL;
+ return -ENODEV;
+ }
+ dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
+ return 0;
+}
+
+void
+intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ if (dev_priv->backlight)
+ backlight_device_unregister(dev_priv->backlight);
+}
+#else
+int
+intel_panel_setup_backlight(struct drm_connector *connector)
+{
+ intel_panel_init_backlight(connector->dev);
+ return 0;
+}
+
+void
+intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ return;
+}
+#endif
+
+int
+intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode)
+{
+ panel->fixed_mode = fixed_mode;
+
+ return 0;
+}
+
+void
+intel_panel_fini(struct intel_panel *panel)
+{
+ struct intel_connector *intel_connector =
+ container_of(panel, struct intel_connector, panel);
+
+ if (panel->fixed_mode)
+ drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
diff --git a/sys/dev/pci/drm/i915/intel_pm.c b/sys/dev/pci/drm/i915/intel_pm.c
new file mode 100644
index 00000000000..acfda003567
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_pm.c
@@ -0,0 +1,4752 @@
+/* $OpenBSD: intel_pm.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
+/* FBC, or Frame Buffer Compression, is a technique employed to compress the
+ * framebuffer contents in-memory, aiming at reducing the required bandwidth
+ * during in-memory transfers and, therefore, reduce the power consumption.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns.
+ *
+ * FBC-related functionality can be enabled by means of the
+ * i915.i915_enable_fbc parameter.
+ */
+
+bool intel_crtc_active(struct drm_crtc *);
+void i8xx_disable_fbc(struct drm_device *);
+void i8xx_enable_fbc(struct drm_crtc *, unsigned long);
+bool i8xx_fbc_enabled(struct drm_device *);
+void g4x_enable_fbc(struct drm_crtc *, unsigned long);
+void g4x_disable_fbc(struct drm_device *);
+bool g4x_fbc_enabled(struct drm_device *);
+void sandybridge_blit_fbc_update(struct drm_device *);
+void ironlake_enable_fbc(struct drm_crtc *, unsigned long);
+void ironlake_disable_fbc(struct drm_device *);
+bool ironlake_fbc_enabled(struct drm_device *);
+bool intel_fbc_enabled(struct drm_device *);
+void intel_cancel_fbc_work(struct inteldrm_softc *);
+void i915_pineview_get_mem_freq(struct drm_device *);
+void i915_ironlake_get_mem_freq(struct drm_device *);
+const struct cxsr_latency *
+ intel_get_cxsr_latency(int, int, int, int);
+void pineview_disable_cxsr(struct drm_device *);
+int i9xx_get_fifo_size(struct drm_device *, int);
+int i85x_get_fifo_size(struct drm_device *, int);
+int i845_get_fifo_size(struct drm_device *, int);
+int i830_get_fifo_size(struct drm_device *, int);
+unsigned long
+ intel_calculate_wm(unsigned long,
+ const struct intel_watermark_params *wm, int, int, unsigned long);
+struct drm_crtc *
+ single_enabled_crtc(struct drm_device *);
+void pineview_update_wm(struct drm_device *);
+bool g4x_compute_wm0(struct drm_device *, int,
+ const struct intel_watermark_params *, int,
+ const struct intel_watermark_params *, int, int *, int *);
+bool g4x_check_srwm(struct drm_device *, int, int,
+ const struct intel_watermark_params *,
+ const struct intel_watermark_params *);
+bool g4x_compute_srwm(struct drm_device *, int, int,
+ const struct intel_watermark_params *,
+ const struct intel_watermark_params *, int *, int *);
+bool vlv_compute_drain_latency(struct drm_device *, int, int *, int *,
+ int *, int *);
+void vlv_update_drain_latency(struct drm_device *);
+void valleyview_update_wm(struct drm_device *);
+void g4x_update_wm(struct drm_device *);
+void i965_update_wm(struct drm_device *);
+void i9xx_update_wm(struct drm_device *);
+void i830_update_wm(struct drm_device *);
+bool ironlake_check_srwm(struct drm_device *, int, int, int, int,
+ const struct intel_watermark_params *,
+ const struct intel_watermark_params *);
+bool ironlake_compute_srwm(struct drm_device *, int, int, int,
+ const struct intel_watermark_params *,
+ const struct intel_watermark_params *, int *, int *, int *);
+void ironlake_update_wm(struct drm_device *);
+void sandybridge_update_wm(struct drm_device *);
+void ivybridge_update_wm(struct drm_device *);
+void haswell_update_linetime_wm(struct drm_device *, int,
+ struct drm_display_mode *);
+bool sandybridge_compute_sprite_wm(struct drm_device *, int, uint32_t, int,
+ const struct intel_watermark_params *, int, int *);
+bool sandybridge_compute_sprite_srwm(struct drm_device *, int, uint32_t,
+ int, const struct intel_watermark_params *, int, int *);
+void sandybridge_update_sprite_wm(struct drm_device *, int, uint32_t, int);
+struct drm_i915_gem_object *
+ intel_alloc_context_page(struct drm_device *);
+void ironlake_enable_drps(struct drm_device *);
+void ironlake_disable_drps(struct drm_device *);
+u32 gen6_rps_limits(struct inteldrm_softc *, u8 *);
+void gen6_disable_rps(struct drm_device *);
+void gen6_enable_rps(struct drm_device *);
+void gen6_update_ring_freq(struct drm_device *);
+void ironlake_disable_rc6(struct drm_device *);
+int ironlake_setup_rc6(struct drm_device *);
+void ironlake_enable_rc6(struct drm_device *);
+unsigned long intel_pxfreq(u32);
+unsigned long __i915_chipset_val(struct inteldrm_softc *);
+u16 pvid_to_extvid(struct inteldrm_softc *, u8);
+void __i915_update_gfx_val(struct inteldrm_softc *);
+unsigned long __i915_gfx_val(struct inteldrm_softc *);
+unsigned long i915_read_mch_val(void);
+bool i915_gpu_raise(void);
+bool i915_gpu_lower(void);
+bool i915_gpu_busy(void);
+bool i915_gpu_turbo_disable(void);
+void intel_init_emon(struct drm_device *);
+void ibx_init_clock_gating(struct drm_device *);
+void ironlake_init_clock_gating(struct drm_device *);
+void cpt_init_clock_gating(struct drm_device *);
+void gen6_init_clock_gating(struct drm_device *);
+void gen7_setup_fixed_func_scheduler(struct inteldrm_softc *);
+void lpt_init_clock_gating(struct drm_device *);
+void haswell_init_clock_gating(struct drm_device *);
+void ivybridge_init_clock_gating(struct drm_device *);
+void valleyview_init_clock_gating(struct drm_device *);
+void g4x_init_clock_gating(struct drm_device *);
+void crestline_init_clock_gating(struct drm_device *);
+void broadwater_init_clock_gating(struct drm_device *);
+void gen3_init_clock_gating(struct drm_device *);
+void i85x_init_clock_gating(struct drm_device *);
+void i830_init_clock_gating(struct drm_device *);
+void __gen6_gt_wait_for_thread_c0(struct inteldrm_softc *);
+void __gen6_gt_force_wake_reset(struct inteldrm_softc *);
+void __gen6_gt_force_wake_mt_reset(struct inteldrm_softc *);
+void vlv_force_wake_reset(struct inteldrm_softc *);
+void vlv_force_wake_get(struct inteldrm_softc *);
+void vlv_force_wake_put(struct inteldrm_softc *);
+void intel_gt_reset(struct drm_device *);
+void intel_gt_init(struct drm_device *);
+void intel_fbc_work_tick(void *);
+void intel_fbc_work_fn(void *, void *);
+void intel_gen6_powersave_tick(void *);
+void intel_gen6_powersave_work(void *, void *);
+
+extern int ticks;
+
+bool
+intel_crtc_active(struct drm_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ */
+ return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
+void
+i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+ int retries;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ for (retries = 10; retries > 0; retries--) {
+ if ((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+void
+i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 64B units */
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
+ cfb_pitch, crtc->y, intel_crtc->plane);
+}
+
+bool
+i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+void
+g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+void
+g4x_disable_fbc(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+bool
+g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+void
+sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+ gen6_gt_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+void
+ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ /* Set persistent mode for front-buffer rendering, ala X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+void
+ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+bool
+ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+bool
+intel_fbc_enabled(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+void
+intel_fbc_work_tick(void *arg)
+{
+ struct intel_fbc_work *work = arg;
+
+ workq_queue_task(NULL, &work->task, 0,
+ intel_fbc_work_fn, work, NULL);
+}
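+
+/*
+ * intel_fbc_work_tick() runs at softclock (interrupt context), where
+ * the rwlock taken by DRM_LOCK() in intel_fbc_work_fn() below must not
+ * be acquired, so the tick handler just bounces the work onto a workq
+ * thread that is allowed to sleep.
+ */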
+
+void
+intel_fbc_work_fn(void *arg1, void *arg2)
+{
+ struct intel_fbc_work *work = arg1;
+ struct drm_device *dev = work->crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ DRM_LOCK();
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ DRM_UNLOCK();
+
+ free(work, M_DRM);
+}
+
+void
+intel_cancel_fbc_work(struct inteldrm_softc *dev_priv)
+{
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (timeout_del(&dev_priv->fbc_work->to))
+ /* timeout was cancelled before it ran; clean up */
+ free(dev_priv->fbc_work, M_DRM);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc_work = NULL;
+}
+
+void
+intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ work = malloc(sizeof *work, M_DRM, M_WAITOK | M_ZERO);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+ timeout_set(&work->to, intel_fbc_work_tick, work);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ timeout_add_msec(&work->to, 50);
+}
+
+void
+intel_disable_fbc(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void
+intel_update_fbc(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int enable_fbc;
+
+ if (!i915_powersave)
+ return;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (intel_crtc_active(tmp_crtc) &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 6)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ goto out_disable;
+ }
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ goto out_disable;
+ }
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+#ifdef notyet
+ if (in_dbg_master())
+ goto out_disable;
+#endif
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
+ intel_enable_fbc(crtc, 500);
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
+}
+
+void
+i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+void
+i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->ips.r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->ips.c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->ips.c_m = 1;
+ } else {
+ dev_priv->ips.c_m = 2;
+ }
+}
+
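+/*
+ * Per the struct cxsr_latency field order: is_desktop, is_ddr3, fsb_freq
+ * and mem_freq (in MHz), then the display_sr, display_hpll_disable,
+ * cursor_sr and cursor_hpll_disable latencies (in ns).
+ */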
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+const struct cxsr_latency *
+intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+void
+pineview_disable_cxsr(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ /* deactivate cxsr */
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
+
+int
+i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+int
+i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+int
+i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+int
+i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
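+/* Initializer order: fifo_size, max_wm, default_wm, guard_size,
+ * cacheline_size. */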
+static const struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+ VALLEYVIEW_FIFO_SIZE,
+ VALLEYVIEW_MAX_WM,
+ VALLEYVIEW_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ VALLEYVIEW_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i855_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @pixel_size: display pixel size
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO-line-sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+unsigned long
+intel_calculate_wm(unsigned long clock_in_khz,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ long entries_required, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
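+	/*
+	 * Worked example with illustrative numbers: a 1920x1080@60 mode
+	 * has a dotclock of roughly 148500 kHz.  At 4 bytes per pixel with
+	 * the default 5000ns latency this gives (148500 / 1000) * 4 *
+	 * 5000 / 1000 = 2960 bytes, or 47 cachelines for a 64-byte FIFO
+	 * line, which is then padded with the guard size and subtracted
+	 * from the FIFO size below.
+	 */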
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required = howmany(entries_required, wm->cacheline_size);
+
+ DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+ wm_size = fifo_size - (entries_required + wm->guard_size);
+
+ DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > (long)wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
+
+struct drm_crtc *
+single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (intel_crtc_active(crtc)) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+void
+pineview_update_wm(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned long wm;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
+bool
+g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (!intel_crtc_active(crtc)) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
+
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
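+	/* clock is in kHz, so clock * pixel_size / 1000 is the number of
+	 * bytes drained per usec; scaling by latency(ns) / 1000 gives the
+	 * bytes drained while one fetch is outstanding. */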
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = howmany(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
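+	/* latency is in ns and line_time_us in usec, so the quotient is
+	 * 1000x the line count; adding 1000 before dividing rounds up by
+	 * a full line. */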
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = howmany(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+bool
+g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
+
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool
+g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = howmany(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = howmany(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+bool
+vlv_compute_drain_latency(struct drm_device *dev,
+ int plane,
+ int *plane_prec_mult,
+ int *plane_dl,
+ int *cursor_prec_mult,
+ int *cursor_dl)
+{
+ struct drm_crtc *crtc;
+ int clock, pixel_size;
+ int entries;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (!intel_crtc_active(crtc))
+ return false;
+
+ clock = crtc->mode.clock; /* VESA DOT Clock */
+ pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
+
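+	/*
+	 * Illustrative numbers: a 148500 kHz dotclock at 4 bytes per pixel
+	 * drains 148 * 4 = 592 bytes per usec, so the 32x precision
+	 * multiplier is chosen and plane_dl = (64 * 32 * 4) / 592 = 13.
+	 */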
+ entries = (clock / 1000) * pixel_size;
+ *plane_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+ pixel_size);
+
+ entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
+ *cursor_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+ return true;
+}
+
+/*
+ * Update drain latency registers of memory arbiter
+ *
+ * Valleyview SoC has a new memory arbiter and needs drain latency registers
+ * to be programmed. Each plane has a drain latency multiplier and a drain
+ * latency value.
+ */
+
+void
+vlv_update_drain_latency(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int planea_prec, planea_dl, planeb_prec, planeb_dl;
+ int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+ int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
+ either 16 or 32 */
+
+ /* For plane A, Cursor A */
+ if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+ &cursor_prec_mult, &cursora_dl)) {
+ cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+ planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+ I915_WRITE(VLV_DDL1, cursora_prec |
+ (cursora_dl << DDL_CURSORA_SHIFT) |
+ planea_prec | planea_dl);
+ }
+
+ /* For plane B, Cursor B */
+ if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+ &cursor_prec_mult, &cursorb_dl)) {
+ cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+ planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+ I915_WRITE(VLV_DDL2, cursorb_prec |
+ (cursorb_dl << DDL_CURSORB_SHIFT) |
+ planeb_prec | planeb_dl);
+ }
+}
+
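+/* True only when exactly one pipe bit is set in the mask. */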
+#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
+
+void
+valleyview_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
+ unsigned int enabled = 0;
+
+ vlv_update_drain_latency(dev);
+
+ if (g4x_compute_wm0(dev, 0,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
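+	/* The plane SR value comes from a pass with the normal latency and
+	 * the cursor SR value from a pass with doubled latency; the other
+	 * result of each pass is ignored. */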
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
+ I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ } else {
+ I915_WRITE(FW_BLC_SELF_VLV,
+ I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ plane_sr = cursor_sr = 0;
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+void
+g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr)) {
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ plane_sr = cursor_sr = 0;
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+void
+i965_update_wm(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+
+ /* Calc sr entries for one plane configs */
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = ((htotal * 1000) / clock);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = howmany(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ entries = howmany(entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+void
+i9xx_update_wm(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
+ uint32_t fwater_lo;
+ uint32_t fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
+ else if (!IS_GEN2(dev))
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size, cpp,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size, cpp,
+ latency_ns);
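+		/* Self-refresh below needs a single active plane, so only
+		 * remember the crtc if it is the sole one enabled. */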
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
+ /* Calc sr entries for one plane configs */
+ if (HAS_FW_BLC(dev) && enabled) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = (htotal * 1000) / clock;
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = howmany(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
+}
+
+void
+i830_update_wm(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ 4, latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+bool
+ironlake_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* fbc has its own way to disable FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark(%d) is too large(%ld), disabling wm%d+\n",
+		    display_wm, display->max_wm, level);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%ld), disabling wm%d+\n",
+		    cursor_wm, cursor->max_wm, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("latency for wm%d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3],
+ */
+bool
+ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = howmany(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /*
+ * Spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = howmany(*display_wm * 64, line_size) + 2;
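+	/* e.g. with hypothetical numbers: display_wm = 47 on a 1920-pixel,
+	 * 4-byte-per-pixel line gives howmany(47 * 64, 7680) + 2 = 3. */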
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = howmany(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return ironlake_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+void
+ironlake_update_wm(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+		    " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled))
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /*
+ * WM3 is unsupported on ILK, probably because we don't have latency
+ * data for that power state
+ */
+}
+
+void
+sandybridge_update_wm(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+		    " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+	 * SNB supports 3 levels of watermark.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+	 * and disabled in descending order.
+	 */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+void
+ivybridge_update_wm(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+		    " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if (g4x_compute_wm0(dev, 2,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+		enabled |= 1 << 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+	 * SNB supports 3 levels of watermark.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+	 * and disabled in descending order.
+	 */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3, note we have to correct the cursor latency */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+ !ironlake_compute_srwm(dev, 3, enabled,
+ 2 * SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+void
+haswell_update_linetime_wm(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(PIPE_WM_LINETIME(pipe));
+ temp &= ~PIPE_WM_LINETIME_MASK;
+
+	/* The watermark is computed based on how long it takes to fill a
+	 * single row at the given clock rate, multiplied by 8.
+	 */
+ temp |= PIPE_WM_LINETIME_TIME(
+ ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
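+	/* e.g. a 1920-wide mode at a 148500 kHz dotclock yields
+	 * ((1920 * 1000) / 148500) * 8 = 96 with integer division. */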
+
+ /* IPS watermarks are only used by pipe A, and are ignored by
+ * pipes B and C. They are calculated similarly to the common
+ * linetime values, except that we are using CD clock frequency
+ * in MHz instead of pixel rate for the division.
+ *
+ * This is a placeholder for the IPS watermark calculation code.
+ */
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
+}
+
+bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (!intel_crtc_active(crtc)) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size -
+ sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = howmany(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+ if (!clock) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_time_us = (sprite_width * 1000) / clock;
+ if (!line_time_us) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = howmany(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+	return *sprite_wm <= 0x3ff;
+}
+
+void
+sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void
+intel_update_watermarks(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
+}
+
+void
+intel_update_linetime_watermarks(struct drm_device *dev,
+ int pipe, struct drm_display_mode *mode)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_linetime_wm)
+ dev_priv->display.update_linetime_wm(dev, pipe, mode);
+}
+
+void
+intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
+struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+ struct drm_i915_gem_object *ctx;
+ int ret;
+
+#ifdef notyet
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+#endif
+
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ ret = i915_gem_object_pin(ctx, 4096, true /*, false */);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+
+ return ctx;
+
+err_unpin:
+ i915_gem_object_unpin(ctx);
+err_unref:
+ drm_gem_object_unreference(&ctx->base);
+ DRM_UNLOCK();
+ return NULL;
+}
+
+struct mutex mchdev_lock;
+static struct inteldrm_softc *i915_mch_dev;
+
+bool
+ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+// assert_spin_locked(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+void
+ironlake_enable_drps(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+ int retries;
+
+// spin_lock_irq(&mchdev_lock);
+
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+ dev_priv->ips.fstart = fstart;
+
+ dev_priv->ips.max_delay = fstart;
+ dev_priv->ips.min_delay = fmin;
+ dev_priv->ips.cur_delay = fstart;
+
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ for (retries = 10; retries > 0; retries--) {
+ if ((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("stuck trying to change perf mode\n");
+ DELAY(1000);
+
+ ironlake_set_drps(dev, fstart);
+
+ dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->ips.last_time1 = jiffies_to_msecs(ticks);
+ dev_priv->ips.last_count2 = I915_READ(0x112f4);
+ nanouptime(&dev_priv->ips.last_time2);
+
+// spin_unlock_irq(&mchdev_lock);
+}
+
+void
+ironlake_disable_drps(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+// spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ ironlake_set_drps(dev, dev_priv->ips.fstart);
+ DELAY(1000);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ DELAY(1000);
+
+// spin_unlock_irq(&mchdev_lock);
+}
+
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing a rmw cycle (which might result in us clearing
+ * all limits and the gpu stuck at whatever frequency it is currently at).
+ */
+u32
+gen6_rps_limits(struct inteldrm_softc *dev_priv, u8 *val)
+{
+ u32 limits;
+
+ limits = 0;
+
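+	/* As the shifts below imply, the register packs the max delay in
+	 * bits 31:24 and the min delay in bits 23:16. */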
+ if (*val >= dev_priv->rps.max_delay)
+ *val = dev_priv->rps.max_delay;
+ limits |= dev_priv->rps.max_delay << 24;
+
+ /* Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency, if the down threshold expires in that window we will not
+ * receive a down interrupt. */
+ if (*val <= dev_priv->rps.min_delay) {
+ *val = dev_priv->rps.min_delay;
+ limits |= dev_priv->rps.min_delay << 16;
+ }
+
+ return limits;
+}
+
+void
+gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 limits = gen6_rps_limits(dev_priv, &val);
+
+ rw_assert_wrlock(&dev_priv->rps.hw_lock);
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
+
+ if (val == dev_priv->rps.cur_delay)
+ return;
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+ POSTING_READ(GEN6_RPNSWREQ);
+
+ dev_priv->rps.cur_delay = val;
+
+// trace_intel_gpu_freq_change(val * 50);
+}
+
+void
+gen6_disable_rps(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+ mtx_enter(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir = 0;
+ mtx_leave(&dev_priv->rps.lock);
+
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
+int
+intel_enable_rc6(const struct drm_device *dev)
+{
+ /* Respect the kernel parameter if it is set */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /* Disable RC6 on Ironlake */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ if (IS_HASWELL(dev)) {
+ DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+ return INTEL_RC6_ENABLE;
+ }
+
+ /* snb/ivb have more than one rc6 state. */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ return INTEL_RC6_ENABLE;
+ }
+
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
+void
+gen6_enable_rps(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ u32 rp_state_cap;
+ u32 gt_perf_status;
+ u32 rc6vids, pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
+ int rc6_mode;
+ int i, ret;
+
+ rw_assert_wrlock(&dev_priv->rps.hw_lock);
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /* Clear the DBG now so we don't confuse earlier errors */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+	/* In units of 50MHz */
+ dev_priv->rps.max_delay = rp_state_cap & 0xff;
+ dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.cur_delay = 0;
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ /* Check if we are enabling RC6 */
+ rc6_mode = intel_enable_rc6(dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ /* We don't use those on Haswell */
+ if (!IS_HASWELL(dev)) {
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ }
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ dev_priv->rps.max_delay << 24 |
+ dev_priv->rps.min_delay << 16);
+
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
+
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+ if (!ret) {
+ pcu_mbox = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+		if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+ }
+
+ gen6_set_rps(dev, (gt_perf_status & 0xff00) >> 8);
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
+ mtx_enter(&dev_priv->rps.lock);
+ WARN_ON(dev_priv->rps.pm_iir != 0);
+ I915_WRITE(GEN6_PMIMR, 0);
+ mtx_leave(&dev_priv->rps.lock);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ if (IS_GEN6(dev) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+void
+gen6_update_ring_freq(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int min_freq = 15;
+ int gpu_freq;
+ unsigned int ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+
+ rw_assert_wrlock(&dev_priv->rps.hw_lock);
+
+#ifdef notyet
+ max_ia_freq = cpufreq_quick_get_max(0);
+ /*
+ * Default to measured freq if none found, PCU will ensure we don't go
+ * over
+ */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_khz;
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+#else
+ /* XXX we ideally want the max not cpuspeed... */
+ max_ia_freq = cpuspeed;
+#endif
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->rps.max_delay - gpu_freq;
+ int d;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
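+		/*
+		 * Round to the nearest 100MHz before encoding it in the IA
+		 * ratio field, e.g. (illustrative) ia_freq == 3190MHz gives
+		 * (3190 + 50) / 100 == 32.
+		 */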
+ d = 100;
+ ia_freq = (ia_freq + d / 2) / d;
+ ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
+
+ sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq | gpu_freq);
+ }
+}
+
+void
+ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (dev_priv->ips.renderctx) {
+ i915_gem_object_unpin(dev_priv->ips.renderctx);
+ drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+ dev_priv->ips.renderctx = NULL;
+ }
+
+ if (dev_priv->ips.pwrctx) {
+ i915_gem_object_unpin(dev_priv->ips.pwrctx);
+ drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+ dev_priv->ips.pwrctx = NULL;
+ }
+}
+
+void
+ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int retries;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON)
+ break;
+ DELAY(1000);
+ }
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+}
+
+int
+ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (dev_priv->ips.renderctx == NULL)
+ dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->ips.pwrctx == NULL)
+ dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ bool was_interruptible;
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!intel_enable_rc6(dev))
+ return;
+
+#ifdef notyet
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+#endif
+
+ ret = ironlake_setup_rc6(dev);
+ if (ret)
+ return;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ ret = intel_ring_begin(ring, 6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ dev_priv->mm.interruptible = was_interruptible;
+ return;
+ }
+
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_advance(ring);
+
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush; combined with the MI_FLUSH above, it should
+	 * be safe to assume that renderctx is valid.
+	 */
+ ret = intel_ring_idle(ring);
+ dev_priv->mm.interruptible = was_interruptible;
+ if (ret) {
+		DRM_ERROR("failed to enable ironlake power savings\n");
+ ironlake_teardown_rc6(dev);
+ return;
+ }
+
+ I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+}
+
+unsigned long
+intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
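+
+/*
+ * Worked example (illustrative): vidfreq == 0x00020001 decodes to div == 2,
+ * post == 0 and pre == 1, so freq == (2 * 133333) / ((1 << 0) * 1) == 266666.
+ */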
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+unsigned long
+__i915_chipset_val(struct inteldrm_softc *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(ticks), diff1;
+ int i;
+
+// assert_spin_locked(&mchdev_lock);
+
+ diff1 = now - dev_priv->ips.last_time1;
+
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->ips.chipset_power;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->ips.last_count1) {
+ diff = ~0UL - dev_priv->ips.last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->ips.last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->ips.c_m &&
+ cparams[i].t == dev_priv->ips.r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
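+	/*
+	 * The (m, c) pair above is a linear fit for this memory
+	 * configuration: power is estimated as m * (counter delta per ms)
+	 * + c, then scaled down by 10 into the reporting units.
+	 */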
+ diff = diff / diff1;
+ ret = ((m * diff) + c);
+ ret = ret / 10;
+
+ dev_priv->ips.last_count1 = total_count;
+ dev_priv->ips.last_time1 = now;
+
+ dev_priv->ips.chipset_power = ret;
+
+ return ret;
+}
+
+unsigned long
+i915_chipset_val(struct inteldrm_softc *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+// spin_lock_irq(&mchdev_lock);
+
+ val = __i915_chipset_val(dev_priv);
+
+// spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
+unsigned long
+i915_mch_val(struct inteldrm_softc *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
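+	/*
+	 * Linear thermal reading: (m * x) / 127 - b, with the slope m and
+	 * intercept b unpacked from TSFS and the sample x read from TR1.
+	 */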
+ return ((m * x) / 127) - b;
+}
+
+u16
+pvid_to_extvid(struct inteldrm_softc *dev_priv, u8 pxvid)
+{
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
+}
+
+void
+__i915_update_gfx_val(struct inteldrm_softc *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+// assert_spin_locked(&mchdev_lock);
+
+ nanouptime(&now);
+ timespecsub(&now, &dev_priv->ips.last_time2, &diff1);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->ips.last_count2) {
+ diff = ~0UL - dev_priv->ips.last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->ips.last_count2;
+ }
+
+ dev_priv->ips.last_count2 = count;
+ dev_priv->ips.last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+ diff = diff / (diffms * 10);
+ dev_priv->ips.gfx_power = diff;
+}
+
+void
+i915_update_gfx_val(struct inteldrm_softc *dev_priv)
+{
+ if (dev_priv->info->gen != 5)
+ return;
+
+// spin_lock_irq(&mchdev_lock);
+
+ __i915_update_gfx_val(dev_priv);
+
+// spin_unlock_irq(&mchdev_lock);
+}
+
+unsigned long
+__i915_gfx_val(struct inteldrm_softc *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+// assert_spin_locked(&mchdev_lock);
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
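+	/* e.g. (illustrative) t == 60 lands in the middle band above:
+	 * corr == 60 * 964 + 29317 == 87157, i.e. ~0.87 in these
+	 * 1/100000 units. */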
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->ips.corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ __i915_update_gfx_val(dev_priv);
+
+ return dev_priv->ips.gfx_power + state2;
+}
+
+unsigned long
+i915_gfx_val(struct inteldrm_softc *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+// spin_lock_irq(&mchdev_lock);
+
+ val = __i915_gfx_val(dev_priv);
+
+// spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long
+i915_read_mch_val(void)
+{
+ struct inteldrm_softc *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+// spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = __i915_chipset_val(dev_priv);
+ graphics_val = __i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+// spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool
+i915_gpu_raise(void)
+{
+ struct inteldrm_softc *dev_priv;
+ bool ret = true;
+
+// spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+ dev_priv->ips.max_delay--;
+
+out_unlock:
+// spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool
+i915_gpu_lower(void)
+{
+ struct inteldrm_softc *dev_priv;
+ bool ret = true;
+
+// spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+ dev_priv->ips.max_delay++;
+
+out_unlock:
+// spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool
+i915_gpu_busy(void)
+{
+ struct inteldrm_softc *dev_priv;
+ struct intel_ring_buffer *ring;
+ bool ret = false;
+ int i;
+
+// spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ for_each_ring(ring, dev_priv, i)
+ ret |= !list_empty(&ring->request_list);
+
+out_unlock:
+// spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool
+i915_gpu_turbo_disable(void)
+{
+ struct inteldrm_softc *dev_priv;
+ struct drm_device *dev;
+ bool ret = true;
+
+// spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+ dev = (struct drm_device *)dev_priv->drmdev;
+ dev_priv->ips.max_delay = dev_priv->ips.fstart;
+
+ if (!ironlake_set_drps(dev, dev_priv->ips.fstart))
+ ret = false;
+
+out_unlock:
+// spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+#if 0
+void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+#endif
+
+void
+intel_gpu_ips_init(struct inteldrm_softc *dev_priv)
+{
+ /* We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values. */
+// spin_lock_irq(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+// spin_unlock_irq(&mchdev_lock);
+
+// ips_ping_for_i915_load();
+}
+
+void
+intel_gpu_ips_teardown(void)
+{
+// spin_lock_irq(&mchdev_lock);
+ i915_mch_dev = NULL;
+// spin_unlock_irq(&mchdev_lock);
+}
+
+void
+intel_init_emon(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+	/* Disable PMON while we program the event weights */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
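+	/*
+	 * e.g. (illustrative) vid == 100 with freq == 400000 gives
+	 * 100*100 * 400 * 255 / (127*127*900) ~= 70, well inside the
+	 * 8-bit weight field.
+	 */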
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
+void
+intel_disable_gt_powersave(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_disable_drps(dev);
+ ironlake_disable_rc6(dev);
+ } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+ timeout_del(&dev_priv->rps.delayed_resume_to);
+ rw_enter_write(&dev_priv->rps.hw_lock);
+ gen6_disable_rps(dev);
+ rw_exit_write(&dev_priv->rps.hw_lock);
+ }
+}
+
+void
+intel_gen6_powersave_tick(void *arg)
+{
+ drm_i915_private_t *dev_priv = arg;
+
+ workq_queue_task(NULL, &dev_priv->rps.delayed_resume_task, 0,
+ intel_gen6_powersave_work, dev_priv, NULL);
+}
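+
+/*
+ * The timeout above fires in softclock (interrupt) context, so it only
+ * queues the task; sleeping on rps.hw_lock is deferred to the workq
+ * thread below.
+ */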
+
+void
+intel_gen6_powersave_work(void *arg1, void *arg2)
+{
+ drm_i915_private_t *dev_priv = arg1;
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+
+ rw_enter_write(&dev_priv->rps.hw_lock);
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
+ rw_exit_write(&dev_priv->rps.hw_lock);
+}
+
+void
+intel_enable_gt_powersave(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ /*
+ * PCU communication is slow and this doesn't need to be
+ * done at any specific time, so do this out of our fast path
+ * to make resume and init faster.
+ */
+ timeout_add_sec(&dev_priv->rps.delayed_resume_to, 1);
+ }
+}
+
+void
+ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+void
+ironlake_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+ /* Required for FBC */
+ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
+
+	/*
+	 * According to the spec the following bits should be set in
+	 * order to enable memory self-refresh:
+	 *   bits 21/22 of 0x42004
+	 *   bit 5 of 0x42020
+	 *   bit 15 of 0x45000
+	 */
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+	/*
+	 * Based on the document from the hardware guys, the following bits
+	 * should be set unconditionally in order to enable FBC:
+	 *   bit 22 of 0x42000
+	 *   bit 22 of 0x42004
+	 *   bits 7/8/9 of 0x42020
+	 */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ }
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ ibx_init_clock_gating(dev);
+}
+
+void
+cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+	/* The following fixes a weird display corruption (a few pixels
+	 * shifted downward) seen only on the LVDS panels of some HP laptops
+	 * with Ivy Bridge.
+	 */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+ /* WADP0ClockGatingDisable */
+ for_each_pipe(pipe) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+ }
+}
+
+void
+gen6_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ /* WaDisableHiZPlanesWhenMSAAEnabled */
+ I915_WRITE(_3D_CHICKEN,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+ /* WaSetupGtModeTdRowDispatch */
+ if (IS_SNB_GT1(dev))
+ I915_WRITE(GEN6_GT_MODE,
+ _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* Bspec says we need to always set all mask bits. */
+ I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+
+	/*
+	 * According to the spec the following bits should be
+	 * set in order to enable memory self-refresh and fbc:
+	 *   bits 21/22 of 0x42000
+	 *   bits 21/22 of 0x42004
+	 *   bits 5/7 of 0x42020
+	 *   bit 14 of 0x70180
+	 *   bit 14 of 0x71180
+	 */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) |
+ ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ /* The default value should be 0x200 according to docs, but the two
+ * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
+ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
+ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+ cpt_init_clock_gating(dev);
+}
+
+void
+gen7_setup_fixed_func_scheduler(struct inteldrm_softc *dev_priv)
+{
+ uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+ reg &= ~GEN7_FF_SCHED_MASK;
+ reg |= GEN7_FF_TS_SCHED_HW;
+ reg |= GEN7_FF_VS_SCHED_HW;
+ reg |= GEN7_FF_DS_SCHED_HW;
+
+ I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+void
+lpt_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ /*
+ * TODO: this bit should only be enabled when really needed, then
+ * disabled when not needed anymore in order to save power.
+ */
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ I915_WRITE(SOUTH_DSPCLK_GATE_D,
+ I915_READ(SOUTH_DSPCLK_GATE_D) |
+ PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
+void
+haswell_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipe;
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ /* XXX: This is a workaround for early silicon revisions and should be
+ * removed later.
+ */
+ I915_WRITE(WM_DBG,
+ I915_READ(WM_DBG) |
+ WM_DBG_DISALLOW_MULTIPLE_LP |
+ WM_DBG_DISALLOW_SPRITE |
+ WM_DBG_DISALLOW_MAXFIFO);
+
+ lpt_init_clock_gating(dev);
+}
+
+void
+ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t snpcr;
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* WaDisablePSDDualDispatchEnable */
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ else
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ else
+ I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= GEN6_MBC_SNPCR_MED;
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ cpt_init_clock_gating(dev);
+}
+
+void
+valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ int pipe;
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* WaDisableDopClockGating */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+ GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /*
+ * On ValleyView, the GUnit needs to signal the GT
+ * when flip and other events complete. So enable
+ * all the GUnit->GT interrupts here
+ */
+ I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
+ PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
+ SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
+ PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
+ PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
+ SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
+ PLANEA_FLIPDONE_INT_EN);
+
+ /*
+ * WaDisableVLVClockGating_VBIIssue
+	 * Disable clock gating on the GCFG unit to prevent a delay
+ * in the reporting of vblank events.
+ */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+}
+
+void
+g4x_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate;
+
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+}
+
+void
+crestline_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+}
+
+void
+broadwater_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+}
+
+void
+gen3_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+
+ if (IS_PINEVIEW(dev))
+ I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+}
+
+void
+i85x_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+void
+i830_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+void
+intel_init_clock_gating(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ dev_priv->display.init_clock_gating(dev);
+}
+
+/* Starting with Haswell, we have different power wells for
+ * different parts of the GPU. This attempts to enable them all.
+ */
+void
+intel_init_power_wells(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ unsigned long power_wells[] = {
+ HSW_PWR_WELL_CTL1,
+ HSW_PWR_WELL_CTL2,
+ HSW_PWR_WELL_CTL4
+ };
+ int i, retries;
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ DRM_LOCK();
+
+ for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
+ int well = I915_READ(power_wells[i]);
+
+ if ((well & HSW_PWR_WELL_STATE) == 0) {
+ I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
+ for (retries = 20; retries > 0; retries--) {
+ if (I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+ }
+ }
+
+ DRM_UNLOCK();
+}
+
+/* Set up chip specific power management-related functions */
+void
+intel_init_pm(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_CRESTLINE(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* For cxsr */
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_GEN5(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = ivybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ } else if (IS_HASWELL(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ valleyview_init_clock_gating;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.update_wm = g4x_update_wm;
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.update_wm = i965_update_wm;
+ if (IS_CRESTLINE(dev))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ } else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_I865G(dev)) {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ } else {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+}
+
+void
+__gen6_gt_wait_for_thread_c0(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 gt_thread_status_mask;
+ int retries;
+
+ if (IS_HASWELL(dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+	/* Workaround for a sporadic read returning 0: wait for the GT
+	 * thread to wake up.
+	 */
+ for (retries = 500; retries > 0; retries--) {
+ if ((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) &
+ gt_thread_status_mask) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+void
+__gen6_gt_force_wake_reset(struct inteldrm_softc *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
+void
+__gen6_gt_force_wake_get(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 forcewake_ack;
+ int count;
+
+ if (IS_HASWELL(dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_ACK;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(forcewake_ack) & 1))
+ DELAY(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(forcewake_ack) & 1) == 0)
+ DELAY(10);
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+void
+__gen6_gt_force_wake_mt_reset(struct inteldrm_softc *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
+}
+
+void
+__gen6_gt_force_wake_mt_get(struct inteldrm_softc *dev_priv)
+{
+ struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
+ u32 forcewake_ack;
+ int count;
+
+ if (IS_HASWELL(dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(forcewake_ack) & 1))
+ DELAY(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(forcewake_ack) & 1) == 0)
+ DELAY(10);
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void
+gen6_gt_force_wake_get(struct inteldrm_softc *dev_priv)
+{
+ mtx_enter(&dev_priv->gt_lock);
+ if (dev_priv->forcewake_count++ == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ mtx_leave(&dev_priv->gt_lock);
+}
+
+void
+gen6_gt_check_fifodbg(struct inteldrm_softc *dev_priv)
+{
+ u32 gtfifodbg;
+ gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+void
+__gen6_gt_force_wake_put(struct inteldrm_softc *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ POSTING_READ(ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void
+__gen6_gt_force_wake_mt_put(struct inteldrm_softc *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void
+gen6_gt_force_wake_put(struct inteldrm_softc *dev_priv)
+{
+ mtx_enter(&dev_priv->gt_lock);
+ if (--dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ mtx_leave(&dev_priv->gt_lock);
+}
+
+int
+__gen6_gt_wait_for_fifo(struct inteldrm_softc *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ DELAY(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
+ dev_priv->gt_fifo_count = fifo;
+ }
+ dev_priv->gt_fifo_count--;
+
+ return ret;
+}
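+
+/*
+ * Each tracked MMIO write consumes one GT FIFO credit: once the cached
+ * count drops below the reserved watermark we re-read the free-entry
+ * register and spin (up to ~5ms) until the hardware frees entries
+ * beyond the reserved threshold.
+ */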
+
+void
+vlv_force_wake_reset(struct inteldrm_softc *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
+}
+
+void
+vlv_force_wake_get(struct inteldrm_softc *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1))
+ DELAY(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
+ DELAY(10);
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+void
+vlv_force_wake_put(struct inteldrm_softc *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void
+intel_gt_reset(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+}
+
+void
+intel_gt_init(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ mtx_init(&dev_priv->gt_lock, IPL_NONE);
+
+ intel_gt_reset(dev);
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->gt.force_wake_get = vlv_force_wake_get;
+ dev_priv->gt.force_wake_put = vlv_force_wake_put;
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_GEN6(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
+ timeout_set(&dev_priv->rps.delayed_resume_to, intel_gen6_powersave_tick,
+ dev_priv);
+}
+
+int
+sandybridge_pcode_read(struct inteldrm_softc *dev_priv, u8 mbox, u32 *val)
+{
+ int retries;
+ rw_assert_wrlock(&dev_priv->rps.hw_lock);
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ for (retries = 500; retries > 0; retries--) {
+ if ((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ *val = I915_READ(GEN6_PCODE_DATA);
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
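+
+/*
+ * sandybridge_pcode_write() speaks the same mailbox protocol as the read
+ * path above: bail out if GEN6_PCODE_READY is still set, post the data and
+ * command, then poll for up to 500ms for the PCU to clear the ready bit.
+ */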
+
+int
+sandybridge_pcode_write(struct inteldrm_softc *dev_priv, u8 mbox, u32 val)
+{
+ int retries;
+ rw_assert_wrlock(&dev_priv->rps.hw_lock);
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ for (retries = 500; retries > 0; retries--) {
+ if ((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.c b/sys/dev/pci/drm/i915/intel_ringbuffer.c
new file mode 100644
index 00000000000..18628a76d2d
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,2007 @@
+/* $OpenBSD: intel_ringbuffer.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ *    Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include "i915_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "intel_drv.h"
+
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
+int gen2_render_ring_flush(struct intel_ring_buffer *, u32, u32);
+int gen4_render_ring_flush(struct intel_ring_buffer *, u32, u32);
+int gen6_render_ring_flush(struct intel_ring_buffer *, u32, u32);
+int gen7_render_ring_flush(struct intel_ring_buffer *, u32, u32);
+int intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *);
+int gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *);
+void ring_write_tail(struct intel_ring_buffer *, u32);
+int init_pipe_control(struct intel_ring_buffer *);
+void cleanup_pipe_control(struct intel_ring_buffer *);
+int init_render_ring(struct intel_ring_buffer *);
+void render_ring_cleanup(struct intel_ring_buffer *);
+void update_mboxes(struct intel_ring_buffer *, u32);
+int gen6_add_request(struct intel_ring_buffer *);
+int gen6_ring_sync(struct intel_ring_buffer *, struct intel_ring_buffer *,
+ u32);
+int pc_render_add_request(struct intel_ring_buffer *);
+u32 gen6_ring_get_seqno(struct intel_ring_buffer *, bool);
+u32 ring_get_seqno(struct intel_ring_buffer *, bool);
+u32 pc_render_get_seqno(struct intel_ring_buffer *, bool);
+bool gen5_ring_get_irq(struct intel_ring_buffer *);
+void gen5_ring_put_irq(struct intel_ring_buffer *);
+bool i9xx_ring_get_irq(struct intel_ring_buffer *);
+void i9xx_ring_put_irq(struct intel_ring_buffer *);
+bool i8xx_ring_get_irq(struct intel_ring_buffer *);
+void i8xx_ring_put_irq(struct intel_ring_buffer *);
+int bsd_ring_flush(struct intel_ring_buffer *, u32, u32);
+int i9xx_add_request(struct intel_ring_buffer *);
+bool gen6_ring_get_irq(struct intel_ring_buffer *);
+void gen6_ring_put_irq(struct intel_ring_buffer *);
+int i830_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int i915_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int i965_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int init_status_page(struct intel_ring_buffer *);
+int init_phys_hws_pga(struct intel_ring_buffer *);
+int intel_ring_idle(struct intel_ring_buffer *);
+int intel_ring_wait_seqno(struct intel_ring_buffer *, u32);
+int intel_ring_wait_request(struct intel_ring_buffer *, int);
+int intel_wrap_ring_buffer(struct intel_ring_buffer *);
+int intel_ring_alloc_seqno(struct intel_ring_buffer *);
+void gen6_bsd_ring_write_tail(struct intel_ring_buffer *, u32);
+int gen6_ring_flush(struct intel_ring_buffer *, u32, u32);
+int gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *, u32, u32,
+ unsigned);
+int blt_ring_flush(struct intel_ring_buffer *, u32, u32);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *);
+
+extern int ticks;
+
+static inline int
+ring_space(struct intel_ring_buffer *ring)
+{
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+ if (space < 0)
+ space += ring->size;
+ return space;
+}
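ring_space() implements the circular-buffer free-space rule quoted in intel_ringbuffer.h below: the gap between head and tail, minus a 64-byte slack so head and tail never share a cacheline. A minimal userspace sketch of the same arithmetic, for illustration only (the constants mirror the 32-page ring allocated in intel_init_ring_buffer further down; sketch_ring_space is a hypothetical stand-in, not driver code):

#include <assert.h>

#define SKETCH_RING_SIZE (32 * 4096)	/* ring->size from intel_init_ring_buffer */
#define SKETCH_FREE_SPACE 64		/* I915_RING_FREE_SPACE */

static int
sketch_ring_space(int head, int tail)
{
	int space = head - (tail + SKETCH_FREE_SPACE);
	if (space < 0)			/* tail is ahead of head: wrap */
		space += SKETCH_RING_SIZE;
	return space;
}

int
main(void)
{
	/* head behind tail: free space wraps around the end of the buffer */
	assert(sketch_ring_space(0x100, 0x1000) == SKETCH_RING_SIZE - 0xf40);
	/* head ahead of tail: free space is the direct gap minus the slack */
	assert(sketch_ring_space(0x2000, 0x1000) == 0x1000 - 0x40);
	return 0;
}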
+
+int
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+ int ret;
+
+ cmd = MI_FLUSH;
+ if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+ cmd |= MI_NO_WRITE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ struct drm_device *dev = ring->dev;
+ u32 cmd;
+ int ret;
+
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+int
+intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0); /* low dword */
+ intel_ring_emit(ring, 0); /* high dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen6_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /* Force SNB workarounds for PIPE_CONTROL flushes */
+ ret = intel_emit_post_sync_nonzero_flush(ring);
+ if (ret)
+ return ret;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ /* Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set. */
+ gen7_render_ring_cs_stall_wa(ring);
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+void
+ring_write_tail(struct intel_ring_buffer *ring, u32 value)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ I915_WRITE_TAIL(ring, value);
+}
+
+u32
+intel_ring_get_active_head(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
+ RING_ACTHD(ring->mmio_base) : ACTHD;
+
+ return I915_READ(acthd_reg);
+}
+
+int
+init_ring_common(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
+ u32 head;
+ int retries;
+
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
+ /* Stop the ring if it's running. */
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(ring, 0);
+
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+
+ I915_WRITE_HEAD(ring, 0);
+
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
+ }
+
+ /* Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values. */
+ I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_CTL(ring,
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
+ else {
+ ring->head = I915_READ_HEAD(ring);
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring_space(ring);
+ ring->last_retired_head = -1;
+ }
+
+out:
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_put(dev_priv);
+
+ return ret;
+}
+
+int
+init_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (ring->private)
+ return 0;
+
+ pc = malloc(sizeof(*pc), M_DRM, M_WAITOK);
+ if (!pc)
+ return -ENOMEM;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ /*
+ * We want a snooped gtt mapping here.
+ * Normally this flag only applies to dmamem_map, but it has been
+ * overloaded for the agp mapping.
+ */
+ obj->dma_flags = BUS_DMA_COHERENT | BUS_DMA_READ;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret)
+ goto err_unref;
+
+ pc->gtt_offset = obj->gtt_offset;
+ pc->cpu_page = (volatile u_int32_t *)vm_map_min(kernel_map);
+ obj->base.uao->pgops->pgo_reference(obj->base.uao);
+ if ((ret = uvm_map(kernel_map, (vaddr_t *)&pc->cpu_page,
+ PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
+ UVM_INH_SHARE, UVM_ADV_RANDOM, 0))) != 0) {
+ DRM_ERROR("Failed to map status page.\n");
+ obj->base.uao->pgops->pgo_detach(obj->base.uao);
+ goto err_unpin;
+ }
+
+ pc->obj = obj;
+ ring->private = pc;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ free(pc, M_DRM);
+ return ret;
+}
+
+void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ struct drm_i915_gem_object *obj;
+
+ if (!ring->private)
+ return;
+
+ obj = pc->obj;
+
+ uvm_unmap(kernel_map, (vaddr_t)pc->cpu_page,
+ (vaddr_t)pc->cpu_page + PAGE_SIZE);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ free(pc, M_DRM);
+ ring->private = NULL;
+}
+
+int
+init_render_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ */
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ /* Required for the hardware to program scanline values for waiting */
+ if (INTEL_INFO(dev)->gen == 6)
+ I915_WRITE(GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+#ifdef notyet
+ if (IS_GEN6(dev)) {
+ /* From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ /* This is not explicitly set for GEN6, so read the register.
+ * see intel_ring_mi_set_context() for why we care.
+ * TODO: consider explicitly setting the bit for GEN5
+ */
+ ring->itlb_before_ctx_switch =
+ !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
+ }
+#endif
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+ if (HAS_L3_GPU_CACHE(dev))
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+
+ return ret;
+}
+
+void
+render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!ring->private)
+ return;
+
+ if (HAS_BROKEN_CS_TLB(dev))
+ drm_gem_object_unreference(to_gem_object(ring->private));
+
+ cleanup_pipe_control(ring);
+}
+
+void
+update_mboxes(struct intel_ring_buffer *ring,
+ u32 mmio_offset)
+{
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+}
+
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @ring: ring that is adding a request
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+int
+gen6_add_request(struct intel_ring_buffer *ring)
+{
+ u32 mbox1_reg;
+ u32 mbox2_reg;
+ int ret;
+
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ mbox1_reg = ring->signal_mbox[0];
+ mbox2_reg = ring->signal_mbox[1];
+
+ update_mboxes(ring, mbox1_reg);
+ update_mboxes(ring, mbox2_reg);
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/**
+ * gen6_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter: ring that is waiting
+ * @signaller: ring which has signalled, or will signal
+ * @seqno: seqno which the waiter will block on
+ */
+int
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ int ret;
+ u32 dw1 = MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER;
+
+ /* Throughout all of the GEM code, seqno passed implies our current
+ * seqno is >= the last seqno executed. However for hardware the
+ * comparison is strictly greater than.
+ */
+ seqno -= 1;
+
+ WARN_ON(signaller->semaphore_register[waiter->id] ==
+ MI_SEMAPHORE_SYNC_INVALID);
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(waiter,
+ dw1 | signaller->semaphore_register[waiter->id]);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_advance(waiter);
+
+ return 0;
+}
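The seqno adjustment above is easy to get backwards: the MI_SEMAPHORE compare is strictly greater-than, while the driver's convention is that a seqno has passed once the mailbox reaches that value. A sketch of the resulting wait condition (semaphore_passed models the hardware compare; seqno wrap is ignored for simplicity):

#include <assert.h>
#include <stdint.h>

/* Models the MI_SEMAPHORE_COMPARE test: the waiter resumes once the
 * signaller's mailbox is strictly greater than the programmed value. */
static int
semaphore_passed(uint32_t mailbox, uint32_t programmed)
{
	return mailbox > programmed;
}

int
main(void)
{
	uint32_t want = 42;		/* seqno the waiter needs completed */
	uint32_t programmed = want - 1;	/* the "seqno -= 1" emitted above */

	assert(!semaphore_passed(want - 1, programmed));	/* not yet */
	assert(semaphore_passed(want, programmed));		/* passed */
	return 0;
}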
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
+do { \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
+} while (0)
+
+int
+pc_render_add_request(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to workaround the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ /* Workaround to force correct ordering between irq and seqno writes on
+ * ivb (and maybe also on snb) by reading from a CS register (like
+ * ACTHD) before reading the status page. */
+ if (!lazy_coherency)
+ intel_ring_get_active_head(ring);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+u32
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+u32
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ struct pipe_control *pc = ring->private;
+ return pc->cpu_page[0];
+}
+
+bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ return true;
+}
+
+void
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+}
+
+bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ return true;
+}
+
+void
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+}
+
+bool
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ return true;
+}
+
+void
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+}
+
+void
+intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 mmio = 0;
+
+ /* The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
+}
+
+int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int
+i9xx_add_request(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ /* It looks like we need to prevent the gt from suspending while
+ * waiting for a notify irq, otherwise irqs seem to get lost on at
+ * least the blt/bsd rings on ivb. */
+ gen6_gt_force_wake_get(dev_priv);
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
+ GEN6_RENDER_L3_PARITY_ERROR));
+ else
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ return true;
+}
+
+void
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+// mtx_enter(&dev_priv->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+ else
+ I915_WRITE_IMR(ring, ~0);
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+// mtx_leave(&dev_priv->irq_lock);
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 length,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ MI_BATCH_GTT |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
+int
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ struct drm_i915_gem_object *obj = ring->private;
+ u32 cs_offset = obj->gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+ /* Blit the batch (which has now all relocs applied) to the stable batch
+ * scratch bo area (so that the CS never stumbles over its tlb
+ * invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (howmany(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
+
+int
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+void
+cleanup_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = ring->status_page.obj;
+ if (obj == NULL)
+ return;
+
+ uvm_unmap(kernel_map, (vaddr_t)ring->status_page.page_addr,
+ (vaddr_t)ring->status_page.page_addr + PAGE_SIZE);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ ring->status_page.obj = NULL;
+}
+
+int
+init_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ /*
+ * We want a snooped gtt mapping here.
+ * Normally this flag only applies to dmamem_map, but it has been
+ * overloaded for the agp mapping.
+ */
+ obj->dma_flags = BUS_DMA_COHERENT | BUS_DMA_READ;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret != 0) {
+ goto err_unref;
+ }
+
+ ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.page_addr = (u_int32_t *)vm_map_min(kernel_map);
+ obj->base.uao->pgops->pgo_reference(obj->base.uao);
+ if ((ret = uvm_map(kernel_map, (vaddr_t *)&ring->status_page.page_addr,
+ PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
+ UVM_INH_SHARE, UVM_ADV_RANDOM, 0))) != 0) {
+ obj->base.uao->pgops->pgo_detach(obj->base.uao);
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+ ring->status_page.obj = obj;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ intel_ring_setup_status_page(ring);
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ return ret;
+}
+
+int
+init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ if (!dev_priv->status_page_dmah) {
+ dev_priv->status_page_dmah = drm_dmamem_alloc(dev_priv->dmat,
+ PAGE_SIZE, PAGE_SIZE, 1, PAGE_SIZE, 0, BUS_DMA_READ);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+ }
+
+ addr = dev_priv->status_page_dmah->map->dm_segs[0].ds_addr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->map->dm_segs[0].ds_addr >> 28) & 0xf0;
+ bus_dmamap_sync(dev_priv->dmat, dev_priv->status_page_dmah->map, 0,
+ PAGE_SIZE, BUS_DMASYNC_PREREAD);
+ I915_WRITE(HWS_PGA, addr);
+
+ ring->status_page.page_addr = (u32 *)dev_priv->status_page_dmah->kva;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
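On gen4+ the physical status page address programmed into HWS_PGA is wider than 32 bits: bits 35:32 of the bus address are folded into register bits 7:4, which is what the `>> 28 & 0xf0` above does. A sketch of that packing (pack_hws_pga is hypothetical and assumes a page-aligned address):

#include <assert.h>
#include <stdint.h>

/* Fold bits 35:32 of a page-aligned bus address into register
 * bits 7:4, as init_phys_hws_pga() does for gen4+ hardware. */
static uint32_t
pack_hws_pga(uint64_t ds_addr)
{
	uint32_t reg = (uint32_t)ds_addr;	/* bits 31:12 stay in place */
	reg |= (uint32_t)(ds_addr >> 28) & 0xf0;
	return reg;
}

int
main(void)
{
	/* a page at physical 0x3_4000_0000: high nibble 0x3 -> bits 7:4 */
	assert(pack_hws_pga(0x340000000ULL) == (0x40000000u | 0x30u));
	return 0;
}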
+
+u32
+intel_read_status_page(struct intel_ring_buffer *ring, int reg)
+{
+ struct inteldrm_softc *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_gem_object *obj_priv;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ u32 val;
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ obj_priv = ring->status_page.obj;
+ map = obj_priv->dmamap;
+ tag = dev_priv->agpdmat;
+ } else {
+ map = dev_priv->status_page_dmah->map;
+ tag = dev->dmat;
+ }
+ /* Ensure that the compiler doesn't optimize away the load. */
+ bus_dmamap_sync(tag, map, 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
+ val = ((volatile u_int32_t *)(ring->status_page.page_addr))[reg];
+ bus_dmamap_sync(tag, map, 0, PAGE_SIZE, BUS_DMASYNC_PREREAD);
+
+ return (val);
+}
+
+int
+intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ ring->size = 32 * PAGE_SIZE;
+ memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
+
+// init_waitqueue_head(&ring->irq_queue);
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(ring);
+ if (ret)
+ return ret;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
+ }
+
+ obj = i915_gem_alloc_object(dev, ring->size);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto err_hws;
+ }
+
+ ring->obj = obj;
+
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+ if (ret)
+ goto err_unref;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+
+ if ((ret = agp_map_subregion(dev_priv->agph, obj->gtt_offset,
+ ring->size, &ring->bsh)) != 0) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ ret = ring->init(ring);
+ if (ret)
+ goto err_unmap;
+
+ /* Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ring->effective_size -= 128;
+
+ return 0;
+
+err_unmap:
+ agp_unmap_subregion(dev_priv->agph, ring->bsh, ring->size);
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ ring->obj = NULL;
+err_hws:
+ cleanup_status_page(ring);
+ return ret;
+}
+
+void
+intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv;
+ int ret;
+
+ if (ring->obj == NULL)
+ return;
+
+ /* Disable the ring buffer. The ring must be idle at this point */
+ dev_priv = ring->dev->dev_private;
+ ret = intel_ring_idle(ring);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
+ I915_WRITE_CTL(ring, 0);
+
+ agp_unmap_subregion(dev_priv->agph, ring->bsh, ring->size);
+
+ i915_gem_object_unpin(ring->obj);
+ drm_gem_object_unreference(&ring->obj->base);
+ ring->obj = NULL;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+ cleanup_status_page(ring);
+}
+
+int
+intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ bool was_interruptible;
+ int ret;
+
+ /* XXX As we have not yet audited all the paths to check that
+ * they are ready for ERESTARTSYS from intel_ring_begin, do not
+ * allow us to be interruptible by a signal.
+ */
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ ret = i915_wait_seqno(ring, seqno);
+
+ if (!ret)
+ i915_gem_retire_requests_ring(ring);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+int
+intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_i915_gem_request *request;
+ u32 seqno = 0;
+ int ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ if (ring->last_retired_head != -1) {
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ int space;
+
+ if (request->tail == -1)
+ continue;
+
+ space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
+ if (space < 0)
+ space += ring->size;
+ if (space >= n) {
+ seqno = request->seqno;
+ break;
+ }
+
+ /* Consume this request in case we need more space than
+ * is available and so need to prevent a race between
+ * updating last_retired_head and direct reads of
+ * I915_RING_HEAD. It also provides a nice sanity check.
+ */
+ request->tail = -1;
+ }
+
+ if (seqno == 0)
+ return -ENOSPC;
+
+ ret = intel_ring_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(ring->last_retired_head == -1))
+ return -ENOSPC;
+
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (WARN_ON(ring->space < n))
+ return -ENOSPC;
+
+ return 0;
+}
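intel_ring_wait_request() picks the oldest outstanding request whose recorded tail, once retired, frees at least n bytes, so the caller blocks no longer than necessary. A sketch of that selection under made-up values (same wrap arithmetic as ring_space above):

#include <stdio.h>

#define SKETCH_RING_SIZE (32 * 4096)
#define SKETCH_FREE_SPACE 64

/* Space that retiring a request with this recorded tail would free. */
static int
space_after(int request_tail, int current_tail)
{
	int space = request_tail - (current_tail + SKETCH_FREE_SPACE);
	if (space < 0)
		space += SKETCH_RING_SIZE;
	return space;
}

int
main(void)
{
	int tails[] = { 0x4000, 0x8000, 0xc000 };	/* oldest first */
	int current_tail = 0x3000, n = 0x4000;
	int i;

	for (i = 0; i < 3; i++) {
		if (space_after(tails[i], current_tail) >= n) {
			printf("wait for request %d\n", i);	/* prints 1 */
			return 0;
		}
	}
	return 1;	/* no request frees enough: the -ENOSPC case */
}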
+
+int
+ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long end;
+ int ret;
+
+ ret = intel_ring_wait_request(ring, n);
+ if (ret != -ENOSPC)
+ return ret;
+
+// trace_i915_ring_wait_begin(ring);
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = ticks + 60 * hz;
+
+ do {
+ ring->head = I915_READ_HEAD(ring);
+ ring->space = ring_space(ring);
+ if (ring->space >= n) {
+// trace_i915_ring_wait_end(ring);
+ return 0;
+ }
+
+#if 0
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+#endif
+
+ drm_msleep(1, "915wfs");
+
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+ } while (!time_after(ticks, end));
+// trace_i915_ring_wait_end(ring);
+ return -EBUSY;
+}
+
+int
+intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+ struct inteldrm_softc *dev_priv = ring->dev->dev_private;
+ int rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = ring_wait_for_space(ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ bus_space_set_region_4(dev_priv->bst, ring->bsh,
+ ring->tail, MI_NOOP, rem / 4);
+
+ ring->tail = 0;
+ ring->space = ring_space(ring);
+
+ return 0;
+}
+
+int
+intel_ring_idle(struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ int ret;
+
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring, NULL, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ seqno = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list)->seqno;
+
+ return i915_wait_seqno(ring, seqno);
+}
+
+int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
+int
+intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int n = 4*num_dwords;
+ int ret;
+
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
+
+ if (unlikely(ring->tail + n > ring->effective_size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < n)) {
+ ret = ring_wait_for_space(ring, n);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ring->space -= n;
+ return 0;
+}
+
+void
+intel_ring_advance(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+
+ ring->tail &= ring->size - 1;
+ if (dev_priv->stop_rings & intel_ring_flag(ring))
+ return;
+ ring->write_tail(ring, ring->tail);
+}
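Together, intel_ring_begin(), intel_ring_emit() and intel_ring_advance() form the emission protocol every function in this file follows: reserve dwords (waiting or wrapping as needed), write them at the tail, then publish the new tail. A minimal caller, for illustration only (emit_two_noops is hypothetical, not part of the driver; it assumes the surrounding driver headers):

static int
emit_two_noops(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
	if (ret)
		return ret;			/* wedged, or no space freed */

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);	/* publish tail (unless rings stopped) */

	return 0;
}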
+
+void
+gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, u32 value)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int retries;
+
+ /* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ /* Clear the context id. Here be magic! */
+ I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
+ for (retries = 50; retries > 0; retries--) {
+ if ((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_INDICATOR) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (retries == 0)
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+ /* Now that the ring is fully powered up, update the tail */
+ I915_WRITE_TAIL(ring, value);
+ POSTING_READ(RING_TAIL(ring->mmio_base));
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+}
+
+int
+gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate, u32 flush)
+{
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+int
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/* Blitter support (SandyBridge+) */
+
+int
+blt_ring_flush(struct intel_ring_buffer *ring, u32 invalidate, u32 flush)
+{
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int
+intel_init_render_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->flush = gen7_render_ring_flush;
+ if (INTEL_INFO(dev)->gen == 6)
+ ring->flush = gen6_render_ring_flush;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+ ring->signal_mbox[0] = GEN6_VRSYNC;
+ ring->signal_mbox[1] = GEN6_BRSYNC;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = pc_render_get_seqno;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+ } else {
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ }
+ ring->write_tail = ring_write_tail;
+ if (IS_HASWELL(dev))
+ ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
+ /* Workaround batchbuffer to combat CS tlb bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to ping batch bo\n");
+ return ret;
+ }
+
+ ring->private = obj;
+ }
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ int ret;
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* non-kms not supported on gen6+ */
+ return -ENODEV;
+ }
+
+ /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+ * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+ * the special gen5 functions. */
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+
+ ring->size = size;
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ring->effective_size -= 128;
+
+ if ((ret = agp_map_subregion(dev_priv->agph, start,
+ size, &ring->bsh)) != 0) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ return -ENOMEM;
+ }
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
+
+ ring->name = "bsd ring";
+ ring->id = VCS;
+
+ ring->write_tail = ring_write_tail;
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ /* gen6 bsd needs a special wa for tail updates */
+ if (IS_GEN6(dev))
+ ring->write_tail = gen6_bsd_ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+ ring->signal_mbox[0] = GEN6_RVSYNC;
+ ring->signal_mbox[1] = GEN6_BVSYNC;
+ } else {
+ ring->mmio_base = BSD_RING_BASE;
+ ring->flush = bsd_ring_flush;
+ ring->add_request = i9xx_add_request;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN5(dev)) {
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ } else {
+ ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ }
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
+
+ ring->name = "blitter ring";
+ ring->id = BCS;
+
+ ring->mmio_base = BLT_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = blt_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->signal_mbox[0] = GEN6_RBSYNC;
+ ring->signal_mbox[1] = GEN6_VBSYNC;
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+// trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+// trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+void
+intel_ring_emit(struct intel_ring_buffer *ring, uint32_t data)
+{
+ struct drm_device *dev = ring->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ bus_space_write_4(dev_priv->bst, ring->bsh,
+ ring->tail, data);
+ ring->tail += 4;
+}
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.h b/sys/dev/pci/drm/i915/intel_ringbuffer.h
new file mode 100644
index 00000000000..0c847c945f1
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,242 @@
+/* $OpenBSD: intel_ringbuffer.h,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+#include <dev/pci/drm/drm_linux_list.h>
+
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
+struct intel_hw_status_page {
+ u32 *page_addr;
+ unsigned int gfx_addr;
+ struct drm_i915_gem_object *obj;
+};
+
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+
+#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
+
+struct intel_ring_buffer {
+ const char *name;
+ enum intel_ring_id {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ } id;
+#define I915_NUM_RINGS 3
+ u32 mmio_base;
+ struct drm_device *dev;
+ struct drm_i915_gem_object *obj;
+
+ bus_space_handle_t bsh;
+
+ u32 head;
+ u32 tail;
+ int space;
+ int size;
+ int effective_size;
+ struct intel_hw_status_page status_page;
+
+ /** We track the position of the requests in the ring buffer, and
+ * when each is retired we increment last_retired_head as the GPU
+ * must have finished processing the request and so we know we
+ * can advance the ringbuffer up to that position.
+ *
+ * last_retired_head is set to -1 after the value is consumed so
+ * we can detect new retirements.
+ */
+ u32 last_retired_head;
+
+ u32 irq_refcount; /* protected by dev_priv->irq_lock */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ u32 trace_irq_seqno;
+ u32 sync_seqno[I915_NUM_RINGS-1];
+ bool (*irq_get)(struct intel_ring_buffer *ring);
+ void (*irq_put)(struct intel_ring_buffer *ring);
+
+ int (*init)(struct intel_ring_buffer *ring);
+
+ void (*write_tail)(struct intel_ring_buffer *ring,
+ u32 value);
+ int (*flush)(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ int (*add_request)(struct intel_ring_buffer *ring);
+ /* Some chipsets are not quite as coherent as advertised and need
+ * an expensive kick to force a true read of the up-to-date seqno.
+ * However, the up-to-date seqno is not always required and the last
+ * seen value is good enough. Note that the seqno will always be
+ * monotonic, even if not coherent.
+ */
+ u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ bool lazy_coherency);
+ int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ u32 offset, u32 length,
+ unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+ void (*cleanup)(struct intel_ring_buffer *ring);
+ int (*sync_to)(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
+
+ u32 semaphore_register[3]; /* our mbox written by others */
+ u32 signal_mbox[2]; /* mboxes this ring signals to */
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * Do we have some not yet emitted requests outstanding?
+ */
+ u32 outstanding_lazy_request;
+ bool gpu_caches_dirty;
+
+ int irq_queue;
+
+ /**
+ * Do an explicit TLB flush before MI_SET_CONTEXT
+ */
+ bool itlb_before_ctx_switch;
+ struct i915_hw_context *default_context;
+ struct drm_i915_gem_object *last_context_obj;
+
+ void *private;
+};
+
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+ return ring->obj != NULL;
+}
+
+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+ return 1 << ring->id;
+}
+
+static inline u32
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *other)
+{
+ int idx;
+
+ /*
+ * cs -> 0 = vcs, 1 = bcs
+ * vcs -> 0 = bcs, 1 = cs,
+ * bcs -> 0 = cs, 1 = vcs.
+ */
+
+ idx = (other - ring) - 1;
+ if (idx < 0)
+ idx += I915_NUM_RINGS;
+
+ return idx;
+}
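+
+/*
+ * Editor's example (not part of the original patch): if the render
+ * ring must wait on the video ring, then with the usual
+ * dev_priv->ring[] array (an assumption of this sketch):
+ */
+#if 0
+ /* (&ring[VCS] - &ring[RCS]) - 1 == 0, so the render ring tracks
+  * the video ring's seqno in sync_seqno[0], matching the
+  * "cs -> 0 = vcs" table above. */
+ idx = intel_ring_sync_index(&dev_priv->ring[RCS],
+     &dev_priv->ring[VCS]);
+#endif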
+
+u32 intel_read_status_page(struct intel_ring_buffer *ring, int reg);
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
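+
+/*
+ * Editor's example (not part of the original patch): seqnos are
+ * written into the driver-usable area by MI_STORE_DWORD_INDEX, so the
+ * last completed seqno can be read back without touching mmio:
+ */
+#if 0
+ u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+#endif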
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+
+int intel_ring_begin(struct intel_ring_buffer *ring, int n);
+#ifdef notyet
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ u32 data)
+{
+ iowrite32(data, ring->virtual_start + ring->tail);
+ ring->tail += 4;
+}
+#endif
+void intel_ring_advance(struct intel_ring_buffer *ring);
+int intel_ring_idle(struct intel_ring_buffer *ring);
+
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
+
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+
+static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+{
+ return ring->tail;
+}
+
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ BUG_ON(ring->outstanding_lazy_request == 0);
+ return ring->outstanding_lazy_request;
+}
+
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+ if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
+}
+
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/sys/dev/pci/drm/i915/intel_sdvo.c b/sys/dev/pci/drm/i915/intel_sdvo.c
new file mode 100644
index 00000000000..094cc292cde
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_sdvo.c
@@ -0,0 +1,3047 @@
+/* $OpenBSD: intel_sdvo.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_edid.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_sdvo_regs.h"
+
+static __inline uint16_t
+bitcount16(uint32_t x)
+{
+ /*
+  * Parallel popcount of the low 16 bits of x: sum adjacent bits,
+  * then 2-bit, 4-bit and finally 8-bit fields.
+  */
+ x = (x & 0x5555) + ((x & 0xaaaa) >> 1);
+ x = (x & 0x3333) + ((x & 0xcccc) >> 2);
+ x = (x + (x >> 4)) & 0x0f0f;
+ x = (x + (x >> 8)) & 0x00ff;
+ return (x);
+}
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+ "NTSC_M" , "NTSC_J" , "NTSC_443",
+ "PAL_B" , "PAL_D" , "PAL_G" ,
+ "PAL_H" , "PAL_I" , "PAL_M" ,
+ "PAL_N" , "PAL_NC" , "PAL_60" ,
+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
+ "SECAM_60"
+};
+
+#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
+struct intel_sdvo {
+ struct intel_encoder base;
+
+ struct i2c_controller *i2c;
+ u8 slave_addr;
+
+ struct i2c_controller ddc;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ uint32_t sdvo_reg;
+
+ /* Active outputs controlled by this SDVO output */
+ uint16_t controlled_output;
+
+ /*
+ * Capabilities of the SDVO device returned by
+ * i830_sdvo_get_capabilities()
+ */
+ struct intel_sdvo_caps caps;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
+ int pixel_clock_min, pixel_clock_max;
+
+ /*
+ * For multiple function SDVO device,
+ * this is for current attached outputs.
+ */
+ uint16_t attached_output;
+
+ /*
+ * Hotplug activation bits for this device
+ */
+ uint16_t hotplug_active;
+
+ /**
+ * This is used to select the color range of RGB outputs in HDMI mode.
+ * It is only valid when using TMDS encoding and 8 bits per color mode.
+ */
+ uint32_t color_range;
+
+ /**
+ * This is set if we're going to treat the device as TV-out.
+ *
+ * While we have these nice friendly flags for output types that ought
+ * to decide this for us, the S-Video output on our HDMI+S-Video card
+ * shows up as RGB1 (VGA).
+ */
+ bool is_tv;
+
+ /* On different gens SDVOB is at different places. */
+ bool is_sdvob;
+
+ /* This is for current tv format name */
+ int tv_format_index;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+
+ /**
+ * This is set if we detect output of sdvo device as LVDS and
+ * have a valid fixed mode to use with the panel.
+ */
+ bool is_lvds;
+
+ /**
+ * This is the SDVO fixed panel mode pointer
+ */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
+
+ /*
+ * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
+ */
+ uint8_t dtd_sdvo_flags;
+};
+
+struct intel_sdvo_connector {
+ struct intel_connector base;
+
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ enum hdmi_force_audio force_audio;
+
+ /* This contains all current supported TV format */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
+ /* add the property for the SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
+
+ /* add the property for the SDVO-TV/LVDS */
+ struct drm_property *brightness;
+
+ /* Add variable to record current setting for the above property */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+
+ /* this is to get the range of margin.*/
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
+};
+
+struct intel_sdvo *to_intel_sdvo(struct drm_encoder *);
+struct intel_sdvo *intel_attached_sdvo(struct drm_connector *);
+struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *);
+void intel_sdvo_write_sdvox(struct intel_sdvo *, u32);
+bool intel_sdvo_read_byte(struct intel_sdvo *, u8, u8 *);
+bool intel_sdvo_write_cmd(struct intel_sdvo *, u8, const void *, int);
+bool intel_sdvo_read_response(struct intel_sdvo *, void *, int);
+int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *);
+bool intel_sdvo_set_value(struct intel_sdvo *, u8, const void *, int);
+bool intel_sdvo_get_value(struct intel_sdvo *, u8, void *, int);
+bool intel_sdvo_set_target_input(struct intel_sdvo *);
+bool intel_sdvo_get_trained_inputs(struct intel_sdvo *, bool *, bool *);
+bool intel_sdvo_set_active_outputs(struct intel_sdvo *, u16);
+bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *, int);
+bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *, int *,
+ int *);
+bool intel_sdvo_set_target_output(struct intel_sdvo *, u16);
+bool intel_sdvo_set_timing(struct intel_sdvo *, u8,
+ struct intel_sdvo_dtd *);
+bool intel_sdvo_set_input_timing(struct intel_sdvo *,
+ struct intel_sdvo_dtd *);
+bool intel_sdvo_set_output_timing(struct intel_sdvo *,
+ struct intel_sdvo_dtd *);
+bool intel_sdvo_create_preferred_input_timing(struct intel_sdvo *, uint16_t,
+ uint16_t, uint16_t);
+bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *,
+ struct intel_sdvo_dtd *);
+bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *, u8);
+void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *,
+ const struct drm_display_mode *);
+void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *,
+ const struct intel_sdvo_dtd *);
+bool intel_sdvo_check_supp_encode(struct intel_sdvo *);
+bool intel_sdvo_set_encode(struct intel_sdvo *, uint8_t);
+bool intel_sdvo_set_colorimetry(struct intel_sdvo *, uint8_t);
+bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *);
+bool intel_sdvo_set_tv_format(struct intel_sdvo *);
+bool intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *,
+ const struct drm_display_mode *);
+bool intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *,
+ const struct drm_display_mode *, struct drm_display_mode *);
+bool intel_sdvo_mode_fixup(struct drm_encoder *,
+ const struct drm_display_mode *, struct drm_display_mode *);
+void intel_sdvo_mode_set(struct drm_encoder *, struct drm_display_mode *,
+ struct drm_display_mode *);
+int intel_sdvo_mode_valid(struct drm_connector *,
+ struct drm_display_mode *);
+bool intel_sdvo_get_capabilities(struct intel_sdvo *,
+ struct intel_sdvo_caps *);
+int intel_sdvo_supports_hotplug(struct intel_sdvo *);
+void intel_sdvo_enable_hotplug(struct intel_encoder *);
+bool intel_sdvo_multifunc_encoder(struct intel_sdvo *);
+struct edid *intel_sdvo_get_edid(struct drm_connector *);
+struct edid *intel_sdvo_get_analog_edid(struct drm_connector *);
+enum drm_connector_status intel_sdvo_tmds_sink_detect(struct drm_connector *);
+bool intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *,
+ struct edid *);
+enum drm_connector_status intel_sdvo_detect(struct drm_connector *, bool);
+void intel_sdvo_get_ddc_modes(struct drm_connector *);
+void intel_sdvo_get_tv_modes(struct drm_connector *);
+void intel_sdvo_get_lvds_modes(struct drm_connector *);
+int intel_sdvo_get_modes(struct drm_connector *);
+void intel_sdvo_destroy_enhance_property(struct drm_connector *);
+void intel_sdvo_destroy(struct drm_connector *);
+bool intel_sdvo_detect_hdmi_audio(struct drm_connector *);
+int intel_sdvo_set_property(struct drm_connector *,
+ struct drm_property *, uint64_t);
+void intel_sdvo_enc_destroy(struct drm_encoder *);
+void intel_sdvo_guess_ddc_bus(struct intel_sdvo *);
+void intel_sdvo_select_ddc_bus(struct inteldrm_softc *,
+ struct intel_sdvo *, u32);
+void intel_sdvo_select_i2c_bus(struct inteldrm_softc *,
+ struct intel_sdvo *, u32);
+bool intel_sdvo_is_hdmi_connector(struct intel_sdvo *, int);
+void intel_sdvo_connector_init(struct intel_sdvo_connector *,
+ struct intel_sdvo *);
+void intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *);
+bool intel_sdvo_dvi_init(struct intel_sdvo *, int);
+bool intel_sdvo_tv_init(struct intel_sdvo *, int);
+bool intel_sdvo_analog_init(struct intel_sdvo *, int);
+bool intel_sdvo_lvds_init(struct intel_sdvo *, int);
+bool intel_sdvo_output_setup(struct intel_sdvo *, uint16_t);
+bool intel_sdvo_tv_create_property(struct intel_sdvo *,
+ struct intel_sdvo_connector *, int);
+bool intel_sdvo_create_enhance_property_tv(struct intel_sdvo *,
+ struct intel_sdvo_connector *,
+ struct intel_sdvo_enhancements_reply);
+bool intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *,
+ struct intel_sdvo_connector *,
+ struct intel_sdvo_enhancements_reply);
+bool intel_sdvo_create_enhance_property(struct intel_sdvo *,
+ struct intel_sdvo_connector *);
+bool intel_sdvo_init_ddc_proxy(struct intel_sdvo *, struct drm_device *);
+void intel_sdvo_debug_write(struct intel_sdvo *, u8, const void *, int);
+bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *, u8);
+bool intel_sdvo_get_active_outputs(struct intel_sdvo *, u16 *);
+bool intel_sdvo_write_infoframe(struct intel_sdvo *, unsigned, uint8_t,
+ uint8_t *, unsigned);
+bool intel_sdvo_get_preferred_input_mode(struct intel_sdvo *,
+ const struct drm_display_mode *, struct drm_display_mode *);
+bool intel_sdvo_connector_get_hw_state(struct intel_connector *);
+bool intel_sdvo_get_hw_state(struct intel_encoder *, enum pipe *);
+void intel_disable_sdvo(struct intel_encoder *);
+void intel_enable_sdvo(struct intel_encoder *);
+void intel_sdvo_dpms(struct drm_connector *, int);
+uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *);
+void intel_sdvo_unselect_i2c_bus(struct intel_sdvo *);
+u8 intel_sdvo_get_slave_addr(struct drm_device *, struct intel_sdvo *);
+void intel_sdvo_output_cleanup(struct intel_sdvo *);
+int intel_sdvo_ddc_proxy_acquire_bus(void *, int);
+void intel_sdvo_ddc_proxy_release_bus(void *, int);
+int intel_sdvo_ddc_proxy_exec(void *, i2c_op_t, i2c_addr_t,
+ const void *, size_t, void *, size_t, int);
+
+struct intel_sdvo *
+to_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_sdvo, base.base);
+}
+
+struct intel_sdvo *
+intel_attached_sdvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_sdvo, base);
+}
+
+struct intel_sdvo_connector *
+to_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+void
+intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 bval = val, cval = val;
+ int i;
+
+ if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(intel_sdvo->sdvo_reg, val);
+ I915_READ(intel_sdvo->sdvo_reg);
+ return;
+ }
+
+ if (intel_sdvo->sdvo_reg == SDVOB) {
+ cval = I915_READ(SDVOC);
+ } else {
+ bval = I915_READ(SDVOB);
+ }
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+ for (i = 0; i < 2; i++)
+ {
+ I915_WRITE(SDVOB, bval);
+ I915_READ(SDVOB);
+ I915_WRITE(SDVOC, cval);
+ I915_READ(SDVOC);
+ }
+}
+
+bool
+intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+{
+ uint8_t cmd = 0;
+ int ret;
+
+ iic_acquire_bus(intel_sdvo->i2c, 0);
+ ret = iic_exec(intel_sdvo->i2c, I2C_OP_READ_WITH_STOP, addr, &cmd, 1,
+     ch, 1, 0);
+ iic_release_bus(intel_sdvo->i2c, 0);
+
+ /* Propagate i2c errors so callers can detect a dead device. */
+ return (ret == 0);
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+ u8 cmd;
+ const char *name;
+} sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define SDVO_NAME(sdvo) ((sdvo)->is_sdvob ? "SDVOB" : "SDVOC")
+
+void
+intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s: W: %02X ",
+ SDVO_NAME(intel_sdvo), cmd);
+ for (i = 0; i < args_len; i++)
+ printf("%02X ", ((u8 *)args)[i]);
+ for (; i < 8; i++)
+ printf(" ");
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd) {
+ printf("(%s)", sdvo_cmd_names[i].name);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(sdvo_cmd_names))
+ printf("(%02X)", cmd);
+ printf("\n");
+}
+
+static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+ "Not supported",
+ "Invalid arg",
+ "Pending",
+ "Target not specified",
+ "Scaling not supported"
+};
+
+struct i2c_msg {
+ i2c_op_t op;
+ i2c_addr_t addr;
+ void *buf;
+ size_t len;
+};
+
+bool
+intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ u8 *buf, status;
+ struct i2c_msg *msgs;
+ int i, ret = true, x;
+ uint8_t c = 0;
+
+ /* Would be simpler to allocate both in one go? */
+ buf = (u8 *)malloc(args_len * 2 + 2, M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!buf)
+ return false;
+
+ msgs = malloc((args_len + 3) * sizeof(*msgs), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!msgs) {
+ free(buf, M_DRM);
+ return false;
+ }
+
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
+ for (i = 0; i < args_len; i++) {
+ msgs[i].op = I2C_OP_WRITE_WITH_STOP;
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2 * i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].op = I2C_OP_WRITE_WITH_STOP;
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].op = I2C_OP_WRITE_WITH_STOP;
+ msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].op = I2C_OP_READ_WITH_STOP;
+ msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ iic_acquire_bus(intel_sdvo->i2c, 0);
+ for (x = 0; x < i+3; x++) {
+ ret = iic_exec(intel_sdvo->i2c, msgs[x].op, msgs[x].addr,
+ &c, 1, msgs[x].buf, msgs[x].len, 0);
+ if (ret) {
+ DRM_DEBUG_KMS("sdvo i2c transfer failed\n");
+ ret = false;
+ break;
+ }
+ }
+ iic_release_bus(intel_sdvo->i2c, 0);
+
+ free(msgs, M_DRM);
+ free(buf, M_DRM);
+ return ret;
+}
+
+bool
+intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ void *response, int response_len)
+{
+ u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
+ u8 status;
+ int i;
+
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ *
+ * Also beware that the first response by many devices is to
+ * reply PENDING and stall for time. TVs are notorious for
+ * requiring longer than specified to complete their replies.
+ * Originally (in the DDX long ago), the delay was only ever 15ms
+ * with an additional delay of 30ms applied for TVs added later after
+ * many experiments. To accommodate both sets of delays, we do a
+ * sequence of slow checks if the device is falling behind and fails
+ * to reply within 5*15µs.
+ */
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+ if (retry < 10)
+ drm_msleep(15, "915srr");
+ else
+ DELAY(15);
+
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+ }
+
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ printf("(%s)", cmd_status_names[status]);
+ else
+ printf("(??? %d)", status);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ printf(" %02X", ((u8 *)response)[i]);
+ }
+ printf("\n");
+ return true;
+
+log_fail:
+ printf("... failed\n");
+ return false;
+}
+
+int
+intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+ if (mode->clock >= 100000)
+ return 1;
+ else if (mode->clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
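+
+/*
+ * Editor's example (not part of the original patch): the multiplier
+ * keeps the SDVO bus in its 100-200 MHz operating range. A 640x480@60
+ * mode with a 25175 kHz dot clock gets a multiplier of 4, since
+ * 25175 kHz * 4 = 100700 kHz.
+ */
+#if 0
+ mult = intel_sdvo_get_pixel_multiplier(mode); /* 25175 kHz -> 4 */
+#endif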
+
+bool
+intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
+{
+ /* This must be the immediately preceding write before the i2c xfer */
+ return intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
+}
+
+bool
+intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
+
+bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, value, len);
+}
+
+bool
+intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_set_target_input_args targets = {0};
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+bool
+intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
+{
+ struct intel_sdvo_get_trained_inputs_response response;
+
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(response) != 1);
+#endif
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+bool
+intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
+{
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
+}
+
+bool
+intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 *outputs)
+{
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ACTIVE_OUTPUTS,
+ outputs, sizeof(*outputs));
+}
+
+bool
+intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
+ int mode)
+{
+ u8 state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+bool
+intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
+ int *clock_min,
+ int *clock_max)
+{
+ struct intel_sdvo_pixel_clock_range clocks;
+
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(clocks) != 4);
+#endif
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+ return true;
+}
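+
+/*
+ * Editor's example (not part of the original patch, values
+ * hypothetical): a device reporting clocks.min = 2500 and
+ * clocks.max = 16500 advertises a 25000-165000 kHz range, i.e. the
+ * 25-165 MHz of a single-link TMDS encoder.
+ */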
+
+bool
+intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
+{
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
+}
+
+bool
+intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+bool
+intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_sdvo,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+bool
+intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_sdvo,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+bool
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ uint16_t clock,
+ uint16_t width,
+ uint16_t height)
+{
+ struct intel_sdvo_preferred_input_timing_args args;
+
+ memset(&args, 0, sizeof(args));
+ args.clock = clock;
+ args.width = width;
+ args.height = height;
+ args.interlace = 0;
+
+ if (intel_sdvo->is_lvds &&
+ (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ args.scaled = 1;
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+}
+
+bool
+intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+#endif
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
+}
+
+bool
+intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
+void
+intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ const struct drm_display_mode *mode)
+{
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
+ int mode_clock;
+
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->htotal - mode->hdisplay;
+ h_sync_len = mode->hsync_end - mode->hsync_start;
+
+ v_blank_len = mode->vtotal - mode->vdisplay;
+ v_sync_len = mode->vsync_end - mode->vsync_start;
+
+ h_sync_offset = mode->hsync_start - mode->hdisplay;
+ v_sync_offset = mode->vsync_start - mode->vdisplay;
+
+ mode_clock = mode->clock;
+ mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
+ mode_clock /= 10;
+ dtd->part1.clock = mode_clock;
+
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ dtd->part2.h_sync_off = h_sync_offset & 0xff;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
+ dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+ dtd->part2.reserved = 0;
+}
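+
+/*
+ * Editor's worked example (not part of the original patch): a
+ * 640x480@60 mode (clock 25175, hsync 656-752, htotal 800, pixel
+ * multiplier 1) encodes as:
+ */
+#if 0
+ dtd.part1.clock = 2517;       /* 25175 / 10 */
+ dtd.part1.h_active = 0x80;    /* 640 & 0xff */
+ dtd.part1.h_blank = 0xa0;     /* (800 - 640) & 0xff */
+ dtd.part1.h_high = 0x20;      /* 640 >> 8 in the high nibble */
+ dtd.part2.h_sync_off = 16;    /* 656 - 640 */
+ dtd.part2.h_sync_width = 96;  /* 752 - 656 */
+#endif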
+
+void
+intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ const struct intel_sdvo_dtd *dtd)
+{
+ mode->hdisplay = dtd->part1.h_active;
+ mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+ mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+ mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+ mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode->vdisplay = dtd->part1.v_active;
+ mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode->vsync_start = mode->vdisplay;
+ mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode->vsync_end = mode->vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+ mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+bool
+intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_encode encode;
+
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(encode) != 2);
+#endif
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
+
+bool
+intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+ uint8_t mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+bool
+intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
+ uint8_t mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+void
+intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+{
+ int i, j;
+ uint8_t set_buf_index[2];
+ uint8_t av_split;
+ uint8_t buf_size;
+ uint8_t buf[48];
+ uint8_t *pos;
+
+ intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+ for (i = 0; i <= av_split; i++) {
+ set_buf_index[0] = i; set_buf_index[1] = 0;
+ intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
+ intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ intel_sdvo_read_response(encoder, &buf_size, 1);
+
+ pos = buf;
+ for (j = 0; j <= buf_size; j += 8) {
+ intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
+ NULL, 0);
+ intel_sdvo_read_response(encoder, pos, 8);
+ pos += 8;
+ }
+ }
+}
+#endif
+
+bool
+intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned if_index, uint8_t tx_rate,
+ uint8_t *data, unsigned length)
+{
+ uint8_t set_buf_index[2] = { if_index, 0 };
+ uint8_t hbuf_size, tmp[8];
+ int i;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
+
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ memset(tmp, 0, 8);
+ if (i < length) {
+ unsigned min = 8 < length - i ? 8 : length - i;
+ memcpy(tmp, data + i, min);
+ }
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ tmp, 8))
+ return false;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+}
+
+bool
+intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+
+ intel_dip_infoframe_csum(&avi_if);
+
+ /* sdvo spec says that the ecc is handled by the hw, and it looks like
+ * we must not send the ecc field, either. */
+ memcpy(sdvo_data, &avi_if, 3);
+ sdvo_data[3] = avi_if.checksum;
+ memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
+ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ SDVO_HBUF_TX_VSYNC,
+ sdvo_data, sizeof(sdvo_data));
+}
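+
+/*
+ * Editor's note (not part of the original patch): the buffer handed
+ * to intel_sdvo_write_infoframe() above is laid out as the 3-byte DIP
+ * header, the checksum, then the AVI payload; the ECC byte is left to
+ * the SDVO hardware:
+ *
+ *	sdvo_data[0..2]	type, version, length
+ *	sdvo_data[3]	checksum
+ *	sdvo_data[4..]	avi_if.body.avi
+ */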
+
+bool
+intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_tv_format format;
+ uint32_t format_map;
+
+ format_map = 1 << intel_sdvo->tv_format_index;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(format) != 6);
+#endif
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
+}
+
+bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode)
+{
+ struct intel_sdvo_dtd output_dtd;
+
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return false;
+
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ return false;
+
+ return true;
+}
+
+/* Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that. */
+bool
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo_dtd input_dtd;
+
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return false;
+
+ if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
+
+ if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+ &input_dtd))
+ return false;
+
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
+
+ return true;
+}
+
+bool
+intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ int multiplier;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (intel_sdvo->is_tv) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+ return false;
+
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ } else if (intel_sdvo->is_lvds) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+ intel_sdvo->sdvo_lvds_fixed_mode))
+ return false;
+
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+ return true;
+}
+
+void
+intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ u32 sdvox;
+ struct intel_sdvo_in_out_map in_out;
+ struct intel_sdvo_dtd input_dtd, output_dtd;
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
+
+ if (!mode)
+ return;
+
+ /* First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = intel_sdvo->attached_output;
+ in_out.in1 = 0;
+
+ intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+ /* Set the output timings to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
+
+ /* lvds has a special fixed output timing. */
+ if (intel_sdvo->is_lvds)
+ intel_sdvo_get_dtd_from_mode(&output_dtd,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+ else
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ DRM_INFO("Setting output timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return;
+
+ if (intel_sdvo->has_hdmi_monitor) {
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ intel_sdvo_set_avi_infoframe(intel_sdvo);
+ } else
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
+
+ if (intel_sdvo->is_tv &&
+ !intel_sdvo_set_tv_format(intel_sdvo))
+ return;
+
+ /* We have tried to get input timing in mode_fixup, and filled into
+ * adjusted_mode.
+ */
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
+ if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+ DRM_INFO("Setting input timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
+
+ switch (pixel_multiplier) {
+ default:
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ }
+ if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+ return;
+
+ /* Set the SDVO control regs. */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* The real mode polarity is set by the SDVO commands, using
+ * struct intel_sdvo_dtd. */
+ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+ if (intel_sdvo->is_hdmi)
+ sdvox |= intel_sdvo->color_range;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
+ } else {
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ switch (intel_sdvo->sdvo_reg) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+ }
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
+ if (intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* done in crtc_mode_set as the dpll_md reg must be written early */
+ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ /* done in crtc_mode_set as it lives inside the dpll register */
+ } else {
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ }
+
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_STALL_SELECT;
+ intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+}
+
+bool
+intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ u16 active_outputs;
+
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ if (active_outputs & intel_sdvo_connector->output_flag)
+ return true;
+ else
+ return false;
+}
+
+bool
+intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_sdvo->sdvo_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+void
+intel_disable_sdvo(struct intel_encoder *encoder)
+{
+ struct inteldrm_softc *dev_priv = encoder->base.dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 temp;
+
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_OFF);
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ /* HW workaround for IBX, we need to move the port to
+ * transcoder A before disabling it. */
+ if (HAS_PCH_IBX(encoder->base.dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(encoder->base.dev, pipe);
+ else
+ drm_msleep(50, "915dsd");
+ }
+ }
+
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+}
+
+void
+intel_enable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ u32 temp;
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0) {
+ /* HW workaround for IBX: disabling parked the port on
+  * transcoder A, so restore the transcoder select bit here. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ temp |= SDVO_PIPE_B_SELECT;
+ }
+
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ }
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but it's
+ * a given that if the status is a success, we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
+ }
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_ON);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+void
+intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_crtc *crtc;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ /* SDVO supports only 2 DPMS states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_sdvo->base.base.crtc;
+ if (!crtc) {
+ intel_sdvo->base.connectors_active = false;
+ return;
+ }
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+
+ intel_sdvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
+ } else {
+ intel_sdvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+ }
+
+ intel_modeset_check_state(connector->dev);
+}
+
+int
+intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (intel_sdvo->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+ if (intel_sdvo->pixel_clock_max < mode->clock)
+ return MODE_CLOCK_HIGH;
+
+ if (intel_sdvo->is_lvds) {
+ if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return MODE_OK;
+}
+
+bool
+intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+{
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(*caps) != 8);
+#endif
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
+}
+
+uint16_t
+intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ uint16_t hotplug;
+
+ /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+ * on the line. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ return 0;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &hotplug, sizeof(hotplug)))
+ return 0;
+
+ return hotplug;
+}
+
+void
+intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
+}
+
+bool
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+{
+ /* Is there more than one type of output? */
+ return bitcount16(intel_sdvo->caps.output_flags) > 1;
+}
+
+struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+
+ return drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ dev_priv->crt_ddc_pin));
+}
+
+enum drm_connector_status
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
+
+ edid = intel_sdvo_get_edid(connector);
+
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
+ /*
+ * Don't use DDC bus 1 as the argument of the DDC bus switch to
+ * get the EDID; bus 1 is reserved for the SDVO SPD ROM.
+ */
+ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ intel_sdvo->ddc_bus = ddc;
+ edid = intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
+ }
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
+ if (edid == NULL)
+ intel_sdvo->ddc_bus = saved_ddc;
+ }
+
+ /*
+ * When there is no EDID and no monitor is connected to the VGA
+ * port, try the CRT DDC to read the EDID for the DVI connector.
+ */
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ } else
+ status = connector_status_disconnected;
+ free(edid, M_DRM);
+ }
+
+ if (status == connector_status_connected) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
+ intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
+ }
+
+ return status;
+}
+
+bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
+enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ uint16_t response;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS,
+ &response, 2))
+ return connector_status_unknown;
+
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+
+ intel_sdvo->attached_output = response;
+
+ intel_sdvo->has_hdmi_monitor = false;
+ intel_sdvo->has_hdmi_audio = false;
+
+ if ((intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(intel_sdvo_connector))
+ ret = intel_sdvo_tmds_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+ /* if we have an edid check it matches the connection */
+ edid = intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
+ free(edid, M_DRM);
+ } else
+ ret = connector_status_connected;
+ }
+
+ /* May update encoder flags, e.g. the clock requirement for SDVO TV. */
+ if (ret == connector_status_connected) {
+ intel_sdvo->is_tv = false;
+ intel_sdvo->is_lvds = false;
+ intel_sdvo->base.needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ }
+
+ return ret;
+}
+
+void
+intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+
+ /* set the bus switch and get the modes */
+ edid = intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
+
+ free(edid, M_DRM);
+ }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+ 416, 0, 200, 201, 232, 233, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+ 416, 0, 240, 241, 272, 273, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+ 496, 0, 300, 301, 332, 333, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+ 736, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+ 736, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+ 736, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+ 800, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+ 800, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+ 816, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+ 816, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+ 816, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+ 816, 0, 540, 541, 572, 573, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+ 816, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+ 864, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+ 896, 0, 600, 601, 632, 633, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+ 928, 0, 624, 625, 656, 657, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+ 1016, 0, 766, 767, 798, 799, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+ 1120, 0, 768, 769, 800, 801, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+ 1376, 0, 1024, 1025, 1056, 1057, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+void
+intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_sdtv_resolution_request tv_res;
+ uint32_t reply = 0, format_map = 0;
+ int i;
+
+ /* Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ format_map = 1 << intel_sdvo->tv_format_index;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+
+ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+ return;
+
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+#endif
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ if (reply & (1 << i)) {
+ struct drm_display_mode *nmode;
+ nmode = drm_mode_duplicate(connector->dev,
+ &sdvo_tv_modes[i]);
+ if (nmode)
+ drm_mode_probed_add(connector, nmode);
+ }
+}
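+
+/*
+ * Illustrative note (not in the original driver): bit i of the device's
+ * reply selects sdvo_tv_modes[i] above, which is why that table must
+ * stay in reply order.
+ */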
+
+void
+intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ struct drm_display_mode *newmode;
+
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ intel_ddc_get_modes(connector, intel_sdvo->i2c);
+ if (!list_empty(&connector->probed_modes))
+ goto end;
+
+ /* Fetch modes from VBT */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+ if (newmode != NULL) {
+ /* Guarantee the mode is preferred */
+ newmode->type = (DRM_MODE_TYPE_PREFERRED |
+ DRM_MODE_TYPE_DRIVER);
+ drm_mode_probed_add(connector, newmode);
+ }
+ }
+
+end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
+
+ intel_sdvo->is_lvds = true;
+ break;
+ }
+ }
+}
+
+int
+intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+ if (IS_TV(intel_sdvo_connector))
+ intel_sdvo_get_tv_modes(connector);
+ else if (IS_LVDS(intel_sdvo_connector))
+ intel_sdvo_get_lvds_modes(connector);
+ else
+ intel_sdvo_get_ddc_modes(connector);
+
+ return !list_empty(&connector->probed_modes);
+}
+
+void
+intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (intel_sdvo_connector->left)
+ drm_property_destroy(dev, intel_sdvo_connector->left);
+ if (intel_sdvo_connector->right)
+ drm_property_destroy(dev, intel_sdvo_connector->right);
+ if (intel_sdvo_connector->top)
+ drm_property_destroy(dev, intel_sdvo_connector->top);
+ if (intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, intel_sdvo_connector->bottom);
+ if (intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, intel_sdvo_connector->hpos);
+ if (intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, intel_sdvo_connector->vpos);
+ if (intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, intel_sdvo_connector->saturation);
+ if (intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, intel_sdvo_connector->contrast);
+ if (intel_sdvo_connector->hue)
+ drm_property_destroy(dev, intel_sdvo_connector->hue);
+ if (intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, intel_sdvo_connector->sharpness);
+ if (intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
+ if (intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
+ if (intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
+ if (intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
+ if (intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
+ if (intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
+ if (intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, intel_sdvo_connector->brightness);
+}
+
+void
+intel_sdvo_destroy(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+ if (intel_sdvo_connector->tv_format)
+ drm_property_destroy(connector->dev,
+ intel_sdvo_connector->tv_format);
+
+ intel_sdvo_destroy_enhance_property(connector);
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(intel_sdvo_connector, M_DRM);
+}
+
+bool
+intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!intel_sdvo->is_hdmi)
+ return false;
+
+ edid = intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+ free(edid, M_DRM);
+
+ return has_audio;
+}
+
+int
+intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct inteldrm_softc *dev_priv = connector->dev->dev_private;
+ uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_sdvo_connector->force_audio)
+ return 0;
+
+ intel_sdvo_connector->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (has_audio == intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ intel_sdvo->has_hdmi_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_sdvo->color_range)
+ return 0;
+
+ intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+#define CHECK_PROPERTY(name, NAME) \
+ if (intel_sdvo_connector->name == property) { \
+ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
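+ /*
+ * Illustrative expansion (not in the original driver):
+ * CHECK_PROPERTY(hpos, HPOS) compares the property pointer against
+ * intel_sdvo_connector->hpos, returns 0 if the value is unchanged
+ * and -EINVAL if it exceeds max_hpos, and otherwise records the new
+ * cur_hpos, selects SDVO_CMD_SET_HPOS and jumps to set_value below.
+ */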
+
+ if (property == intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
+
+ if (intel_sdvo->tv_format_index ==
+ intel_sdvo_connector->tv_format_supported[val])
+ return 0;
+
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
+ temp_value = val;
+ if (intel_sdvo_connector->left == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->right, val);
+ if (intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (intel_sdvo_connector->right == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->left, val);
+ if (intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (intel_sdvo_connector->top == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->bottom, val);
+ if (intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ } else if (intel_sdvo_connector->bottom == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->top, val);
+ if (intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ }
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+ }
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+
+ return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
+ .mode_fixup = intel_sdvo_mode_fixup,
+ .mode_set = intel_sdvo_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ .dpms = intel_sdvo_dpms,
+ .detect = intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_sdvo_set_property,
+ .destroy = intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+ .get_modes = intel_sdvo_get_modes,
+ .mode_valid = intel_sdvo_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+void
+intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+
+ if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+
+#ifdef notyet
+ i2c_del_adapter(&intel_sdvo->ddc);
+#endif
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
+ .destroy = intel_sdvo_enc_destroy,
+};
+
+void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ /* FALLTHROUGH */
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ /* FALLTHROUGH */
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ /* FALLTHROUGH */
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ /* FALLTHROUGH */
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ /* FALLTHROUGH */
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = bitcount16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+}
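+
+/*
+ * Worked example (illustrative, assuming the usual SDVO_CONTROL_BUS_DDCn
+ * encoding): for a device whose controlled output is SDVO_OUTPUT_TMDS0
+ * and whose capabilities advertise RGB0 | TMDS0, the switch above yields
+ * mask = TMDS0 | RGB1 | RGB0; masking with caps.output_flags leaves
+ * RGB0 | TMDS0, so num_bits = 2 and ddc_bus = 1 << 2, i.e. DDC bus 2.
+ */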
+
+/**
+ * Choose the appropriate DDC bus for the control-bus-switch command for
+ * this SDVO output, based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+void
+intel_sdvo_select_ddc_bus(struct inteldrm_softc *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+
+ if (sdvo->is_sdvob)
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
+
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+void
+intel_sdvo_select_i2c_bus(struct inteldrm_softc *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+ u8 pin;
+
+ if (sdvo->is_sdvob)
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
+
+ if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
+ pin = mapping->i2c_pin;
+ else
+ pin = GMBUS_PORT_DPB;
+
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+#ifdef notyet
+ /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ * our code totally fails once we start using gmbus. Hence fall back to
+ * bit banging for now. */
+ intel_gmbus_force_bit(sdvo->i2c, true);
+#endif
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+#ifdef notyet
+ intel_gmbus_force_bit(sdvo->i2c, false);
+#endif
+}
+
+bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+{
+ return intel_sdvo_check_supp_encode(intel_sdvo);
+}
+
+u8
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (sdvo->is_sdvob) {
+ my_mapping = &dev_priv->sdvo_mappings[0];
+ other_mapping = &dev_priv->sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->sdvo_mappings[1];
+ other_mapping = &dev_priv->sdvo_mappings[0];
+ }
+
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /* If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+ /* No SDVO device info was found for either DVO port, so fall
+ * back to the mapping assumption we had before BIOS parsing.
+ */
+ if (sdvo->is_sdvob)
+ return 0x70;
+ else
+ return 0x72;
+}
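+
+/*
+ * Note (illustrative, derived from the code below): 0x70 and 0x72 are the
+ * 8-bit bus addresses used by the BIOS tables; intel_sdvo_init() shifts
+ * the returned value right by one to obtain the 7-bit i2c slave address.
+ */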
+
+void
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ struct intel_sdvo *encoder)
+{
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 1;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
+
+ intel_connector_attach_encoder(&connector->base, &encoder->base);
+#if 0
+ drm_sysfs_connector_add(&connector->base.base);
+#endif
+}
+
+void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+{
+ struct drm_device *dev = connector->base.base.dev;
+
+ intel_attach_force_audio_property(&connector->base.base);
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+}
+
+bool
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
+ M_DRM, M_WAITOK | M_ZERO);
+ if (!intel_sdvo_connector)
+ return false;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+ intel_sdvo_connector->output_flag) {
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
+ /* Some SDVO devices have one-shot hotplug interrupts.
+ * Ensure that they get re-enabled when an interrupt happens.
+ */
+ intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+ intel_sdvo_enable_hotplug(intel_encoder);
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ intel_sdvo->is_hdmi = true;
+ }
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (intel_sdvo->is_hdmi)
+ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+
+ return true;
+}
+
+bool
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
+ M_DRM, M_WAITOK | M_ZERO);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
+
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ goto err;
+
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_sdvo_destroy(connector);
+ return false;
+}
+
+bool
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
+ M_DRM, M_WAITOK | M_ZERO);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_sdvo_connector_init(intel_sdvo_connector,
+ intel_sdvo);
+ return true;
+}
+
+bool
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
+ M_DRM, M_WAITOK | M_ZERO);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_sdvo_destroy(connector);
+ return false;
+}
+
+bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+{
+ intel_sdvo->is_tv = false;
+ intel_sdvo->base.needs_tv_clock = false;
+ intel_sdvo->is_lvds = false;
+
+ /*
+ * SDVO requires that an XXX1 function block only exists when the
+ * device also has the corresponding XXX0 block, hence the
+ * SDVO_*_MASK checks below.
+ */
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_YPRPB0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ unsigned char bytes[2];
+
+ intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(intel_sdvo),
+ bytes[0], bytes[1]);
+ return false;
+ }
+ intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ return true;
+}
+
+void
+intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ intel_sdvo_destroy(connector);
+ }
+}
+
+bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+
+ if (!intel_sdvo_set_target_output(intel_sdvo, type))
+ return false;
+
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(format) != 6);
+#endif
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
+
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+ if (format_map == 0)
+ return false;
+
+ intel_sdvo_connector->format_supported_num = 0;
+ for (i = 0 ; i < TV_FORMAT_NUM; i++)
+ if (format_map & (1 << i))
+ intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
+
+ intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", intel_sdvo_connector->format_supported_num);
+ if (!intel_sdvo_connector->tv_format)
+ return false;
+
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+ drm_property_add_enum(
+ intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+ intel_sdvo_connector->tv_format, 0);
+ return true;
+}
+
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ intel_sdvo_connector->max_##name = data_value[0]; \
+ intel_sdvo_connector->cur_##name = response; \
+ intel_sdvo_connector->name = \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ if (!intel_sdvo_connector->name) return false; \
+ drm_object_attach_property(&connector->base, \
+ intel_sdvo_connector->name, \
+ intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while (0)
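+
+/*
+ * Illustrative expansion (not in the original driver):
+ * ENHANCEMENT(brightness, BRIGHTNESS) queries SDVO_CMD_GET_MAX_BRIGHTNESS
+ * and SDVO_CMD_GET_BRIGHTNESS, records max_brightness and cur_brightness,
+ * then creates a 0..max "brightness" range property and attaches it to
+ * the connector.
+ */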
+
+bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ /* when horizontal overscan is supported, Add the left/right property */
+ if (enhancements.overscan_h) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_hscan = data_value[0];
+ intel_sdvo_connector->left_margin = data_value[0] - response;
+ intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+ intel_sdvo_connector->left =
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->left)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->left,
+ intel_sdvo_connector->left_margin);
+
+ intel_sdvo_connector->right =
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->right)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->right,
+ intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ if (enhancements.overscan_v) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_vscan = data_value[0];
+ intel_sdvo_connector->top_margin = data_value[0] - response;
+ intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+ intel_sdvo_connector->top =
+ drm_property_create_range(dev, 0,
+ "top_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->top)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->top,
+ intel_sdvo_connector->top_margin);
+
+ intel_sdvo_connector->bottom =
+ drm_property_create_range(dev, 0,
+ "bottom_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->bottom)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->bottom,
+ intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_dot_crawl = 1;
+ intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ intel_sdvo_connector->dot_crawl =
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ if (!intel_sdvo_connector->dot_crawl)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->dot_crawl,
+ intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ }
+
+ return true;
+}
+
+bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector)
+{
+ union {
+ struct intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+#ifdef notyet
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+#endif
+
+ enhancements.response = 0;
+ intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
+
+ if (IS_TV(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else if (IS_LVDS(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
+
+int
+intel_sdvo_ddc_proxy_acquire_bus(void *cookie, int flags)
+{
+ struct intel_sdvo *sdvo = cookie;
+ struct i2c_controller *i2c = sdvo->i2c;
+
+ return ((i2c->ic_acquire_bus)(i2c->ic_cookie, flags));
+}
+
+void
+intel_sdvo_ddc_proxy_release_bus(void *cookie, int flags)
+{
+ struct intel_sdvo *sdvo = cookie;
+ struct i2c_controller *i2c = sdvo->i2c;
+
+ (i2c->ic_release_bus)(i2c->ic_cookie, flags);
+}
+
+int
+intel_sdvo_ddc_proxy_exec(void *cookie, i2c_op_t op, i2c_addr_t addr,
+ const void *cmdbuf, size_t cmdlen, void *buffer, size_t len, int flags)
+{
+ struct intel_sdvo *sdvo = cookie;
+ struct i2c_controller *i2c = sdvo->i2c;
+
+ if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return ((i2c->ic_exec)(i2c->ic_cookie,
+ op, addr, cmdbuf, cmdlen, buffer, len, flags));
+}
+
+bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+#if 0
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
+#else
+ sdvo->ddc.ic_cookie = sdvo;
+ sdvo->ddc.ic_acquire_bus = intel_sdvo_ddc_proxy_acquire_bus;
+ sdvo->ddc.ic_release_bus = intel_sdvo_ddc_proxy_release_bus;
+ sdvo->ddc.ic_exec = intel_sdvo_ddc_proxy_exec;
+
+ return true;
+#endif
+}
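+
+/*
+ * Note (illustrative): with the proxy above in place, every DDC
+ * transaction goes through intel_sdvo_ddc_proxy_exec(), which first
+ * issues the control-bus-switch command for the bus selected in
+ * sdvo->ddc_bus and then forwards the transfer to the underlying gmbus
+ * i2c_controller.
+ */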
+
+bool
+intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
+ u32 hotplug_mask;
+ int i;
+
+ intel_sdvo = malloc(sizeof(struct intel_sdvo), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_sdvo)
+ return false;
+
+ intel_sdvo->sdvo_reg = sdvo_reg;
+ intel_sdvo->is_sdvob = is_sdvob;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
+ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ goto err_i2c_bus;
+
+ /* encoder type will be decided later */
+ intel_encoder = &intel_sdvo->base;
+ intel_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
+
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
+ goto err;
+ }
+ }
+
+ hotplug_mask = 0;
+ if (IS_G4X(dev)) {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
+ } else if (IS_GEN4(dev)) {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
+ } else {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
+ }
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+
+ intel_encoder->disable = intel_disable_sdvo;
+ intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+
+ /* In the default case SDVO LVDS is false */
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err;
+
+ if (!intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags)) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
+ }
+
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = false;
+
+ /* Only enable the hotplug irq if we need it, to work around noisy
+ * hotplug lines.
+ */
+ if (intel_sdvo->hotplug_active)
+ dev_priv->hotplug_supported_mask |= hotplug_mask;
+
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+
+ /* Set the input timing to the screen. Always assume input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ goto err_output;
+
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+ goto err_output;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(intel_sdvo),
+ intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+ intel_sdvo->caps.device_rev_id,
+ intel_sdvo->pixel_clock_min / 1000,
+ intel_sdvo->pixel_clock_max / 1000,
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+#ifdef notyet
+ i2c_del_adapter(&intel_sdvo->ddc);
+#endif
+err_i2c_bus:
+ intel_sdvo_unselect_i2c_bus(intel_sdvo);
+ free(intel_sdvo, M_DRM);
+
+ return false;
+}
diff --git a/sys/dev/pci/drm/i915/intel_sdvo_regs.h b/sys/dev/pci/drm/i915/intel_sdvo_regs.h
new file mode 100644
index 00000000000..13bd3830f6e
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_sdvo_regs.h
@@ -0,0 +1,731 @@
+/* $OpenBSD: intel_sdvo_regs.h,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
+
+struct intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __attribute__((packed));
+
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
+/** This matches the EDID DTD structure, more or less */
+struct intel_sdvo_dtd {
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+ /** vsync offset bits 6-7, stored at bits 6-7 */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __attribute__((packed));
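+
+/*
+ * Sketch (illustrative, derived from the field comments above): the full
+ * horizontal values are reassembled as
+ *
+ *  hdisplay = ((dtd->part1.h_high >> 4) & 0xf) << 8 | dtd->part1.h_active;
+ *  hblank = (dtd->part1.h_high & 0xf) << 8 | dtd->part1.h_blank;
+ *
+ * and the vertical fields follow the same pattern via v_high.
+ */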
+
+struct intel_sdvo_pixel_clock_range {
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
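+
+/*
+ * Command protocol sketch (illustrative, inferred from the register map
+ * above): the host writes up to eight argument bytes into the
+ * SDVO_I2C_ARG_* registers, writes the command byte to SDVO_I2C_OPCODE,
+ * polls SDVO_I2C_CMD_STATUS until it is no longer
+ * SDVO_CMD_STATUS_PENDING, and then reads any result bytes from the
+ * SDVO_I2C_RETURN_* registers.
+ */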
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAGS SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+struct intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+struct intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get the supported resolutions with square pixel aspect ratio that can be
+ scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+struct intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+struct intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power states; returns info for encoder and monitor and
+ relies on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get power state; returns info for the encoder and monitor, relying on
+   the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __attribute__((packed));
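+/*
+ * Illustrative only (not part of the SDVO definitions): each timing value
+ * is 10 bits wide, split across a low byte and a 2-bit high field, so a
+ * hypothetical reader "pps" pointing at this struct would reassemble e.g.
+ * t0 as
+ *
+ *	u16 t0_ms = pps->t0 | (pps->t0_high << 8);
+ *
+ * giving a 0..1023 millisecond range per parameter.
+ */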
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+struct intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below depend on the current TV format, and
+ * thus need to be queried and set after the format has been selected.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+struct intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+struct intel_sdvo_enhancements_arg {
+ u16 value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 0x0
+ #define SDVO_COLORIMETRY_RGB220 0x1
+ #define SDVO_COLORIMETRY_YCrCb422 0x3
+ #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+ #define SDVO_HBUF_INDEX_ELD 0
+ #define SDVO_HBUF_INDEX_AVI_IF 1
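+	/* buffer 0 holds the ELD (EDID-Like Data), buffer 1 the AVI InfoFrame */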
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
+
+struct intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/sys/dev/pci/drm/i915/intel_sprite.c b/sys/dev/pci/drm/i915/intel_sprite.c
new file mode 100644
index 00000000000..a097fb57d9b
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_sprite.c
@@ -0,0 +1,767 @@
+/* $OpenBSD: intel_sprite.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_fourcc.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+
+void ivb_disable_plane(struct drm_plane *);
+int ivb_update_colorkey(struct drm_plane *,
+ struct drm_intel_sprite_colorkey *);
+void ivb_get_colorkey(struct drm_plane *,
+ struct drm_intel_sprite_colorkey *);
+void snb_disable_plane(struct drm_plane *);
+void intel_enable_primary(struct drm_crtc *);
+int snb_update_colorkey(struct drm_plane *,
+ struct drm_intel_sprite_colorkey *);
+void snb_get_colorkey(struct drm_plane *,
+ struct drm_intel_sprite_colorkey *);
+int intel_update_plane(struct drm_plane *, struct drm_crtc *,
+ struct drm_framebuffer *, int, int, unsigned int, unsigned int,
+ uint32_t, uint32_t, uint32_t, uint32_t);
+int intel_disable_drm_plane(struct drm_plane *);
+void intel_destroy_plane(struct drm_plane *);
+void ivb_update_plane(struct drm_plane *, struct drm_framebuffer *,
+ struct drm_i915_gem_object *, int, int, unsigned int,
+ unsigned int, uint32_t, uint32_t, uint32_t, uint32_t);
+void snb_update_plane(struct drm_plane *, struct drm_framebuffer *,
+ struct drm_i915_gem_object *, int, int, unsigned int,
+ unsigned int, uint32_t, uint32_t, uint32_t, uint32_t);
+void intel_disable_primary(struct drm_crtc *);
+void ilk_update_plane(struct drm_plane *, struct drm_framebuffer *,
+ struct drm_i915_gem_object *, int, int, unsigned int, unsigned int,
+ uint32_t, uint32_t, uint32_t, uint32_t);
+void ilk_disable_plane(struct drm_plane *);
+int ilk_update_colorkey(struct drm_plane *,
+ struct drm_intel_sprite_colorkey *);
+void ilk_get_colorkey(struct drm_plane *,
+ struct drm_intel_sprite_colorkey *);
+
+void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ u32 sprctl, sprscale = 0;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ sprctl = I915_READ(SPRCTL(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SPRITE_PIXFORMAT_MASK;
+ sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+ sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SPRITE_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_YUYV:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SPRITE_TILED;
+
+ /* must disable */
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ sprctl |= SPRITE_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ */
+ if (crtc_w != src_w || crtc_h != src_h) {
+ if (!dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = true;
+ intel_update_watermarks(dev);
+ intel_wait_for_vblank(dev, pipe);
+ }
+ sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+ } else {
+ if (dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ intel_update_watermarks(dev);
+ }
+ }
+
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+ * register */
+ if (IS_HASWELL(dev))
+ I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+ else if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+ else
+ I915_WRITE(SPRLINOFF(pipe), linear_offset);
+
+ I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), sprscale);
+ I915_WRITE(SPRCTL(pipe), sprctl);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+void
+ivb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+ /* Can't leave the scaler enabled... */
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), 0);
+ /* Activate double buffered register update */
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
+ POSTING_READ(SPRSURF(pipe));
+
+ dev_priv->sprite_scaling_enabled = false;
+ intel_update_watermarks(dev);
+}
+
+int
+ivb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+ sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+ I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+ POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+ if (sprctl & SPRITE_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (sprctl & SPRITE_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+void
+ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ unsigned long dvssurf_offset, linear_offset;
+ u32 dvscntr, dvsscale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ dvscntr = I915_READ(DVSCNTR(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ dvscntr &= ~DVS_PIXFORMAT_MASK;
+ dvscntr &= ~DVS_RGB_ORDER_XBGR;
+ dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+ dvscntr &= ~DVS_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dvscntr |= DVS_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_YUYV:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dvscntr |= DVS_TILED;
+
+ if (IS_GEN6(dev))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
+ dvscntr |= DVS_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ dvsscale = 0;
+ if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
+ dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ dvssurf_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= dvssurf_offset;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+ else
+ I915_WRITE(DVSLINOFF(pipe), linear_offset);
+
+ I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(DVSSCALE(pipe), dvsscale);
+ I915_WRITE(DVSCNTR(pipe), dvscntr);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+void
+ilk_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ /* Disable the scaler */
+ I915_WRITE(DVSSCALE(pipe), 0);
+ /* Flush double buffered register updates */
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ if (!intel_crtc->primary_disabled)
+ return;
+
+ intel_crtc->primary_disabled = false;
+ intel_update_fbc(dev);
+
+ I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ if (intel_crtc->primary_disabled)
+ return;
+
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+
+ intel_crtc->primary_disabled = true;
+ intel_update_fbc(dev);
+}
+
+int
+ilk_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+ dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+ I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+ POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+void
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+ if (dvscntr & DVS_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (dvscntr & DVS_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj, *old_obj;
+ int pipe = intel_plane->pipe;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int ret = 0;
+ int x = src_x >> 16, y = src_y >> 16;
+ int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
+ bool disable_primary = false;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ old_obj = intel_plane->obj;
+
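+	/* src coordinates arrive from the DRM core as 16.16 fixed point */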
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ /* Pipe must be running... */
+ if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
+ return -EINVAL;
+
+ if (crtc_x >= primary_w || crtc_y >= primary_h)
+ return -EINVAL;
+
+ /* Don't modify another pipe's plane */
+ if (intel_plane->pipe != intel_crtc->pipe)
+ return -EINVAL;
+
+ /* Sprite planes can be linear or x-tiled surfaces */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ case I915_TILING_X:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Clamp the width & height into the visible area. Note we don't
+ * try to scale the source if part of the visible region is offscreen.
+ * The caller must handle that by adjusting source offset and size.
+ */
+ if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
+ crtc_w += crtc_x;
+ crtc_x = 0;
+ }
+ if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
+ goto out;
+ if ((crtc_x + crtc_w) > primary_w)
+ crtc_w = primary_w - crtc_x;
+
+ if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
+ crtc_h += crtc_y;
+ crtc_y = 0;
+ }
+ if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
+ goto out;
+ if (crtc_y + crtc_h > primary_h)
+ crtc_h = primary_h - crtc_y;
+
+ if (!crtc_w || !crtc_h) /* Again, nothing to display */
+ goto out;
+
+ /*
+	 * We may not have a scaler, e.g. HSW no longer has one.
+ */
+ if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
+ return -EINVAL;
+
+ /*
+ * We can take a larger source and scale it down, but
+ * only so much... 16x is the max on SNB.
+ */
+ if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
+ return -EINVAL;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ if ((crtc_x == 0) && (crtc_y == 0) &&
+ (crtc_w == primary_w) && (crtc_h == primary_h))
+ disable_primary = true;
+
+ DRM_LOCK();
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret)
+ goto out_unlock;
+
+ intel_plane->obj = obj;
+
+ /*
+	 * Be sure to re-enable the primary before the sprite stops
+	 * covering it fully.
+ */
+ if (!disable_primary)
+ intel_enable_primary(crtc);
+
+ intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
+ crtc_w, crtc_h, x, y, src_w, src_h);
+
+ if (disable_primary)
+ intel_disable_primary(crtc);
+
+ /* Unpin old obj after new one is active to avoid ugliness */
+ if (old_obj) {
+ /*
+ * It's fairly common to simply update the position of
+ * an existing object. In that case, we don't need to
+ * wait for vblank to avoid ugliness, we only need to
+ * do the pin & ref bookkeeping.
+ */
+ if (old_obj != obj) {
+ DRM_UNLOCK();
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ DRM_LOCK();
+ }
+ intel_unpin_fb_obj(old_obj);
+ }
+
+out_unlock:
+ DRM_UNLOCK();
+out:
+ return ret;
+}
+
+int
+intel_disable_drm_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int ret = 0;
+
+ if (plane->crtc)
+ intel_enable_primary(plane->crtc);
+ intel_plane->disable_plane(plane);
+
+ if (!intel_plane->obj)
+ goto out;
+
+ DRM_LOCK();
+ intel_unpin_fb_obj(intel_plane->obj);
+ intel_plane->obj = NULL;
+ DRM_UNLOCK();
+out:
+
+ return ret;
+}
+
+void
+intel_destroy_plane(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ intel_disable_drm_plane(plane);
+ drm_plane_cleanup(plane);
+ free(intel_plane, M_DRM);
+}
+
+int
+intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+int
+intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *get = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ rw_enter_write(&dev->mode_config.rwl);
+
+ obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+ rw_exit_write(&dev->mode_config.rwl);
+ return ret;
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_drm_plane,
+ .destroy = intel_destroy_plane,
+};
+
+static uint32_t ilk_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static uint32_t snb_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_plane *intel_plane;
+ unsigned long possible_crtcs;
+ const uint32_t *plane_formats;
+ int num_plane_formats;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen < 5)
+ return -ENODEV;
+
+ intel_plane = malloc(sizeof(struct intel_plane), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_plane)
+ return -ENOMEM;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ case 6:
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 16;
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ intel_plane->update_colorkey = ilk_update_colorkey;
+ intel_plane->get_colorkey = ilk_get_colorkey;
+
+ if (IS_GEN6(dev)) {
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+ }
+ break;
+
+ case 7:
+ if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
+ intel_plane->can_scale = false;
+ else
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 2;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->update_colorkey = ivb_update_colorkey;
+ intel_plane->get_colorkey = ivb_get_colorkey;
+
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ break;
+
+ default:
+ free(intel_plane, M_DRM);
+ return -ENODEV;
+ }
+
+ intel_plane->pipe = pipe;
+ possible_crtcs = (1 << pipe);
+ ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ false);
+ if (ret)
+ free(intel_plane, M_DRM);
+
+ return ret;
+}
diff --git a/sys/dev/pci/drm/i915/intel_tv.c b/sys/dev/pci/drm/i915/intel_tv.c
new file mode 100644
index 00000000000..339f298b498
--- /dev/null
+++ b/sys/dev/pci/drm/i915/intel_tv.c
@@ -0,0 +1,1708 @@
+/* $OpenBSD: intel_tv.c,v 1.1 2013/03/18 12:36:52 jsg Exp $ */
+/*
+ * Copyright © 2006-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file
+ * Integrated TV-out support for the 915GM and 945GM.
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_crtc.h>
+#include <dev/pci/drm/drm_edid.h>
+#include "intel_drv.h"
+#include <dev/pci/drm/i915_drm.h>
+#include "i915_drv.h"
+
+enum tv_margin {
+ TV_MARGIN_LEFT, TV_MARGIN_TOP,
+ TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
+};
+
+/** Private structure for the integrated TV support */
+struct intel_tv {
+ struct intel_encoder base;
+
+ int type;
+ const char *tv_format;
+ int margin[4];
+ u32 save_TV_H_CTL_1;
+ u32 save_TV_H_CTL_2;
+ u32 save_TV_H_CTL_3;
+ u32 save_TV_V_CTL_1;
+ u32 save_TV_V_CTL_2;
+ u32 save_TV_V_CTL_3;
+ u32 save_TV_V_CTL_4;
+ u32 save_TV_V_CTL_5;
+ u32 save_TV_V_CTL_6;
+ u32 save_TV_V_CTL_7;
+ u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
+
+ u32 save_TV_CSC_Y;
+ u32 save_TV_CSC_Y2;
+ u32 save_TV_CSC_U;
+ u32 save_TV_CSC_U2;
+ u32 save_TV_CSC_V;
+ u32 save_TV_CSC_V2;
+ u32 save_TV_CLR_KNOBS;
+ u32 save_TV_CLR_LEVEL;
+ u32 save_TV_WIN_POS;
+ u32 save_TV_WIN_SIZE;
+ u32 save_TV_FILTER_CTL_1;
+ u32 save_TV_FILTER_CTL_2;
+ u32 save_TV_FILTER_CTL_3;
+
+ u32 save_TV_H_LUMA[60];
+ u32 save_TV_H_CHROMA[60];
+ u32 save_TV_V_LUMA[43];
+ u32 save_TV_V_CHROMA[43];
+
+ u32 save_TV_DAC;
+ u32 save_TV_CTL;
+};
+
+struct video_levels {
+ int blank, black, burst;
+};
+
+struct color_conversion {
+ u16 ry, gy, by, ay;
+ u16 ru, gu, bu, au;
+ u16 rv, gv, bv, av;
+};
+
+static const u32 filter_table[] = {
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100, 0x36403000,
+ 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100,
+};
+
+/*
+ * Color conversion values have 3 separate fixed point formats:
+ *
+ * 10 bit fields (ay, au)
+ * 1.9 fixed point (b.bbbbbbbbb)
+ * 11 bit fields (ry, by, ru, gu, gv)
+ * exp.mantissa (ee.mmmmmmmmm)
+ * ee = 00 = 10^-1 (0.mmmmmmmmm)
+ * ee = 01 = 10^-2 (0.0mmmmmmmmm)
+ * ee = 10 = 10^-3 (0.00mmmmmmmmm)
+ * ee = 11 = 10^-4 (0.000mmmmmmmmm)
+ * 12 bit fields (gy, rv, bu)
+ * exp.mantissa (eee.mmmmmmmmm)
+ * eee = 000 = 10^-1 (0.mmmmmmmmm)
+ * eee = 001 = 10^-2 (0.0mmmmmmmmm)
+ * eee = 010 = 10^-3 (0.00mmmmmmmmm)
+ * eee = 011 = 10^-4 (0.000mmmmmmmmm)
+ * eee = 100 = reserved
+ * eee = 101 = reserved
+ * eee = 110 = reserved
+ * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
+ *
+ * Saturation and contrast are 8 bits, with their own representation:
+ * 8 bit field (saturation, contrast)
+ * exp.mantissa (ee.mmmmmm)
+ * ee = 00 = 10^-1 (0.mmmmmm)
+ * ee = 01 = 10^0 (m.mmmmm)
+ * ee = 10 = 10^1 (mm.mmmm)
+ * ee = 11 = 10^2 (mmm.mmm)
+ *
+ * Simple conversion function:
+ *
+ * static u32
+ * float_to_csc_11(float f)
+ * {
+ * u32 exp;
+ * u32 mant;
+ * u32 ret;
+ *
+ * if (f < 0)
+ * f = -f;
+ *
+ * if (f >= 1) {
+ * exp = 0x7;
+ * mant = 1 << 8;
+ * } else {
+ * for (exp = 0; exp < 3 && f < 0.5; exp++)
+ * f *= 2.0;
+ * mant = (f * (1 << 9) + 0.5);
+ * if (mant >= (1 << 9))
+ * mant = (1 << 9) - 1;
+ * }
+ * ret = (exp << 9) | mant;
+ * return ret;
+ * }
+ */
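+
+/*
+ * Worked example of the encoding above: 0.3125 is doubled once to 0.625
+ * (exp = 1, the 10^-2 form), mant = (0.625 * (1 << 9) + 0.5) truncates to
+ * 320, and the result is (1 << 9) | 320 = 0x340, which decodes back to
+ * 320/1024 = 0.3125; the same 0x340 appears as the .rv entry of
+ * ntsc_m_csc_composite below.
+ */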
+
+/*
+ * Behold, magic numbers! If we plant them they might grow a big
+ * s-video cable to the sky... or something.
+ *
+ * Pre-converted to appropriate hex value.
+ */
+
+/*
+ * PAL & NTSC values for composite & s-video connections
+ */
+static const struct color_conversion ntsc_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion ntsc_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion ntsc_j_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
+ .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+ .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_composite = {
+ .blank = 225, .black = 225, .burst = 113,
+};
+
+static const struct color_conversion ntsc_j_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
+ .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+ .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_svideo = {
+ .blank = 266, .black = 266, .burst = 133,
+};
+
+static const struct color_conversion pal_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
+ .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+ .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_composite = {
+ .blank = 237, .black = 237, .burst = 118,
+};
+
+static const struct color_conversion pal_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+ .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_svideo = {
+ .blank = 280, .black = 280, .burst = 139,
+};
+
+static const struct color_conversion pal_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion pal_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion pal_n_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_composite = {
+ .blank = 225, .black = 267, .burst = 118,
+};
+
+static const struct color_conversion pal_n_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 139,
+};
+
+/*
+ * Component connections
+ */
+static const struct color_conversion sdtv_csc_yprpb = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
+};
+
+static const struct color_conversion sdtv_csc_rgb = {
+ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct color_conversion hdtv_csc_yprpb = {
+ .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+ .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
+};
+
+static const struct color_conversion hdtv_csc_rgb = {
+ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct video_levels component_levels = {
+ .blank = 279, .black = 279, .burst = 0,
+};
+
+
+struct tv_mode {
+ const char *name;
+ int clock;
+ int refresh; /* in millihertz (for precision) */
+ u32 oversample;
+ int hsync_end, hblank_start, hblank_end, htotal;
+ bool progressive, trilevel_sync, component_only;
+ int vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena;
+ int veq_start_f1, veq_start_f2, veq_len;
+ int vi_end_f1, vi_end_f2, nbr_end;
+ bool burst_ena;
+ int hburst_start, hburst_len;
+ int vburst_start_f1, vburst_end_f1;
+ int vburst_start_f2, vburst_end_f2;
+ int vburst_start_f3, vburst_end_f3;
+ int vburst_start_f4, vburst_end_f4;
+ /*
+ * subcarrier programming
+ */
+ int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+ u32 sc_reset;
+ bool pal_burst;
+ /*
+ * blank/black levels
+ */
+ const struct video_levels *composite_levels, *svideo_levels;
+ const struct color_conversion *composite_color, *svideo_color;
+ const u32 *filter_table;
+ int max_srcw;
+};
+
+
+/*
+ * Sub carrier DDA
+ *
+ * I think this works as follows:
+ *
+ * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
+ *
+ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
+ *
+ * So,
+ * dda1_ideal = subcarrier/pixel * 4096
+ * dda1_inc = floor (dda1_ideal)
+ * dda2 = dda1_ideal - dda1_inc
+ *
+ * then pick a ratio for dda2 that gives the closest approximation. If
+ * you can't get close enough, you can play with dda3 as well. This
+ * seems likely to happen when dda2 is small as the jumps would be larger
+ *
+ * To invert this,
+ *
+ * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
+ *
+ * The constants below were all computed using a 107.520MHz clock
+ */
+
+/**
+ * Register programming values for TV modes.
+ *
+ * These values already include the -1 adjustments the hardware requires.
+ */
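+
+/*
+ * As an illustrative check against the NTSC-M entry below, plugging its
+ * constants and its .clock field (108000 kHz) into the formula above:
+ *
+ *	dda1_inc + dda2_inc / dda2_size = 135 + 20800 / 27456 = 4480 / 33
+ *	subcarrier = 108000 kHz * (4480 / 33) / 4096 = 3579.545 kHz
+ *
+ * which is the standard ~3.58 MHz NTSC color subcarrier.
+ */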
+
+static const struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC-M",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-443",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 4093, .dda2_size = 27456,
+ .dda3_inc = 310, .dda3_size = 525,
+ .sc_reset = TV_SC_RESET_NEVER,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-J",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_j_levels_composite,
+ .composite_color = &ntsc_j_csc_composite,
+ .svideo_levels = &ntsc_j_levels_svideo,
+ .svideo_color = &ntsc_j_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "PAL-M",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 16704, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_m_levels_composite,
+ .composite_color = &pal_m_csc_composite,
+ .svideo_levels = &pal_m_levels_svideo,
+ .svideo_color = &pal_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+ .name = "PAL-N",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ .hsync_end = 64, .hblank_end = 128,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 34,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 23578, .dda2_size = 27648,
+ .dda3_inc = 134, .dda3_size = 625,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_n_levels_composite,
+ .composite_color = &pal_n_csc_composite,
+ .svideo_levels = &pal_n_levels_svideo,
+ .svideo_color = &pal_n_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+ .name = "PAL",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ .hsync_end = 64, .hblank_end = 142,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 5, .vsync_start_f2 = 6,
+ .vsync_len = 5,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 15,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 32,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 4122, .dda2_size = 27648,
+ .dda3_inc = 67, .dda3_size = 625,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_levels_composite,
+ .composite_color = &pal_csc_composite,
+ .svideo_levels = &pal_levels_svideo,
+ .svideo_color = &pal_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "480p",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@60Hz",
+ .clock = 148800,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1649,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@50Hz",
+ .clock = 148800,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1979,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ .max_srcw = 800
+ },
+ {
+ .name = "1080i@50Hz",
+ .clock = 148800,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2639,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "1080i@60Hz",
+ .clock = 148800,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+};
+
+struct intel_tv *enc_to_intel_tv(struct drm_encoder *);
+struct intel_tv *intel_attached_tv(struct drm_connector *);
+void intel_tv_dpms(struct drm_encoder *, int);
+const struct tv_mode *intel_tv_mode_lookup(const char *);
+const struct tv_mode *intel_tv_mode_find(struct intel_tv *);
+enum drm_mode_status intel_tv_mode_valid(struct drm_connector *,
+ struct drm_display_mode *);
+bool intel_tv_mode_fixup(struct drm_encoder *, const struct drm_display_mode *,
+ struct drm_display_mode *);
+void intel_tv_mode_set(struct drm_encoder *, struct drm_display_mode *,
+ struct drm_display_mode *);
+int intel_tv_detect_type(struct intel_tv *, struct drm_connector *);
+void intel_tv_find_better_format(struct drm_connector *);
+enum drm_connector_status intel_tv_detect(struct drm_connector *, bool);
+void intel_tv_chose_preferred_modes(struct drm_connector *,
+ struct drm_display_mode *);
+int intel_tv_get_modes(struct drm_connector *);
+void intel_tv_destroy(struct drm_connector *);
+int intel_tv_set_property(struct drm_connector *, struct drm_property *,
+ uint64_t);
+int tv_is_present_in_vbt(struct drm_device *);
+bool intel_tv_get_hw_state(struct intel_encoder *, enum pipe *);
+void intel_enable_tv(struct intel_encoder *);
+void intel_disable_tv(struct intel_encoder *);
+
+struct intel_tv *
+enc_to_intel_tv(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_tv, base.base);
+}
+
+struct intel_tv *
+intel_attached_tv(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_tv,
+ base);
+}
+
+bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(TV_CTL);
+
+ if (!(tmp & TV_ENC_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+void
+intel_enable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+}
+
+const struct tv_mode *
+intel_tv_mode_lookup(const char *tv_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ const struct tv_mode *tv_mode = &tv_modes[i];
+
+ if (!strcmp(tv_format, tv_mode->name))
+ return tv_mode;
+ }
+ return NULL;
+}
+
+const struct tv_mode *
+intel_tv_mode_find(struct intel_tv *intel_tv)
+{
+ return intel_tv_mode_lookup(intel_tv->tv_format);
+}
+
+enum drm_mode_status
+intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ /* Ensure TV refresh is close to desired refresh */
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
+ < 1000)
+ return MODE_OK;
+
+ return MODE_CLOCK_RANGE;
+}
+
+
+bool
+intel_tv_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ if (!tv_mode)
+ return false;
+
+ if (intel_encoder_check_is_cloned(&intel_tv->base))
+ return false;
+
+ adjusted_mode->clock = tv_mode->clock;
+ return true;
+}
+
+void
+intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ u32 tv_ctl;
+ u32 hctl1, hctl2, hctl3;
+ u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+ u32 scctl1, scctl2, scctl3;
+ int i, j;
+ const struct video_levels *video_levels;
+ const struct color_conversion *color_conversion;
+ bool burst_ena;
+ int pipe = intel_crtc->pipe;
+
+ if (!tv_mode)
+ return; /* can't happen (mode_prepare prevents this) */
+
+ tv_ctl = I915_READ(TV_CTL);
+ tv_ctl &= TV_CTL_SAVE;
+
+ switch (intel_tv->type) {
+ default:
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_Composite:
+ tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+ video_levels = tv_mode->composite_levels;
+ color_conversion = tv_mode->composite_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ case DRM_MODE_CONNECTOR_Component:
+ tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+ video_levels = &component_levels;
+ if (tv_mode->burst_ena)
+ color_conversion = &sdtv_csc_yprpb;
+ else
+ color_conversion = &hdtv_csc_yprpb;
+ burst_ena = false;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+ video_levels = tv_mode->svideo_levels;
+ color_conversion = tv_mode->svideo_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ }
+ hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+ (tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+ hctl2 = (tv_mode->hburst_start << 16) |
+ (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+ if (burst_ena)
+ hctl2 |= TV_BURST_ENA;
+
+ hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+ (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+ vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+ (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+ (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+ vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+ (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+ (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+ vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+ (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+ (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+ if (tv_mode->veq_ena)
+ vctl3 |= TV_EQUAL_ENA;
+
+ vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+ (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+ vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+ (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+ vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+ (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+ vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+ (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ tv_ctl |= tv_mode->oversample;
+
+ if (tv_mode->progressive)
+ tv_ctl |= TV_PROGRESSIVE;
+ if (tv_mode->trilevel_sync)
+ tv_ctl |= TV_TRILEVEL_SYNC;
+ if (tv_mode->pal_burst)
+ tv_ctl |= TV_PAL_BURST;
+
+ scctl1 = 0;
+ if (tv_mode->dda1_inc)
+ scctl1 |= TV_SC_DDA1_EN;
+ if (tv_mode->dda2_inc)
+ scctl1 |= TV_SC_DDA2_EN;
+ if (tv_mode->dda3_inc)
+ scctl1 |= TV_SC_DDA3_EN;
+ scctl1 |= tv_mode->sc_reset;
+ if (video_levels)
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
+
+ scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
+ tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
+
+ scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
+ tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
+
+ /* Enable two fixes for the chips that need them. */
+ if (dev->pci_device < 0x2772)
+ tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
+
+ I915_WRITE(TV_H_CTL_1, hctl1);
+ I915_WRITE(TV_H_CTL_2, hctl2);
+ I915_WRITE(TV_H_CTL_3, hctl3);
+ I915_WRITE(TV_V_CTL_1, vctl1);
+ I915_WRITE(TV_V_CTL_2, vctl2);
+ I915_WRITE(TV_V_CTL_3, vctl3);
+ I915_WRITE(TV_V_CTL_4, vctl4);
+ I915_WRITE(TV_V_CTL_5, vctl5);
+ I915_WRITE(TV_V_CTL_6, vctl6);
+ I915_WRITE(TV_V_CTL_7, vctl7);
+ I915_WRITE(TV_SC_CTL_1, scctl1);
+ I915_WRITE(TV_SC_CTL_2, scctl2);
+ I915_WRITE(TV_SC_CTL_3, scctl3);
+
+ if (color_conversion) {
+ I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+ color_conversion->gy);
+ I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+ color_conversion->ay);
+ I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+ color_conversion->gu);
+ I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+ color_conversion->au);
+ I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+ color_conversion->gv);
+ I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+ color_conversion->av);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ else
+ I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
+ if (video_levels)
+ I915_WRITE(TV_CLR_LEVEL,
+ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
+ (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+ {
+ int pipeconf_reg = PIPECONF(pipe);
+ int dspcntr_reg = DSPCNTR(intel_crtc->plane);
+ int pipeconf = I915_READ(pipeconf_reg);
+ int dspcntr = I915_READ(dspcntr_reg);
+ int xpos = 0x0, ypos = 0x0;
+ unsigned int xsize, ysize;
+ /* Pipe must be off here */
+ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
+
+ /* Wait for vblank for the disable to take effect */
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
+ /* Wait for vblank for the disable to take effect. */
+ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+ xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+ if (tv_mode->progressive)
+ ysize = tv_mode->nbr_end + 1;
+ else
+ ysize = 2*tv_mode->nbr_end + 1;
+
+ xpos += intel_tv->margin[TV_MARGIN_LEFT];
+ ypos += intel_tv->margin[TV_MARGIN_TOP];
+ xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
+ I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
+ I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+
+ I915_WRITE(pipeconf_reg, pipeconf);
+ I915_WRITE(dspcntr_reg, dspcntr);
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
+ }
+
+ j = 0;
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
+ I915_WRITE(TV_CTL, tv_ctl);
+}
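
The window height programmed into TV_WIN_SIZE above reduces to a simple rule: one field of active lines for progressive formats, two interleaved fields for interlaced ones, with the margins subtracted afterwards. A minimal sketch; the nbr_end value of 240 is an assumed NTSC-style figure:

	/* Active-line count for TV_WIN_SIZE, mirroring the ysize
	 * computation in intel_tv_mode_set() above. */
	static unsigned int
	tv_active_lines(unsigned int nbr_end, int progressive)
	{
		return progressive ? nbr_end + 1 : 2 * nbr_end + 1;
	}

	/* e.g. tv_active_lines(240, 0) == 481 lines for an interlaced
	 * format, before the top/bottom margins are taken off. */
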
+
+static const struct drm_display_mode reported_modes[] = {
+ {
+ .name = "NTSC 480i",
+ .clock = 107520,
+ .hdisplay = 1280,
+ .hsync_start = 1368,
+ .hsync_end = 1496,
+ .htotal = 1712,
+
+ .vdisplay = 1024,
+ .vsync_start = 1027,
+ .vsync_end = 1034,
+ .vtotal = 1104,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+};
+
+/**
+ * Detects TV presence by checking for load.
+ *
+ * Requires that the current pipe's DPLL is active.
+ *
+ * \return true if TV is connected.
+ * \return false if TV is disconnected.
+ */
+int
+intel_tv_detect_type(struct intel_tv *intel_tv,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = &intel_tv->base.base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = encoder->dev;
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ u32 tv_ctl, save_tv_ctl;
+ u32 tv_dac, save_tv_dac;
+ int type;
+
+ /* Disable TV interrupts around load detect or we'll recurse */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ mtx_enter(&dev_priv->irq_lock);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ mtx_leave(&dev_priv->irq_lock);
+ }
+
+ save_tv_dac = tv_dac = I915_READ(TV_DAC);
+ save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+ /* Poll for TV detection */
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ else
+ tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+
+ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+
+ /*
+ * The TV sense state should be cleared to zero on the Cantiga platform,
+ * otherwise the TV is misdetected. This is a hardware requirement.
+ */
+ if (IS_GM45(dev))
+ tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ POSTING_READ(TV_DAC);
+
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+ type = -1;
+ tv_dac = I915_READ(TV_DAC);
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ type = -1;
+ }
+
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+ POSTING_READ(TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+ /* Restore interrupt config */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ mtx_enter(&dev_priv->irq_lock);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ mtx_leave(&dev_priv->irq_lock);
+ }
+
+ return type;
+}
+
+/*
+ * Set an accurate TV format according to the connector type,
+ * i.e. a Component connector should not be assigned an NTSC or PAL format.
+ */
+void
+intel_tv_find_better_format(struct drm_connector *connector)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int i;
+
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ tv_mode->component_only)
+ return;
+
+ for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
+ tv_mode = tv_modes + i;
+
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ tv_mode->component_only)
+ break;
+ }
+
+ intel_tv->tv_format = tv_mode->name;
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.tv_mode_property, i);
+}
+
+/**
+ * Detect the TV connection.
+ *
+ * When forced, borrows a load-detect pipe to probe the TV, since a pipe
+ * must be programmed in order to probe; otherwise the cached connector
+ * status is returned.
+ */
+enum drm_connector_status
+intel_tv_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_display_mode mode;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ int type;
+
+ mode = reported_modes[0];
+
+ if (force) {
+ struct intel_load_detect_pipe tmp;
+
+ if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
+ type = intel_tv_detect_type(intel_tv, connector);
+ intel_release_load_detect_pipe(connector, &tmp);
+ } else
+ return connector_status_unknown;
+ } else
+ return connector->status;
+
+ if (type < 0)
+ return connector_status_disconnected;
+
+ intel_tv->type = type;
+ intel_tv_find_better_format(connector);
+
+ return connector_status_connected;
+}
+
+static const struct input_res {
+ const char *name;
+ int w, h;
+} input_res_table[] = {
+ {"640x480", 640, 480},
+ {"800x600", 800, 600},
+ {"1024x768", 1024, 768},
+ {"1280x1024", 1280, 1024},
+ {"848x480", 848, 480},
+ {"1280x720", 1280, 720},
+ {"1920x1080", 1920, 1080},
+};
+
+/*
+ * Choose the preferred mode according to the line count of the TV format
+ */
+void
+intel_tv_chose_preferred_modes(struct drm_connector *connector,
+ struct drm_display_mode *mode_ptr)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ else if (tv_mode->nbr_end > 480) {
+ if (tv_mode->progressive == true && tv_mode->nbr_end < 720) {
+ if (mode_ptr->vdisplay == 720)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ } else if (mode_ptr->vdisplay == 1080)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+
+/**
+ * Stub get_modes function.
+ *
+ * This should probably return a set of fixed modes, unless we can figure out
+ * how to probe modes off of TV connections.
+ */
+
+int
+intel_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode_ptr;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int j, count = 0;
+ u64 tmp;
+
+ for (j = 0; j < ARRAY_SIZE(input_res_table);
+ j++) {
+ const struct input_res *input = &input_res_table[j];
+ unsigned int hactive_s = input->w;
+ unsigned int vactive_s = input->h;
+
+ if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+ continue;
+
+ if (input->w > 1024 && (!tv_mode->progressive
+ && !tv_mode->component_only))
+ continue;
+
+ mode_ptr = drm_mode_create(connector->dev);
+ if (!mode_ptr)
+ continue;
+ strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+
+ mode_ptr->hdisplay = hactive_s;
+ mode_ptr->hsync_start = hactive_s + 1;
+ mode_ptr->hsync_end = hactive_s + 64;
+ if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
+ mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
+ mode_ptr->htotal = hactive_s + 96;
+
+ mode_ptr->vdisplay = vactive_s;
+ mode_ptr->vsync_start = vactive_s + 1;
+ mode_ptr->vsync_end = vactive_s + 32;
+ if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
+ mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
+ mode_ptr->vtotal = vactive_s + 33;
+
+ tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+ tmp *= mode_ptr->htotal;
+ tmp = tmp / 1000000;
+ mode_ptr->clock = (int) tmp;
+
+ mode_ptr->type = DRM_MODE_TYPE_DRIVER;
+ intel_tv_chose_preferred_modes(connector, mode_ptr);
+ drm_mode_probed_add(connector, mode_ptr);
+ count++;
+ }
+
+ return count;
+}
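
The clock computation in the loop above derives a pixel clock in kHz from the format's refresh (in millihertz) and the synthesized totals: clock = refresh * vtotal * htotal / 10^6. A worked sketch, assuming a 59940 mHz format and the 640x480 entry (htotal = 736, vtotal = 513, per the timing offsets above):

	/* Pixel clock in kHz; the 64-bit intermediate avoids overflow,
	 * as in intel_tv_get_modes() above. */
	static int
	tv_mode_clock_khz(unsigned long long refresh_mhz, int htotal, int vtotal)
	{
		return (int)(refresh_mhz * vtotal * htotal / 1000000);
	}

	/* e.g. tv_mode_clock_khz(59940, 736, 513) == 22631, i.e. ~22.6 MHz. */
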
+
+void
+intel_tv_destroy(struct drm_connector *connector)
+{
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, M_DRM);
+}
+
+int
+intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct drm_crtc *crtc = intel_tv->base.base.crtc;
+ int ret = 0;
+ bool changed = false;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret < 0)
+ goto out;
+
+ if (property == dev->mode_config.tv_left_margin_property &&
+ intel_tv->margin[TV_MARGIN_LEFT] != val) {
+ intel_tv->margin[TV_MARGIN_LEFT] = val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_right_margin_property &&
+ intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+ intel_tv->margin[TV_MARGIN_RIGHT] = val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_top_margin_property &&
+ intel_tv->margin[TV_MARGIN_TOP] != val) {
+ intel_tv->margin[TV_MARGIN_TOP] = val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_bottom_margin_property &&
+ intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+ intel_tv->margin[TV_MARGIN_BOTTOM] = val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_mode_property) {
+ if (val >= ARRAY_SIZE(tv_modes)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
+ goto out;
+
+ intel_tv->tv_format = tv_modes[val].name;
+ changed = true;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (changed && crtc)
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+out:
+ return ret;
+}
+
+static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
+ .mode_fixup = intel_tv_mode_fixup,
+ .mode_set = intel_tv_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_tv_detect,
+ .destroy = intel_tv_destroy,
+ .set_property = intel_tv_set_property,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+ .mode_valid = intel_tv_mode_valid,
+ .get_modes = intel_tv_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the integrated TV is present.
+ * If it is present, return 1; if it is not, return 0.
+ * If no child dev is parsed from VBT, assume that the TV is present.
+ */
+int
+tv_is_present_in_vbt(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not TV, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+ p_child->device_type != DEVICE_TYPE_TV)
+ continue;
+ /* The device is regarded as present only when the
+ * addin_offset is non-zero.
+ */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+void
+intel_tv_init(struct drm_device *dev)
+{
+ struct inteldrm_softc *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_tv *intel_tv;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ u32 tv_dac_on, tv_dac_off, save_tv_dac;
+ char *tv_format_names[ARRAY_SIZE(tv_modes)];
+ int i, initial_mode = 0;
+
+ if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ return;
+
+ if (!tv_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ return;
+ }
+ /* Even if we have an encoder we may not have a connector */
+ if (!dev_priv->int_tv_support)
+ return;
+
+ /*
+ * Sanity check the TV output by verifying that the
+ * DAC register holds a value.
+ */
+ save_tv_dac = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac);
+
+ /*
+ * If the register does not hold the state change enable
+ * bit (either as a 0 or a 1), assume it doesn't really
+ * exist.
+ */
+ if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
+ (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
+ return;
+
+ intel_tv = malloc(sizeof(struct intel_tv), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_tv) {
+ return;
+ }
+
+ intel_connector = malloc(sizeof(struct intel_connector), M_DRM,
+ M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(intel_tv, M_DRM);
+ return;
+ }
+
+ intel_encoder = &intel_tv->base;
+ connector = &intel_connector->base;
+
+ /* The documentation, for the older chipsets at least, recommends
+ * using a polling method rather than hotplug detection for TVs.
+ * This is because in order to perform the hotplug detection, the PLLs
+ * for the TV must be kept alive increasing power drain and starving
+ * bandwidth from other encoders. Notably for instance, it causes
+ * pipe underruns on Crestline when this encoder is supposedly idle.
+ *
+ * More recent chipsets favour HDMI rather than integrated S-Video.
+ */
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+ DRM_MODE_CONNECTOR_SVIDEO);
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+ DRM_MODE_ENCODER_TVDAC);
+
+ intel_encoder->enable = intel_enable_tv;
+ intel_encoder->disable = intel_disable_tv;
+ intel_encoder->get_hw_state = intel_tv_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->cloneable = false;
+ intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+
+ /* BIOS margin values */
+ intel_tv->margin[TV_MARGIN_LEFT] = 54;
+ intel_tv->margin[TV_MARGIN_TOP] = 36;
+ intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+ intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+
+ intel_tv->tv_format = tv_modes[initial_mode].name;
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
+ drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* Create TV properties then attach current values */
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+ tv_format_names[i] = (char *)tv_modes[i].name;
+ drm_mode_create_tv_properties(dev,
+ ARRAY_SIZE(tv_modes),
+ tv_format_names);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
+ initial_mode);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_left_margin_property,
+ intel_tv->margin[TV_MARGIN_LEFT]);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_top_margin_property,
+ intel_tv->margin[TV_MARGIN_TOP]);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_right_margin_property,
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_bottom_margin_property,
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+}
diff --git a/sys/dev/pci/drm/i915_drm.h b/sys/dev/pci/drm/i915_drm.h
index 5db47a46caf..6a3ff2f277a 100644
--- a/sys/dev/pci/drm/i915_drm.h
+++ b/sys/dev/pci/drm/i915_drm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_drm.h,v 1.14 2013/01/09 10:33:42 jsg Exp $ */
+/* $OpenBSD: i915_drm.h,v 1.15 2013/03/18 12:36:51 jsg Exp $ */
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
@@ -281,6 +281,13 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
+#define I915_PARAM_HAS_SEMAPHORES 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
+#define I915_PARAM_HAS_SECURE_BATCHES 23
+#define I915_PARAM_HAS_PINNED_BATCHES 24
typedef struct drm_i915_getparam {
int param;
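
The new parameters above are queried through the existing getparam ioctl. A hedged userspace sketch, not part of this diff; fd is assumed to be an open DRM device, and gp.value is the struct's result pointer field from the full header (truncated in this hunk):

	drm_i915_getparam_t gp;
	int has_semaphores = 0;

	gp.param = I915_PARAM_HAS_SEMAPHORES;
	gp.value = &has_semaphores;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_semaphores) {
		/* the kernel supports inter-ring semaphores */
	}
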
@@ -595,6 +602,23 @@ struct drm_i915_gem_execbuffer2 {
u_int64_t rsvd2;
};
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE (1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED (1<<10)
+
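
Both flags are passed in the flags word of struct drm_i915_gem_execbuffer2. A minimal userspace sketch (the buffer and batch fields are elided; not part of this diff):

	struct drm_i915_gem_execbuffer2 eb;

	memset(&eb, 0, sizeof(eb));
	/* buffers_ptr, buffer_count, batch_len, ... filled in here */
	eb.flags |= I915_EXEC_GEN7_SOL_RESET; /* reset SO write offsets */
	eb.flags |= I915_EXEC_SECURE; /* DRM_ROOT_ONLY | DRM_MASTER only */
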
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
uint32_t handle;
diff --git a/sys/dev/pci/drm/i915_drv.c b/sys/dev/pci/drm/i915_drv.c
deleted file mode 100644
index 394d92fb5d2..00000000000
--- a/sys/dev/pci/drm/i915_drv.c
+++ /dev/null
@@ -1,5434 +0,0 @@
-/* $OpenBSD: i915_drv.c,v 1.123 2012/09/25 10:19:46 jsg Exp $ */
-/*
- * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/*-
- * Copyright © 2008 Intel Corporation
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#include <machine/pmap.h>
-
-#include <sys/queue.h>
-#include <sys/workq.h>
-#if 0
-# define INTELDRM_WATCH_COHERENCY
-# define WATCH_INACTIVE
-#endif
-
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
-int inteldrm_probe(struct device *, void *, void *);
-void inteldrm_attach(struct device *, struct device *, void *);
-int inteldrm_detach(struct device *, int);
-int inteldrm_activate(struct device *, int);
-int inteldrm_ioctl(struct drm_device *, u_long, caddr_t, struct drm_file *);
-int inteldrm_doioctl(struct drm_device *, u_long, caddr_t, struct drm_file *);
-int inteldrm_intr(void *);
-void inteldrm_error(struct inteldrm_softc *);
-int inteldrm_ironlake_intr(void *);
-void inteldrm_lastclose(struct drm_device *);
-
-void inteldrm_wrap_ring(struct inteldrm_softc *);
-int inteldrm_gmch_match(struct pci_attach_args *);
-void inteldrm_chipset_flush(struct inteldrm_softc *);
-void inteldrm_timeout(void *);
-void inteldrm_hangcheck(void *);
-void inteldrm_hung(void *, void *);
-void inteldrm_965_reset(struct inteldrm_softc *, u_int8_t);
-int inteldrm_fault(struct drm_obj *, struct uvm_faultinfo *, off_t,
- vaddr_t, vm_page_t *, int, int, vm_prot_t, int );
-void inteldrm_wipe_mappings(struct drm_obj *);
-void inteldrm_purge_obj(struct drm_obj *);
-void inteldrm_set_max_obj_size(struct inteldrm_softc *);
-void inteldrm_quiesce(struct inteldrm_softc *);
-
-/* For reset and suspend */
-
-void i915_alloc_ifp(struct inteldrm_softc *, struct pci_attach_args *);
-void i965_alloc_ifp(struct inteldrm_softc *, struct pci_attach_args *);
-
-void inteldrm_detect_bit_6_swizzle(struct inteldrm_softc *,
- struct pci_attach_args *);
-
-int inteldrm_setup_mchbar(struct inteldrm_softc *,
- struct pci_attach_args *);
-void inteldrm_teardown_mchbar(struct inteldrm_softc *,
- struct pci_attach_args *, int);
-
-/* Ioctls */
-int inteldrm_getparam(struct inteldrm_softc *dev_priv, void *data);
-int inteldrm_setparam(struct inteldrm_softc *dev_priv, void *data);
-int i915_gem_init_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_create_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_pread_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_pwrite_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_set_domain_ioctl(struct drm_device *, void *,
- struct drm_file *);
-int i915_gem_execbuffer2(struct drm_device *, void *, struct drm_file *);
-int i915_gem_pin_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_unpin_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_busy_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_entervt_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_leavevt_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_get_aperture_ioctl(struct drm_device *, void *,
- struct drm_file *);
-int i915_gem_set_tiling(struct drm_device *, void *, struct drm_file *);
-int i915_gem_get_tiling(struct drm_device *, void *, struct drm_file *);
-int i915_gem_gtt_map_ioctl(struct drm_device *, void *, struct drm_file *);
-int i915_gem_madvise_ioctl(struct drm_device *, void *, struct drm_file *);
-
-/* GEM memory manager functions */
-int i915_gem_init_object(struct drm_obj *);
-void i915_gem_free_object(struct drm_obj *);
-int i915_gem_object_pin(struct drm_obj *, uint32_t, int);
-void i915_gem_object_unpin(struct drm_obj *);
-void i915_gem_retire_requests(struct inteldrm_softc *);
-void i915_gem_retire_request(struct inteldrm_softc *,
- struct inteldrm_request *);
-void i915_gem_retire_work_handler(void *, void*);
-int i915_gem_idle(struct inteldrm_softc *);
-void i915_gem_object_move_to_active(struct drm_obj *);
-void i915_gem_object_move_off_active(struct drm_obj *);
-void i915_gem_object_move_to_inactive(struct drm_obj *);
-void i915_gem_object_move_to_inactive_locked(struct drm_obj *);
-uint32_t i915_add_request(struct inteldrm_softc *);
-void inteldrm_process_flushing(struct inteldrm_softc *, u_int32_t);
-void i915_move_to_tail(struct inteldrm_obj *, struct i915_gem_list *);
-void i915_list_remove(struct inteldrm_obj *);
-int i915_gem_init_hws(struct inteldrm_softc *);
-void i915_gem_cleanup_hws(struct inteldrm_softc *);
-int i915_gem_init_ringbuffer(struct inteldrm_softc *);
-int inteldrm_start_ring(struct inteldrm_softc *);
-void i915_gem_cleanup_ringbuffer(struct inteldrm_softc *);
-int i915_gem_ring_throttle(struct drm_device *, struct drm_file *);
-int i915_gem_evict_inactive(struct inteldrm_softc *, int);
-int i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *,
- u_int32_t, struct drm_i915_gem_relocation_entry **);
-int i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *,
- u_int32_t, struct drm_i915_gem_relocation_entry *);
-void i915_dispatch_gem_execbuffer(struct drm_device *,
- struct drm_i915_gem_execbuffer2 *, uint64_t);
-void i915_gem_object_set_to_gpu_domain(struct drm_obj *);
-int i915_gem_object_pin_and_relocate(struct drm_obj *,
- struct drm_file *, struct drm_i915_gem_exec_object2 *,
- struct drm_i915_gem_relocation_entry *);
-int i915_gem_object_bind_to_gtt(struct drm_obj *, bus_size_t, int);
-int i915_wait_request(struct inteldrm_softc *, uint32_t, int);
-u_int32_t i915_gem_flush(struct inteldrm_softc *, uint32_t, uint32_t);
-int i915_gem_object_unbind(struct drm_obj *, int);
-
-struct drm_obj *i915_gem_find_inactive_object(struct inteldrm_softc *,
- size_t);
-
-int i915_gem_evict_everything(struct inteldrm_softc *, int);
-int i915_gem_evict_something(struct inteldrm_softc *, size_t, int);
-int i915_gem_object_set_to_gtt_domain(struct drm_obj *, int, int);
-int i915_gem_object_set_to_cpu_domain(struct drm_obj *, int, int);
-int i915_gem_object_flush_gpu_write_domain(struct drm_obj *, int, int, int);
-int i915_gem_get_fence_reg(struct drm_obj *, int);
-int i915_gem_object_put_fence_reg(struct drm_obj *, int);
-bus_size_t i915_gem_get_gtt_alignment(struct drm_obj *);
-
-bus_size_t i915_get_fence_size(struct inteldrm_softc *, bus_size_t);
-int i915_tiling_ok(struct drm_device *, int, int, int);
-int i915_gem_object_fence_offset_ok(struct drm_obj *, int);
-void sandybridge_write_fence_reg(struct inteldrm_fence *);
-void i965_write_fence_reg(struct inteldrm_fence *);
-void i915_write_fence_reg(struct inteldrm_fence *);
-void i830_write_fence_reg(struct inteldrm_fence *);
-void i915_gem_bit_17_swizzle(struct drm_obj *);
-void i915_gem_save_bit_17_swizzle(struct drm_obj *);
-int inteldrm_swizzle_page(struct vm_page *page);
-
-/* Debug functions, mostly called from ddb */
-void i915_gem_seqno_info(int);
-void i915_interrupt_info(int);
-void i915_gem_fence_regs_info(int);
-void i915_hws_info(int);
-void i915_batchbuffer_info(int);
-void i915_ringbuffer_data(int);
-void i915_ringbuffer_info(int);
-#ifdef WATCH_INACTIVE
-void inteldrm_verify_inactive(struct inteldrm_softc *, char *, int);
-#else
-#define inteldrm_verify_inactive(dev,file,line)
-#endif
-
-const static struct drm_pcidev inteldrm_pciidlist[] = {
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830M_IGD,
- CHIP_I830|CHIP_M|CHIP_GEN2},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82845G_IGD,
- CHIP_I845G|CHIP_GEN2},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82855GM_IGD,
- CHIP_I85X|CHIP_M|CHIP_GEN2},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82865G_IGD,
- CHIP_I865G|CHIP_GEN2},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915G_IGD_1,
- CHIP_I915G|CHIP_I9XX|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_E7221_IGD,
- CHIP_I915G|CHIP_I9XX|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915GM_IGD_1,
- CHIP_I915GM|CHIP_I9XX|CHIP_M|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945G_IGD_1,
- CHIP_I945G|CHIP_I9XX|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GM_IGD_1,
- CHIP_I945GM|CHIP_I9XX|CHIP_M|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GME_IGD_1,
- CHIP_I945GM|CHIP_I9XX|CHIP_M|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82946GZ_IGD_1,
- CHIP_I965|CHIP_I9XX|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G35_IGD_1,
- CHIP_I965|CHIP_I9XX|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q965_IGD_1,
- CHIP_I965|CHIP_I9XX|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G965_IGD_1,
- CHIP_I965|CHIP_I9XX|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM965_IGD_1,
- CHIP_I965GM|CHIP_I965|CHIP_I9XX|CHIP_M|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GME965_IGD_1,
- CHIP_I965GM|CHIP_I965|CHIP_I9XX|CHIP_M|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G33_IGD_1,
- CHIP_G33|CHIP_I9XX|CHIP_HWS|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q35_IGD_1,
- CHIP_G33|CHIP_I9XX|CHIP_HWS|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q33_IGD_1,
- CHIP_G33|CHIP_I9XX|CHIP_HWS|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM45_IGD_1,
- CHIP_G4X|CHIP_GM45|CHIP_I965|CHIP_I9XX|CHIP_M|CHIP_HWS|CHIP_GEN4},
- {PCI_VENDOR_INTEL, 0x2E02,
- CHIP_G4X|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q45_IGD_1,
- CHIP_G4X|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G45_IGD_1,
- CHIP_G4X|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G41_IGD_1,
- CHIP_G4X|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN4},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PINEVIEW_IGC_1,
- CHIP_G33|CHIP_PINEVIEW|CHIP_M|CHIP_I9XX|CHIP_HWS|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PINEVIEW_M_IGC_1,
- CHIP_G33|CHIP_PINEVIEW|CHIP_M|CHIP_I9XX|CHIP_HWS|CHIP_GEN3},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CLARKDALE_IGD,
- CHIP_IRONLAKE|CHIP_IRONLAKE_D|CHIP_I965|CHIP_I9XX|CHIP_HWS},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ARRANDALE_IGD,
- CHIP_IRONLAKE|CHIP_M|CHIP_IRONLAKE_M|CHIP_I965|CHIP_I9XX|CHIP_HWS},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT1,
- CHIP_SANDYBRIDGE|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN6},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT1,
- CHIP_SANDYBRIDGE|CHIP_M|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN6},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT2,
- CHIP_SANDYBRIDGE|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN6},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT2,
- CHIP_SANDYBRIDGE|CHIP_M|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN6},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_GT2_PLUS,
- CHIP_SANDYBRIDGE|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN6},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE2G_M_GT2_PLUS,
- CHIP_SANDYBRIDGE|CHIP_M|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN6},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_D_GT1,
- CHIP_IVYBRIDGE|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN7},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_M_GT1,
- CHIP_IVYBRIDGE|CHIP_M|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN7},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_S_GT1,
- CHIP_IVYBRIDGE|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN7},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_D_GT2,
- CHIP_IVYBRIDGE|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN7},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_M_GT2,
- CHIP_IVYBRIDGE|CHIP_M|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN7},
- {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_CORE3G_S_GT2,
- CHIP_IVYBRIDGE|CHIP_I965|CHIP_I9XX|CHIP_HWS|CHIP_GEN7},
- {0, 0, 0}
-};
-
-static const struct drm_driver_info inteldrm_driver = {
- .buf_priv_size = 1, /* No dev_priv */
- .file_priv_size = sizeof(struct inteldrm_file),
- .ioctl = inteldrm_ioctl,
- .lastclose = inteldrm_lastclose,
- .vblank_pipes = 2,
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i915_enable_vblank,
- .disable_vblank = i915_disable_vblank,
- .irq_install = i915_driver_irq_install,
- .irq_uninstall = i915_driver_irq_uninstall,
-
- .gem_init_object = i915_gem_init_object,
- .gem_free_object = i915_gem_free_object,
- .gem_fault = inteldrm_fault,
- .gem_size = sizeof(struct inteldrm_obj),
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-
- .flags = DRIVER_AGP | DRIVER_AGP_REQUIRE |
- DRIVER_MTRR | DRIVER_IRQ | DRIVER_GEM,
-};
-
-int
-inteldrm_probe(struct device *parent, void *match, void *aux)
-{
- return (drm_pciprobe((struct pci_attach_args *)aux,
- inteldrm_pciidlist));
-}
-
-/*
- * We're intel IGD, bus 0 function 0 dev 0 should be the GMCH, so it should
- * be Intel
- */
-int
-inteldrm_gmch_match(struct pci_attach_args *pa)
-{
- if (pa->pa_bus == 0 && pa->pa_device == 0 && pa->pa_function == 0 &&
- PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
- PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
- PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_HOST)
- return (1);
- return (0);
-}
-
-void
-inteldrm_attach(struct device *parent, struct device *self, void *aux)
-{
- struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
- struct pci_attach_args *pa = aux, bpa;
- struct vga_pci_bar *bar;
- struct drm_device *dev;
- const struct drm_pcidev *id_entry;
- int i;
-
- id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
- PCI_PRODUCT(pa->pa_id), inteldrm_pciidlist);
- dev_priv->flags = id_entry->driver_private;
- dev_priv->pci_device = PCI_PRODUCT(pa->pa_id);
- if (dev_priv->flags & CHIP_GEN2)
- dev_priv->gen = 2;
- else if (dev_priv->flags & CHIP_GEN3)
- dev_priv->gen = 3;
- else if (dev_priv->flags & CHIP_GEN4)
- dev_priv->gen = 4;
- else if (dev_priv->flags & CHIP_IRONLAKE)
- dev_priv->gen = 5;
- else if (dev_priv->flags & CHIP_GEN6)
- dev_priv->gen = 6;
- else if (dev_priv->flags & CHIP_GEN7)
- dev_priv->gen = 7;
- KASSERT(dev_priv->gen != 0);
-
- dev_priv->pc = pa->pa_pc;
- dev_priv->tag = pa->pa_tag;
- dev_priv->dmat = pa->pa_dmat;
- dev_priv->bst = pa->pa_memt;
-
- /* we need to use this api for now due to sharing with intagp */
- bar = vga_pci_bar_info((struct vga_pci_softc *)parent,
- (IS_I9XX(dev_priv) ? 0 : 1));
- if (bar == NULL) {
- printf(": can't get BAR info\n");
- return;
- }
-
- dev_priv->regs = vga_pci_bar_map((struct vga_pci_softc *)parent,
- bar->addr, 0, 0);
- if (dev_priv->regs == NULL) {
- printf(": can't map mmio space\n");
- return;
- }
-
- /*
- * i945G/GM report MSI capability despite not actually supporting it.
- * so explicitly disable it.
- */
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
-
- if (pci_intr_map(pa, &dev_priv->ih) != 0) {
- printf(": couldn't map interrupt\n");
- return;
- }
-
- /*
- * set up interrupt handler, note that we don't switch the interrupt
- * on until the X server talks to us, kms will change this.
- */
- dev_priv->irqh = pci_intr_establish(dev_priv->pc, dev_priv->ih, IPL_TTY,
- (HAS_PCH_SPLIT(dev_priv) ? inteldrm_ironlake_intr : inteldrm_intr),
- dev_priv, dev_priv->dev.dv_xname);
- if (dev_priv->irqh == NULL) {
- printf(": couldn't establish interrupt\n");
- return;
- }
-
- /* Unmask the interrupts that we always want on. */
- if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->irq_mask_reg = ~PCH_SPLIT_DISPLAY_INTR_FIX;
- /* masked for now, turned on on demand */
- dev_priv->gt_irq_mask_reg = ~PCH_SPLIT_RENDER_INTR_FIX;
- dev_priv->pch_irq_mask_reg = ~PCH_SPLIT_HOTPLUG_INTR_FIX;
- } else {
- dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
- }
-
- dev_priv->workq = workq_create("intelrel", 1, IPL_TTY);
- if (dev_priv->workq == NULL) {
- printf("couldn't create workq\n");
- return;
- }
-
- /* GEM init */
- TAILQ_INIT(&dev_priv->mm.active_list);
- TAILQ_INIT(&dev_priv->mm.flushing_list);
- TAILQ_INIT(&dev_priv->mm.inactive_list);
- TAILQ_INIT(&dev_priv->mm.gpu_write_list);
- TAILQ_INIT(&dev_priv->mm.request_list);
- TAILQ_INIT(&dev_priv->mm.fence_list);
- timeout_set(&dev_priv->mm.retire_timer, inteldrm_timeout, dev_priv);
- timeout_set(&dev_priv->mm.hang_timer, inteldrm_hangcheck, dev_priv);
- dev_priv->mm.next_gem_seqno = 1;
- dev_priv->mm.suspended = 1;
-
- /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- if (IS_GEN3(dev_priv)) {
- u_int32_t tmp = I915_READ(MI_ARB_STATE);
- if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
- /*
- * arb state is a masked write, so set bit + bit
- * in mask
- */
- tmp = MI_ARB_C3_LP_WRITE_ENABLE |
- (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
- I915_WRITE(MI_ARB_STATE, tmp);
- }
- }
-
- /* For the X server, in kms mode this will not be needed */
- dev_priv->fence_reg_start = 3;
-
- if (IS_I965G(dev_priv) || IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv))
- dev_priv->num_fence_regs = 16;
- else
- dev_priv->num_fence_regs = 8;
-
- /* Initialise fences to zero, else on some macs we'll get corruption */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
- } else if (IS_I965G(dev_priv)) {
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
- }
-
- if (pci_find_device(&bpa, inteldrm_gmch_match) == 0) {
- printf(": can't find GMCH\n");
- return;
- }
-
- /* Set up the IFP for chipset flushing */
- if (dev_priv->flags & (CHIP_I915G|CHIP_I915GM|CHIP_I945G|CHIP_I945GM)) {
- i915_alloc_ifp(dev_priv, &bpa);
- } else if (IS_I965G(dev_priv) || IS_G33(dev_priv)) {
- i965_alloc_ifp(dev_priv, &bpa);
- } else {
- int nsegs;
- /*
- * I8XX has no flush page mechanism, we fake it by writing until
- * the cache is empty. allocate a page to scribble on
- */
- dev_priv->ifp.i8xx.kva = NULL;
- if (bus_dmamem_alloc(pa->pa_dmat, PAGE_SIZE, 0, 0,
- &dev_priv->ifp.i8xx.seg, 1, &nsegs, BUS_DMA_WAITOK) == 0) {
- if (bus_dmamem_map(pa->pa_dmat, &dev_priv->ifp.i8xx.seg,
- 1, PAGE_SIZE, &dev_priv->ifp.i8xx.kva, 0) != 0) {
- bus_dmamem_free(pa->pa_dmat,
- &dev_priv->ifp.i8xx.seg, nsegs);
- dev_priv->ifp.i8xx.kva = NULL;
- }
- }
- }
-
- inteldrm_detect_bit_6_swizzle(dev_priv, &bpa);
- /* Init HWS */
- if (!I915_NEED_GFX_HWS(dev_priv)) {
- if (i915_init_phys_hws(dev_priv, pa->pa_dmat) != 0) {
- printf(": couldn't alloc HWS page\n");
- return;
- }
- }
-
- printf(": %s\n", pci_intr_string(pa->pa_pc, dev_priv->ih));
-
- mtx_init(&dev_priv->user_irq_lock, IPL_TTY);
- mtx_init(&dev_priv->list_lock, IPL_NONE);
- mtx_init(&dev_priv->request_lock, IPL_NONE);
- mtx_init(&dev_priv->fence_lock, IPL_NONE);
-
- /* All intel chipsets need to be treated as agp, so just pass one */
- dev_priv->drmdev = drm_attach_pci(&inteldrm_driver, pa, 1, self);
-
- dev = (struct drm_device *)dev_priv->drmdev;
-
- /* XXX would be a lot nicer to get agp info before now */
- uvm_page_physload(atop(dev->agp->base), atop(dev->agp->base +
- dev->agp->info.ai_aperture_size), atop(dev->agp->base),
- atop(dev->agp->base + dev->agp->info.ai_aperture_size),
- PHYSLOAD_DEVICE);
- /* array of vm pages that physload introduced. */
- dev_priv->pgs = PHYS_TO_VM_PAGE(dev->agp->base);
- KASSERT(dev_priv->pgs != NULL);
- /*
- * XXX mark all pages write combining so user mmaps get the right
- * bits. We really need a proper MI api for doing this, but for now
- * this allows us to use PAT where available.
- */
- for (i = 0; i < atop(dev->agp->info.ai_aperture_size); i++)
- atomic_setbits_int(&(dev_priv->pgs[i].pg_flags), PG_PMAP_WC);
- if (agp_init_map(dev_priv->bst, dev->agp->base,
- dev->agp->info.ai_aperture_size, BUS_SPACE_MAP_LINEAR |
- BUS_SPACE_MAP_PREFETCHABLE, &dev_priv->agph))
- panic("can't map aperture");
-}
-
-int
-inteldrm_detach(struct device *self, int flags)
-{
- struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
-
- /* this will quiesce any dma that's going on and kill the timeouts. */
- if (dev_priv->drmdev != NULL) {
- config_detach(dev_priv->drmdev, flags);
- dev_priv->drmdev = NULL;
- }
-
- if (!I915_NEED_GFX_HWS(dev_priv) && dev_priv->hws_dmamem) {
- drm_dmamem_free(dev_priv->dmat, dev_priv->hws_dmamem);
- dev_priv->hws_dmamem = NULL;
- /* Need to rewrite hardware status page */
- I915_WRITE(HWS_PGA, 0x1ffff000);
- dev_priv->hw_status_page = NULL;
- }
-
- if (IS_I9XX(dev_priv) && dev_priv->ifp.i9xx.bsh != 0) {
- bus_space_unmap(dev_priv->ifp.i9xx.bst, dev_priv->ifp.i9xx.bsh,
- PAGE_SIZE);
- } else if (dev_priv->flags & (CHIP_I830 | CHIP_I845G | CHIP_I85X |
- CHIP_I865G) && dev_priv->ifp.i8xx.kva != NULL) {
- bus_dmamem_unmap(dev_priv->dmat, dev_priv->ifp.i8xx.kva,
- PAGE_SIZE);
- bus_dmamem_free(dev_priv->dmat, &dev_priv->ifp.i8xx.seg, 1);
- }
-
- pci_intr_disestablish(dev_priv->pc, dev_priv->irqh);
-
- if (dev_priv->regs != NULL)
- vga_pci_bar_unmap(dev_priv->regs);
-
- return (0);
-}
-
-int
-inteldrm_activate(struct device *arg, int act)
-{
- struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)arg;
-
- switch (act) {
- case DVACT_QUIESCE:
- inteldrm_quiesce(dev_priv);
- break;
- case DVACT_SUSPEND:
- i915_save_state(dev_priv);
- break;
- case DVACT_RESUME:
- i915_restore_state(dev_priv);
- /* entrypoints can stop sleeping now */
- atomic_clearbits_int(&dev_priv->sc_flags, INTELDRM_QUIET);
- wakeup(&dev_priv->flags);
- break;
- }
-
- return (0);
-}
-
-struct cfattach inteldrm_ca = {
- sizeof(struct inteldrm_softc), inteldrm_probe, inteldrm_attach,
- inteldrm_detach, inteldrm_activate
-};
-
-struct cfdriver inteldrm_cd = {
- 0, "inteldrm", DV_DULL
-};
-
-int
-inteldrm_ioctl(struct drm_device *dev, u_long cmd, caddr_t data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- int error = 0;
-
- while ((dev_priv->sc_flags & INTELDRM_QUIET) && error == 0)
- error = tsleep(&dev_priv->flags, PCATCH, "intelioc", 0);
- if (error)
- return (error);
- dev_priv->entries++;
-
- error = inteldrm_doioctl(dev, cmd, data, file_priv);
-
- dev_priv->entries--;
- if (dev_priv->sc_flags & INTELDRM_QUIET)
- wakeup(&dev_priv->entries);
- return (error);
-}
-
-int
-inteldrm_doioctl(struct drm_device *dev, u_long cmd, caddr_t data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
-
- if (file_priv->authenticated == 1) {
- switch (cmd) {
- case DRM_IOCTL_I915_GETPARAM:
- return (inteldrm_getparam(dev_priv, data));
- case DRM_IOCTL_I915_GEM_EXECBUFFER2:
- return (i915_gem_execbuffer2(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_BUSY:
- return (i915_gem_busy_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_THROTTLE:
- return (i915_gem_ring_throttle(dev, file_priv));
- case DRM_IOCTL_I915_GEM_MMAP:
- return (i915_gem_gtt_map_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_CREATE:
- return (i915_gem_create_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_PREAD:
- return (i915_gem_pread_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_PWRITE:
- return (i915_gem_pwrite_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_SET_DOMAIN:
- return (i915_gem_set_domain_ioctl(dev, data,
- file_priv));
- case DRM_IOCTL_I915_GEM_SET_TILING:
- return (i915_gem_set_tiling(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_GET_TILING:
- return (i915_gem_get_tiling(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_GET_APERTURE:
- return (i915_gem_get_aperture_ioctl(dev, data,
- file_priv));
- case DRM_IOCTL_I915_GEM_MADVISE:
- return (i915_gem_madvise_ioctl(dev, data, file_priv));
- default:
- break;
- }
- }
-
- if (file_priv->master == 1) {
- switch (cmd) {
- case DRM_IOCTL_I915_SETPARAM:
- return (inteldrm_setparam(dev_priv, data));
- case DRM_IOCTL_I915_GEM_INIT:
- return (i915_gem_init_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_ENTERVT:
- return (i915_gem_entervt_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_LEAVEVT:
- return (i915_gem_leavevt_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_PIN:
- return (i915_gem_pin_ioctl(dev, data, file_priv));
- case DRM_IOCTL_I915_GEM_UNPIN:
- return (i915_gem_unpin_ioctl(dev, data, file_priv));
- }
- }
- return (EINVAL);
-}
-
-int
-inteldrm_ironlake_intr(void *arg)
-{
- struct inteldrm_softc *dev_priv = arg;
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- u_int32_t de_iir, gt_iir, de_ier, pch_iir;
- int ret = 0, i;
-
- de_ier = I915_READ(DEIER);
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- (void)I915_READ(DEIER);
-
- de_iir = I915_READ(DEIIR);
- gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
-
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
- goto done;
- ret = 1;
-
- if (gt_iir & GT_USER_INTERRUPT) {
- wakeup(dev_priv);
- dev_priv->mm.hang_cnt = 0;
- timeout_add_msec(&dev_priv->mm.hang_timer, 750);
- }
- if (gt_iir & GT_MASTER_ERROR)
- inteldrm_error(dev_priv);
-
- if (IS_GEN7(dev_priv)) {
- for (i = 0; i < 3; i++) {
- if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
- drm_handle_vblank(dev, i);
- }
- } else {
- if (de_iir & DE_PIPEA_VBLANK)
- drm_handle_vblank(dev, 0);
- if (de_iir & DE_PIPEB_VBLANK)
- drm_handle_vblank(dev, 1);
- }
-
- /* should clear PCH hotplug event before clearing CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
-
-done:
- I915_WRITE(DEIER, de_ier);
- (void)I915_READ(DEIER);
-
- return (ret);
-}
-
-int
-inteldrm_intr(void *arg)
-{
- struct inteldrm_softc *dev_priv = arg;
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- u_int32_t iir, pipea_stats = 0, pipeb_stats = 0;
-
- /*
- * we're not set up, don't poke the hw and if we're vt switched
- * then nothing will be enabled
- */
- if (dev_priv->hw_status_page == NULL || dev_priv->mm.suspended)
- return (0);
-
- iir = I915_READ(IIR);
- if (iir == 0)
- return (0);
-
- /*
- * lock is to protect from writes to PIPESTAT and IMR from other cores.
- */
- mtx_enter(&dev_priv->user_irq_lock);
- /*
- * Clear the PIPE(A|B)STAT regs before the IIR
- */
- if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
- pipea_stats = I915_READ(_PIPEASTAT);
- I915_WRITE(_PIPEASTAT, pipea_stats);
- }
- if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
- pipeb_stats = I915_READ(_PIPEBSTAT);
- I915_WRITE(_PIPEBSTAT, pipeb_stats);
- }
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- inteldrm_error(dev_priv);
-
- I915_WRITE(IIR, iir);
- (void)I915_READ(IIR); /* Flush posted writes */
-
- if (iir & I915_USER_INTERRUPT) {
- wakeup(dev_priv);
- dev_priv->mm.hang_cnt = 0;
- timeout_add_msec(&dev_priv->mm.hang_timer, 750);
- }
-
- mtx_leave(&dev_priv->user_irq_lock);
-
- if (pipea_stats & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(dev, 0);
-
- if (pipeb_stats & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(dev, 1);
-
- return (1);
-}
-
-u_int32_t
-inteldrm_read_hws(struct inteldrm_softc *dev_priv, int reg)
-{
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- struct inteldrm_obj *obj_priv;
- bus_dma_tag_t tag;
- bus_dmamap_t map;
- u_int32_t val;
-
- if (I915_NEED_GFX_HWS(dev_priv)) {
- obj_priv = (struct inteldrm_obj *)dev_priv->hws_obj;
- map = obj_priv->dmamap;
- tag = dev_priv->agpdmat;
- } else {
- map = dev_priv->hws_dmamem->map;
- tag = dev->dmat;
- }
-
- bus_dmamap_sync(tag, map, 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
-
- val = ((volatile u_int32_t *)(dev_priv->hw_status_page))[reg];
- bus_dmamap_sync(tag, map, 0, PAGE_SIZE, BUS_DMASYNC_PREREAD);
-
- return (val);
-}
-
-/*
- * These five ring manipulation functions are protected by dev->dev_lock.
- */
-int
-inteldrm_wait_ring(struct inteldrm_softc *dev_priv, int n)
-{
- struct inteldrm_ring *ring = &dev_priv->ring;
- u_int32_t acthd_reg, acthd, last_acthd, last_head;
- int i;
-
- acthd_reg = IS_I965G(dev_priv) ? ACTHD_I965 : ACTHD;
- last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- last_acthd = I915_READ(acthd_reg);
-
- /* ugh. Could really do with a proper, resettable timer here. */
- for (i = 0; i < 100000; i++) {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- acthd = I915_READ(acthd_reg);
- ring->space = ring->head - (ring->tail + 8);
-
- INTELDRM_VPRINTF("%s: head: %x tail: %x space: %x\n", __func__,
- ring->head, ring->tail, ring->space);
- if (ring->space < 0)
- ring->space += ring->size;
- if (ring->space >= n)
- return (0);
-
- /* Only timeout if the ring isn't chewing away on something */
- if (ring->head != last_head || acthd != last_acthd)
- i = 0;
-
- last_head = ring->head;
- last_acthd = acthd;
- delay(10);
- }
-
- return (EBUSY);
-}
-
-void
-inteldrm_wrap_ring(struct inteldrm_softc *dev_priv)
-{
- u_int32_t rem;
-
- rem = dev_priv->ring.size - dev_priv->ring.tail;
- if (dev_priv->ring.space < rem &&
- inteldrm_wait_ring(dev_priv, rem) != 0)
- return; /* XXX */
-
- dev_priv->ring.space -= rem;
-
- bus_space_set_region_4(dev_priv->bst, dev_priv->ring.bsh,
- dev_priv->ring.woffset, MI_NOOP, rem / 4);
-
- dev_priv->ring.tail = 0;
-}
-
-void
-inteldrm_begin_ring(struct inteldrm_softc *dev_priv, int ncmd)
-{
- int bytes = 4 * ncmd;
-
- INTELDRM_VPRINTF("%s: %d\n", __func__, ncmd);
- if (dev_priv->ring.tail + bytes > dev_priv->ring.size)
- inteldrm_wrap_ring(dev_priv);
- if (dev_priv->ring.space < bytes)
- inteldrm_wait_ring(dev_priv, bytes);
- dev_priv->ring.woffset = dev_priv->ring.tail;
- dev_priv->ring.tail += bytes;
- dev_priv->ring.tail &= dev_priv->ring.size - 1;
- dev_priv->ring.space -= bytes;
-}
-
-void
-inteldrm_out_ring(struct inteldrm_softc *dev_priv, u_int32_t cmd)
-{
- INTELDRM_VPRINTF("%s: %x\n", __func__, cmd);
- bus_space_write_4(dev_priv->bst, dev_priv->ring.bsh,
- dev_priv->ring.woffset, cmd);
- /*
- * don't need to deal with wrap here because we padded
- * the ring out if we would wrap
- */
- dev_priv->ring.woffset += 4;
-}
-
-void
-inteldrm_advance_ring(struct inteldrm_softc *dev_priv)
-{
- INTELDRM_VPRINTF("%s: %x, %x\n", __func__, dev_priv->ring.wspace,
- dev_priv->ring.woffset);
- DRM_MEMORYBARRIER();
- I915_WRITE(PRB0_TAIL, dev_priv->ring.tail);
-}
-
-void
-inteldrm_update_ring(struct inteldrm_softc *dev_priv)
-{
- struct inteldrm_ring *ring = &dev_priv->ring;
-
- ring->head = (I915_READ(PRB0_HEAD) & HEAD_ADDR);
- ring->tail = (I915_READ(PRB0_TAIL) & TAIL_ADDR);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
- INTELDRM_VPRINTF("%s: head: %x tail: %x space: %x\n", __func__,
- ring->head, ring->tail, ring->space);
-}
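
For reference, the ring routines being deleted here were used in a begin/emit/advance pattern, with free space computed modulo the ring size. A sketch of the historical usage; MI_FLUSH and MI_NOOP stand in for whatever commands a caller actually emitted:

	/* Reserve space for two dwords, emit them, then kick the tail. */
	inteldrm_begin_ring(dev_priv, 2);
	inteldrm_out_ring(dev_priv, MI_FLUSH);
	inteldrm_out_ring(dev_priv, MI_NOOP);
	inteldrm_advance_ring(dev_priv);

	/* Free space wraps around: with size 0x10000, head 0x0100 and
	 * tail 0xff00, space = 0x0100 - (0xff00 + 8) + 0x10000 = 0x1f8. */
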
-
-/*
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-int
-i915_init_phys_hws(struct inteldrm_softc *dev_priv, bus_dma_tag_t dmat)
-{
- /* Program Hardware Status Page */
- if ((dev_priv->hws_dmamem = drm_dmamem_alloc(dmat, PAGE_SIZE,
- PAGE_SIZE, 1, PAGE_SIZE, 0, BUS_DMA_READ)) == NULL) {
- return (ENOMEM);
- }
-
- dev_priv->hw_status_page = dev_priv->hws_dmamem->kva;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
- bus_dmamap_sync(dmat, dev_priv->hws_dmamem->map, 0, PAGE_SIZE,
- BUS_DMASYNC_PREREAD);
- I915_WRITE(HWS_PGA, dev_priv->hws_dmamem->map->dm_segs[0].ds_addr);
- DRM_DEBUG("Enabled hardware status page\n");
- return (0);
-}
-
-void
-i915_alloc_ifp(struct inteldrm_softc *dev_priv, struct pci_attach_args *bpa)
-{
- bus_addr_t addr;
- u_int32_t reg;
-
- dev_priv->ifp.i9xx.bst = bpa->pa_memt;
-
- reg = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I915_IFPADDR);
- if (reg & 0x1) {
- addr = (bus_addr_t)reg;
- addr &= ~0x1;
- /* XXX extents ... need data on whether bioses alloc or not. */
- if (bus_space_map(bpa->pa_memt, addr, PAGE_SIZE, 0,
- &dev_priv->ifp.i9xx.bsh) != 0)
- goto nope;
- return;
- } else if (bpa->pa_memex == NULL || extent_alloc(bpa->pa_memex,
- PAGE_SIZE, PAGE_SIZE, 0, 0, 0, &addr) || bus_space_map(bpa->pa_memt,
- addr, PAGE_SIZE, 0, &dev_priv->ifp.i9xx.bsh))
- goto nope;
-
- pci_conf_write(bpa->pa_pc, bpa->pa_tag, I915_IFPADDR, addr | 0x1);
-
- return;
-
-nope:
- dev_priv->ifp.i9xx.bsh = 0;
- printf(": no ifp ");
-}
-
-void
-i965_alloc_ifp(struct inteldrm_softc *dev_priv, struct pci_attach_args *bpa)
-{
- bus_addr_t addr;
- u_int32_t lo, hi;
-
- dev_priv->ifp.i9xx.bst = bpa->pa_memt;
-
- hi = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR + 4);
- lo = pci_conf_read(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR);
- if (lo & 0x1) {
- addr = (((u_int64_t)hi << 32) | lo);
- addr &= ~0x1;
- /* XXX extents ... need data on whether bioses alloc or not. */
- if (bus_space_map(bpa->pa_memt, addr, PAGE_SIZE, 0,
- &dev_priv->ifp.i9xx.bsh) != 0)
- goto nope;
- return;
- } else if (bpa->pa_memex == NULL || extent_alloc(bpa->pa_memex,
- PAGE_SIZE, PAGE_SIZE, 0, 0, 0, &addr) || bus_space_map(bpa->pa_memt,
- addr, PAGE_SIZE, 0, &dev_priv->ifp.i9xx.bsh))
- goto nope;
-
- pci_conf_write(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR + 4,
- upper_32_bits(addr));
- pci_conf_write(bpa->pa_pc, bpa->pa_tag, I965_IFPADDR,
- (addr & 0xffffffff) | 0x1);
-
- return;
-
-nope:
- dev_priv->ifp.i9xx.bsh = 0;
- printf(": no ifp ");
-}
-
-void
-inteldrm_chipset_flush(struct inteldrm_softc *dev_priv)
-{
- /*
- * Write to this flush page flushes the chipset write cache.
- * The write will return when it is done.
- */
- if (IS_I9XX(dev_priv)) {
- if (dev_priv->ifp.i9xx.bsh != 0)
- bus_space_write_4(dev_priv->ifp.i9xx.bst,
- dev_priv->ifp.i9xx.bsh, 0, 1);
- } else {
- int i;
-
- wbinvd();
-
-#define I830_HIC 0x70
-
- I915_WRITE(I830_HIC, (I915_READ(I830_HIC) | (1<<31)));
- for (i = 1000; i; i--) {
- if (!(I915_READ(I830_HIC) & (1<<31)))
- break;
- delay(100);
- }
-
- }
-}
-
-void
-inteldrm_lastclose(struct drm_device *dev)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct vm_page *p;
- int ret;
-
- ret = i915_gem_idle(dev_priv);
- if (ret)
- DRM_ERROR("failed to idle hardware: %d\n", ret);
-
- if (dev_priv->agpdmat != NULL) {
- /*
- * make sure we nuke everything, we may have mappings that we've
- * unrefed, but uvm has a reference to them for maps. Make sure
- * they get unbound and any accesses will segfault.
- * XXX only do ones in GEM.
- */
- for (p = dev_priv->pgs; p < dev_priv->pgs +
- (dev->agp->info.ai_aperture_size / PAGE_SIZE); p++)
- pmap_page_protect(p, VM_PROT_NONE);
- agp_bus_dma_destroy((struct agp_softc *)dev->agp->agpdev,
- dev_priv->agpdmat);
- }
- dev_priv->agpdmat = NULL;
-}
-
-int
-inteldrm_getparam(struct inteldrm_softc *dev_priv, void *data)
-{
- drm_i915_getparam_t *param = data;
- int value;
-
- switch (param->param) {
- case I915_PARAM_CHIPSET_ID:
- value = dev_priv->pci_device;
- break;
- case I915_PARAM_HAS_GEM:
- value = 1;
- break;
- case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
- break;
- case I915_PARAM_HAS_EXECBUF2:
- value = 1;
- break;
- default:
- DRM_DEBUG("Unknown parameter %d\n", param->param);
- return (EINVAL);
- }
- return (copyout(&value, param->value, sizeof(int)));
-}
-
-int
-inteldrm_setparam(struct inteldrm_softc *dev_priv, void *data)
-{
- drm_i915_setparam_t *param = data;
-
- switch (param->param) {
- case I915_SETPARAM_NUM_USED_FENCES:
- if (param->value > dev_priv->num_fence_regs ||
- param->value < 0)
- return EINVAL;
- /* Userspace can use first N regs */
- dev_priv->fence_reg_start = param->value;
- break;
- default:
- DRM_DEBUG("unknown parameter %d\n", param->param);
- return (EINVAL);
- }
-
- return 0;
-}
-
-int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_i915_gem_init *args = data;
-
- DRM_LOCK();
-
- if (args->gtt_start >= args->gtt_end ||
- args->gtt_end > dev->agp->info.ai_aperture_size ||
- (args->gtt_start & PAGE_MASK) != 0 ||
- (args->gtt_end & PAGE_MASK) != 0) {
- DRM_UNLOCK();
- return (EINVAL);
- }
- /*
- * putting stuff in the last page of the aperture can cause nasty
- * problems with prefetch going into unassigned memory. Since we put
- * a scratch page on all unused aperture pages, just leave the last
- * page as a spill to prevent gpu hangs.
- */
- if (args->gtt_end == dev->agp->info.ai_aperture_size)
- args->gtt_end -= 4096;
-
- if (agp_bus_dma_init((struct agp_softc *)dev->agp->agpdev,
- dev->agp->base + args->gtt_start, dev->agp->base + args->gtt_end,
- &dev_priv->agpdmat) != 0) {
- DRM_UNLOCK();
- return (ENOMEM);
- }
-
- dev->gtt_total = (uint32_t)(args->gtt_end - args->gtt_start);
- inteldrm_set_max_obj_size(dev_priv);
-
- DRM_UNLOCK();
-
- return 0;
-}
-
-void
-inteldrm_set_max_obj_size(struct inteldrm_softc *dev_priv)
-{
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
-
- /*
- * Allow max obj size up to the size where only 2 would fit the
- * aperture, but some slop exists due to alignment etc.
- */
- dev_priv->max_gem_obj_size = (dev->gtt_total -
- atomic_read(&dev->pin_memory)) * 3 / 4 / 2;
-}
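-
-/*
- * Example with made-up numbers: a 256MB gtt_total with 16MB pinned
- * leaves 240MB, so max_gem_obj_size becomes 240MB * 3 / 4 / 2 = 90MB;
- * larger create requests are rejected with EFBIG below.
- */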
-
-int
-i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_get_aperture *args = data;
-
- /* we need a write lock here to make sure we get the right value */
- DRM_LOCK();
- args->aper_size = dev->gtt_total;
- args->aper_available_size = (args->aper_size -
- atomic_read(&dev->pin_memory));
- DRM_UNLOCK();
-
- return (0);
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_i915_gem_create *args = data;
- struct drm_obj *obj;
- int handle, ret;
-
- args->size = round_page(args->size);
- /*
- * XXX to avoid copying between 2 objs each more than half the
- * aperture size, we don't allow allocations that big. This will be fixed
- * eventually by intelligently falling back to cpu reads/writes in
- * such cases. (linux allows this but does cpu maps in the ddx instead).
- */
- if (args->size > dev_priv->max_gem_obj_size)
- return (EFBIG);
-
- /* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
- if (obj == NULL)
- return (ENOMEM);
-
- /* we give our reference to the handle */
- ret = drm_handle_create(file_priv, obj, &handle);
-
- if (ret == 0)
- args->handle = handle;
- else
- drm_unref(&obj->uobj);
-
- return (ret);
-}
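-
-/*
- * From userland the ioctl above is reached roughly like this
- * (hypothetical sketch; fd and use_handle are placeholders):
- *
- * struct drm_i915_gem_create create;
- *
- * memset(&create, 0, sizeof(create));
- * create.size = 2 * 4096;
- * if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
- * use_handle(create.handle);
- */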
-
-/**
- * Reads data from the object referenced by handle.
- *
- * On error, the contents of *data are undefined.
- */
-int
-i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_i915_gem_pread *args = data;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- char *vaddr;
- bus_space_handle_t bsh;
- bus_size_t bsize;
- voff_t offset;
- int ret;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
- DRM_READLOCK();
- drm_hold_object(obj);
-
- /* Bounds check source. */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- ret = EINVAL;
- goto out;
- }
-
- ret = i915_gem_object_pin(obj, 0, 1);
- if (ret) {
- goto out;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, 0, 1);
- if (ret)
- goto unpin;
-
- obj_priv = (struct inteldrm_obj *)obj;
- offset = obj_priv->gtt_offset + args->offset;
-
- bsize = round_page(offset + args->size) - trunc_page(offset);
-
- if ((ret = agp_map_subregion(dev_priv->agph,
- trunc_page(offset), bsize, &bsh)) != 0)
- goto unpin;
- vaddr = bus_space_vaddr(dev_priv->bst, bsh);
- if (vaddr == NULL) {
- ret = EFAULT;
- goto unmap;
- }
-
- ret = copyout(vaddr + (offset & PAGE_MASK),
- (char *)(uintptr_t)args->data_ptr, args->size);
-
-unmap:
- agp_unmap_subregion(dev_priv->agph, bsh, bsize);
-unpin:
- i915_gem_object_unpin(obj);
-out:
- drm_unhold_and_unref(obj);
- DRM_READUNLOCK();
-
- return (ret);
-}
-
-/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
-int
-i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_i915_gem_pwrite *args = data;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- char *vaddr;
- bus_space_handle_t bsh;
- bus_size_t bsize;
- off_t offset;
- int ret = 0;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
- DRM_READLOCK();
- drm_hold_object(obj);
-
- /* Bounds check destination. */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- ret = EINVAL;
- goto out;
- }
-
- ret = i915_gem_object_pin(obj, 0, 1);
- if (ret) {
- goto out;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, 1, 1);
- if (ret)
- goto unpin;
-
- obj_priv = (struct inteldrm_obj *)obj;
- offset = obj_priv->gtt_offset + args->offset;
- bsize = round_page(offset + args->size) - trunc_page(offset);
-
- if ((ret = agp_map_subregion(dev_priv->agph,
- trunc_page(offset), bsize, &bsh)) != 0)
- goto unpin;
- vaddr = bus_space_vaddr(dev_priv->bst, bsh);
- if (vaddr == NULL) {
- ret = EFAULT;
- goto unmap;
- }
-
- ret = copyin((char *)(uintptr_t)args->data_ptr,
- vaddr + (offset & PAGE_MASK), args->size);
-
-unmap:
- agp_unmap_subregion(dev_priv->agph, bsh, bsize);
-unpin:
- i915_gem_object_unpin(obj);
-out:
- drm_unhold_and_unref(obj);
- DRM_READUNLOCK();
-
- return (ret);
-}
-
-/**
- * Called when user space prepares to use an object with the CPU, either through
- * the mmap ioctl's mapping or a GTT mapping.
- */
-int
-i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_set_domain *args = data;
- struct drm_obj *obj;
- u_int32_t read_domains = args->read_domains;
- u_int32_t write_domain = args->write_domain;
- int ret;
-
- /*
- * Only handle setting domains to types we allow the cpu to see.
- * while linux allows the CPU domain here, we only allow GTT since that
- * is all that we let userland near.
- * Also sanity check that having something in the write domain implies
- * it's in the read domain, and only that read domain.
- */
- if ((write_domain | read_domains) & ~I915_GEM_DOMAIN_GTT ||
- (write_domain != 0 && read_domains != write_domain))
- return (EINVAL);
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
- drm_hold_object(obj);
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0, 1);
-
- drm_unhold_and_unref(obj);
- /*
- * Silently promote `you're not bound, there was nothing to do'
- * to success, since the client was just asking us to make sure
- * everything was done.
- */
- return ((ret == EINVAL) ? 0 : ret);
-}
-
-int
-i915_gem_gtt_map_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_mmap *args = data;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- vaddr_t addr;
- voff_t offset;
- vsize_t end, nsize;
- int ret;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
-
- /* Since we are doing purely uvm-related operations here we do
- * not need to hold the object; a reference alone is sufficient.
- */
- obj_priv = (struct inteldrm_obj *)obj;
-
- /* Check size. Also ensure that the object is not purgeable */
- if (args->size == 0 || args->offset > obj->size || args->size >
- obj->size || (args->offset + args->size) > obj->size ||
- i915_obj_purgeable(obj_priv)) {
- ret = EINVAL;
- goto done;
- }
-
- end = round_page(args->offset + args->size);
- offset = trunc_page(args->offset);
- nsize = end - offset;
-
- /*
- * We give our reference from object_lookup to the mmap, so we
- * only need to free it in the case that the map fails.
- */
- addr = 0;
- ret = uvm_map(&curproc->p_vmspace->vm_map, &addr, nsize, &obj->uobj,
- offset, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
- UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
-
-done:
- if (ret == 0)
- args->addr_ptr = (uint64_t) addr + (args->offset & PAGE_MASK);
- else
- drm_unref(&obj->uobj);
-
- return (ret);
-}
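-
-/*
- * Page-rounding example for the map above (numbers illustrative):
- * offset 0x1234 and size 0x100 give trunc_page(0x1234) = 0x1000 and
- * round_page(0x1334) = 0x2000, so nsize = 0x1000 and the returned
- * addr_ptr is the mapped address plus the sub-page slack of 0x234.
- */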
-
-/* called locked */
-void
-i915_gem_object_move_to_active(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- struct inteldrm_fence *reg;
- u_int32_t seqno = dev_priv->mm.next_gem_seqno;
-
- MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
- MUTEX_ASSERT_LOCKED(&dev_priv->list_lock);
-
- /* Add a reference if we're newly entering the active list. */
- if (!inteldrm_is_active(obj_priv)) {
- drm_ref(&obj->uobj);
- atomic_setbits_int(&obj->do_flags, I915_ACTIVE);
- }
-
- if (inteldrm_needs_fence(obj_priv)) {
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- reg->last_rendering_seqno = seqno;
- }
- if (obj->write_domain)
- obj_priv->last_write_seqno = seqno;
-
- /* Move from whatever list we were on to the tail of execution. */
- i915_move_to_tail(obj_priv, &dev_priv->mm.active_list);
- obj_priv->last_rendering_seqno = seqno;
-}
-
-void
-i915_gem_object_move_off_active(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- struct inteldrm_fence *reg;
-
- MUTEX_ASSERT_LOCKED(&dev_priv->list_lock);
- DRM_OBJ_ASSERT_LOCKED(obj);
-
- obj_priv->last_rendering_seqno = 0;
- /* if we have a fence register, then reset the seqno */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- reg->last_rendering_seqno = 0;
- }
- if (obj->write_domain == 0)
- obj_priv->last_write_seqno = 0;
-}
-
-/* If you call this on an object that you have held, you must have your own
- * reference, not just the reference from the active list.
- */
-void
-i915_gem_object_move_to_inactive(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
-
- mtx_enter(&dev_priv->list_lock);
- drm_lock_obj(obj);
- /* unlocks list lock and object lock */
- i915_gem_object_move_to_inactive_locked(obj);
-}
-
-/* called locked */
-void
-i915_gem_object_move_to_inactive_locked(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
-
- MUTEX_ASSERT_LOCKED(&dev_priv->list_lock);
- DRM_OBJ_ASSERT_LOCKED(obj);
-
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
- if (obj_priv->pin_count != 0)
- i915_list_remove(obj_priv);
- else
- i915_move_to_tail(obj_priv, &dev_priv->mm.inactive_list);
-
- i915_gem_object_move_off_active(obj);
- atomic_clearbits_int(&obj->do_flags, I915_FENCED_EXEC);
-
- KASSERT((obj->do_flags & I915_GPU_WRITE) == 0);
- /* unlock because this unref could recurse */
- mtx_leave(&dev_priv->list_lock);
- if (inteldrm_is_active(obj_priv)) {
- atomic_clearbits_int(&obj->do_flags,
- I915_ACTIVE);
- drm_unref_locked(&obj->uobj);
- } else {
- drm_unlock_obj(obj);
- }
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-}
-
-void
-inteldrm_purge_obj(struct drm_obj *obj)
-{
- DRM_ASSERT_HELD(obj);
- /*
- * May sleep. We free here instead of deactivating (which
- * the madvise() syscall would do) because in this case
- * (userland bo cache and GL_APPLE_object_purgeable objects in
- * OpenGL) the pages are defined to be freed if they were cleared,
- * so kill them and free up the memory.
- */
- simple_lock(&obj->uao->vmobjlock);
- obj->uao->pgops->pgo_flush(obj->uao, 0, obj->size,
- PGO_ALLPAGES | PGO_FREE);
- simple_unlock(&obj->uao->vmobjlock);
-
- /*
- * If flush failed, it may have halfway through, so just
- * always mark as purged
- */
- atomic_setbits_int(&obj->do_flags, I915_PURGED);
-}
-
-void
-inteldrm_process_flushing(struct inteldrm_softc *dev_priv,
- u_int32_t flush_domains)
-{
- struct inteldrm_obj *obj_priv, *next;
-
- MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
- mtx_enter(&dev_priv->list_lock);
- for (obj_priv = TAILQ_FIRST(&dev_priv->mm.gpu_write_list);
- obj_priv != TAILQ_END(&dev_priv->mm.gpu_write_list);
- obj_priv = next) {
- struct drm_obj *obj = &(obj_priv->obj);
-
- next = TAILQ_NEXT(obj_priv, write_list);
-
- if ((obj->write_domain & flush_domains)) {
- TAILQ_REMOVE(&dev_priv->mm.gpu_write_list,
- obj_priv, write_list);
- atomic_clearbits_int(&obj->do_flags,
- I915_GPU_WRITE);
- i915_gem_object_move_to_active(obj);
- obj->write_domain = 0;
- /* if we still need the fence, update LRU */
- if (inteldrm_needs_fence(obj_priv)) {
- KASSERT(obj_priv->fence_reg !=
- I915_FENCE_REG_NONE);
- /* we have a fence, won't sleep, can't fail;
- * since we have the fence we do not need
- * to have the object held
- */
- i915_gem_get_fence_reg(obj, 1);
- }
-
- }
- }
- mtx_leave(&dev_priv->list_lock);
-}
-
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger an interrupt if interrupts are
- * currently enabled.
- *
- * Must be called with the request_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-uint32_t
-i915_add_request(struct inteldrm_softc *dev_priv)
-{
- struct inteldrm_request *request;
- uint32_t seqno;
- int was_empty;
-
- MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
-
- request = drm_calloc(1, sizeof(*request));
- if (request == NULL) {
- printf("%s: failed to allocate request\n", __func__);
- return 0;
- }
-
- /* Grab the seqno we're going to make this request be, and bump the
- * next (skipping 0 so it can be the reserved no-seqno value).
- */
- seqno = dev_priv->mm.next_gem_seqno;
- dev_priv->mm.next_gem_seqno++;
- if (dev_priv->mm.next_gem_seqno == 0)
- dev_priv->mm.next_gem_seqno++;
-
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- BEGIN_LP_RING(10);
- else
- BEGIN_LP_RING(4);
-
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- DRM_DEBUG("%d\n", seqno);
-
- /* XXX request timing for throttle */
- request->seqno = seqno;
- was_empty = TAILQ_EMPTY(&dev_priv->mm.request_list);
- TAILQ_INSERT_TAIL(&dev_priv->mm.request_list, request, list);
-
- if (dev_priv->mm.suspended == 0) {
- if (was_empty)
- timeout_add_sec(&dev_priv->mm.retire_timer, 1);
- /* XXX was_empty? */
- timeout_add_msec(&dev_priv->mm.hang_timer, 750);
- }
- return seqno;
-}
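-
-/*
- * Seqno wrap example for the bump above: if next_gem_seqno is
- * 0xffffffff, the request gets that value and the counter wraps to 0,
- * which the second increment skips, keeping 0 reserved as the
- * no-seqno value.
- */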
-
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- *
- * Called with the drm_lock held; may sleep with it.
- */
-void
-i915_gem_retire_request(struct inteldrm_softc *dev_priv,
- struct inteldrm_request *request)
-{
- struct inteldrm_obj *obj_priv;
-
- MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
- mtx_enter(&dev_priv->list_lock);
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate. */
- while ((obj_priv = TAILQ_FIRST(&dev_priv->mm.active_list)) != NULL) {
- struct drm_obj *obj = &obj_priv->obj;
-
- /* If the seqno being retired doesn't match the oldest in the
- * list, then the oldest in the list must still be newer than
- * this seqno.
- */
- if (obj_priv->last_rendering_seqno != request->seqno)
- break;
-
- drm_lock_obj(obj);
- /*
- * If we're now clean and can be read from, move inactive,
- * else put on the flushing list to signify that we're not
- * available quite yet.
- */
- if (obj->write_domain != 0) {
- KASSERT(inteldrm_is_active(obj_priv));
- i915_move_to_tail(obj_priv,
- &dev_priv->mm.flushing_list);
- i915_gem_object_move_off_active(obj);
- drm_unlock_obj(obj);
- } else {
- /* unlocks object for us and drops ref */
- i915_gem_object_move_to_inactive_locked(obj);
- mtx_enter(&dev_priv->list_lock);
- }
- }
- mtx_leave(&dev_priv->list_lock);
-}
-
-/**
- * This function clears the request list as sequence numbers are passed.
- */
-void
-i915_gem_retire_requests(struct inteldrm_softc *dev_priv)
-{
- struct inteldrm_request *request;
- uint32_t seqno;
-
- if (dev_priv->hw_status_page == NULL)
- return;
-
- seqno = i915_get_gem_seqno(dev_priv);
-
- mtx_enter(&dev_priv->request_lock);
- while ((request = TAILQ_FIRST(&dev_priv->mm.request_list)) != NULL) {
- if (i915_seqno_passed(seqno, request->seqno) ||
- dev_priv->mm.wedged) {
- TAILQ_REMOVE(&dev_priv->mm.request_list, request, list);
- i915_gem_retire_request(dev_priv, request);
- mtx_leave(&dev_priv->request_lock);
-
- drm_free(request);
- mtx_enter(&dev_priv->request_lock);
- } else
- break;
- }
- mtx_leave(&dev_priv->request_lock);
-}
-
-void
-i915_gem_retire_work_handler(void *arg1, void *unused)
-{
- struct inteldrm_softc *dev_priv = arg1;
-
- i915_gem_retire_requests(dev_priv);
- if (!TAILQ_EMPTY(&dev_priv->mm.request_list))
- timeout_add_sec(&dev_priv->mm.retire_timer, 1);
-}
-
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- *
- * Called locked, sleeps with it.
- */
-int
-i915_wait_request(struct inteldrm_softc *dev_priv, uint32_t seqno,
- int interruptible)
-{
- int ret = 0;
-
- /* Check first because poking a wedged chip is bad. */
- if (dev_priv->mm.wedged)
- return (EIO);
-
- if (seqno == dev_priv->mm.next_gem_seqno) {
- mtx_enter(&dev_priv->request_lock);
- seqno = i915_add_request(dev_priv);
- mtx_leave(&dev_priv->request_lock);
- if (seqno == 0)
- return (ENOMEM);
- }
-
- if (!i915_seqno_passed(i915_get_gem_seqno(dev_priv), seqno)) {
- mtx_enter(&dev_priv->user_irq_lock);
- i915_user_irq_get(dev_priv);
- while (ret == 0) {
- if (i915_seqno_passed(i915_get_gem_seqno(dev_priv),
- seqno) || dev_priv->mm.wedged)
- break;
- ret = msleep(dev_priv, &dev_priv->user_irq_lock,
- PZERO | (interruptible ? PCATCH : 0), "gemwt", 0);
- }
- i915_user_irq_put(dev_priv);
- mtx_leave(&dev_priv->user_irq_lock);
- }
- if (dev_priv->mm.wedged)
- ret = EIO;
-
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0)
- i915_gem_retire_requests(dev_priv);
-
- return (ret);
-}
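-
-/*
- * Note on the next_gem_seqno check above: waiting on a seqno that has
- * not been emitted yet effectively means "wait for everything
- * submitted so far", so a request is added first to have something
- * concrete to sleep on.
- */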
-
-/*
- * Flush and invalidate the provided domains.
- * If we have successfully queued a gpu flush, we return the seqno from
- * the request; otherwise (failure, or only a cpu flush) we return 0.
- */
-u_int32_t
-i915_gem_flush(struct inteldrm_softc *dev_priv, uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- uint32_t cmd;
- u_int32_t ret = 0;
-
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- inteldrm_chipset_flush(dev_priv);
- if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
- return (0);
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains | flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (!IS_I965G(dev_priv) &&
- invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
- mtx_enter(&dev_priv->request_lock);
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- /* if this is a gpu flush, process the results */
- if (flush_domains & I915_GEM_GPU_DOMAINS) {
- inteldrm_process_flushing(dev_priv, flush_domains);
- ret = i915_add_request(dev_priv);
- }
- mtx_leave(&dev_priv->request_lock);
-
- return (ret);
-}
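-
-/*
- * Example of the command word built above: invalidating RENDER and
- * SAMPLER on a pre-965 chip clears MI_NO_WRITE_FLUSH (the render
- * cache is written back) and sets MI_READ_FLUSH, leaving
- * cmd = MI_FLUSH | MI_READ_FLUSH.
- */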
-
-/**
- * Unbinds an object from the GTT aperture.
- *
- * XXX track dirty and pass down to uvm (note, DONTNEED buffers are clean).
- */
-int
-i915_gem_object_unbind(struct drm_obj *obj, int interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- int ret = 0;
-
- DRM_ASSERT_HELD(obj);
- /*
- * if it's already unbound, or we've already done lastclose, just
- * let it happen. XXX does this fail to unwire?
- */
- if (obj_priv->dmamap == NULL || dev_priv->agpdmat == NULL)
- return 0;
-
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Attempting to unbind pinned buffer\n");
- return (EINVAL);
- }
-
- KASSERT(!i915_obj_purged(obj_priv));
-
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it. This will
- * also ensure that all pending GPU writes are finished
- * before we unbind.
- */
- ret = i915_gem_object_set_to_cpu_domain(obj, 1, interruptible);
- if (ret)
- return ret;
-
- KASSERT(!inteldrm_is_active(obj_priv));
-
- /* if it's purgeable don't bother dirtying the pages */
- if (i915_obj_purgeable(obj_priv))
- atomic_clearbits_int(&obj->do_flags, I915_DIRTY);
- /*
- * unload the map, then unwire the backing object.
- */
- i915_gem_save_bit_17_swizzle(obj);
- bus_dmamap_unload(dev_priv->agpdmat, obj_priv->dmamap);
- uvm_objunwire(obj->uao, 0, obj->size);
- /* XXX persistent dmamap worth the memory? */
- bus_dmamap_destroy(dev_priv->agpdmat, obj_priv->dmamap);
- obj_priv->dmamap = NULL;
- free(obj_priv->dma_segs, M_DRM);
- obj_priv->dma_segs = NULL;
- /* XXX this should change whether we tell uvm the page is dirty */
- atomic_clearbits_int(&obj->do_flags, I915_DIRTY);
-
- obj_priv->gtt_offset = 0;
- atomic_dec(&dev->gtt_count);
- atomic_sub(obj->size, &dev->gtt_memory);
-
- /* Remove ourselves from any LRU list if present. */
- i915_list_remove((struct inteldrm_obj *)obj);
-
- if (i915_obj_purgeable(obj_priv))
- inteldrm_purge_obj(obj);
-
- return (0);
-}
-
-int
-i915_gem_evict_something(struct inteldrm_softc *dev_priv, size_t min_size,
- int interruptible)
-{
- struct drm_obj *obj;
- struct inteldrm_request *request;
- struct inteldrm_obj *obj_priv;
- u_int32_t seqno;
- int ret = 0, write_domain = 0;
-
- for (;;) {
- i915_gem_retire_requests(dev_priv);
-
- /* If there's an inactive buffer available now, grab it
- * and be done.
- */
- obj = i915_gem_find_inactive_object(dev_priv, min_size);
- if (obj != NULL) {
- obj_priv = (struct inteldrm_obj *)obj;
- /* find inactive object returns the object with a
- * reference for us, and held
- */
- KASSERT(obj_priv->pin_count == 0);
- KASSERT(!inteldrm_is_active(obj_priv));
- DRM_ASSERT_HELD(obj);
-
- /* Wait on the rendering and unbind the buffer. */
- ret = i915_gem_object_unbind(obj, interruptible);
- drm_unhold_and_unref(obj);
- return (ret);
- }
-
- /* If we didn't get anything, but the ring is still processing
- * things, wait for one of those things to finish and hopefully
- * leave us a buffer to evict.
- */
- mtx_enter(&dev_priv->request_lock);
- if ((request = TAILQ_FIRST(&dev_priv->mm.request_list))
- != NULL) {
- seqno = request->seqno;
- mtx_leave(&dev_priv->request_lock);
-
- ret = i915_wait_request(dev_priv, seqno, interruptible);
- if (ret)
- return (ret);
-
- continue;
- }
- mtx_leave(&dev_priv->request_lock);
-
- /* If we didn't have anything on the request list but there
- * are buffers awaiting a flush, emit one and try again.
- * When we wait on it, those buffers waiting for that flush
- * will get moved to inactive.
- */
- mtx_enter(&dev_priv->list_lock);
- TAILQ_FOREACH(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = &obj_priv->obj;
- if (obj->size >= min_size) {
- write_domain = obj->write_domain;
- break;
- }
- obj = NULL;
- }
- mtx_leave(&dev_priv->list_lock);
-
- if (write_domain) {
- if (i915_gem_flush(dev_priv, write_domain,
- write_domain) == 0)
- return (ENOMEM);
- continue;
- }
-
- /*
- * If we didn't do any of the above, there's no single buffer
- * large enough to swap out for the new one, so just evict
- * everything and start again. (This should be rare.)
- */
- if (!TAILQ_EMPTY(&dev_priv->mm.inactive_list))
- return (i915_gem_evict_inactive(dev_priv,
- interruptible));
- else
- return (i915_gem_evict_everything(dev_priv,
- interruptible));
- }
- /* NOTREACHED */
-}
-
-struct drm_obj *
-i915_gem_find_inactive_object(struct inteldrm_softc *dev_priv,
- size_t min_size)
-{
- struct drm_obj *obj, *best = NULL, *first = NULL;
- struct inteldrm_obj *obj_priv;
-
- /*
- * We don't need references to the object as long as we hold the list
- * lock; they won't disappear until we release the lock.
- */
- mtx_enter(&dev_priv->list_lock);
- TAILQ_FOREACH(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = &obj_priv->obj;
- if (obj->size >= min_size) {
- if ((!inteldrm_is_dirty(obj_priv) ||
- i915_obj_purgeable(obj_priv)) &&
- (best == NULL || obj->size < best->size)) {
- best = obj;
- if (best->size == min_size)
- break;
- }
- }
- if (first == NULL)
- first = obj;
- }
- if (best == NULL)
- best = first;
- if (best) {
- drm_ref(&best->uobj);
- /*
- * if we couldn't grab it, we may as well fail and go
- * onto the next step for the sake of simplicity.
- */
- if (drm_try_hold_object(best) == 0) {
- drm_unref(&best->uobj);
- best = NULL;
- }
- }
- mtx_leave(&dev_priv->list_lock);
- return (best);
-}
-
-int
-i915_gem_evict_everything(struct inteldrm_softc *dev_priv, int interruptible)
-{
- u_int32_t seqno;
- int ret;
-
- if (TAILQ_EMPTY(&dev_priv->mm.inactive_list) &&
- TAILQ_EMPTY(&dev_priv->mm.flushing_list) &&
- TAILQ_EMPTY(&dev_priv->mm.active_list))
- return (ENOSPC);
-
- seqno = i915_gem_flush(dev_priv, I915_GEM_GPU_DOMAINS,
- I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
- return (ENOMEM);
-
- if ((ret = i915_wait_request(dev_priv, seqno, interruptible)) != 0 ||
- (ret = i915_gem_evict_inactive(dev_priv, interruptible)) != 0)
- return (ret);
-
- /*
- * All lists should be empty because we flushed the whole queue, then
- * we evicted the whole shebang, only pinned objects are still bound.
- */
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.inactive_list));
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.flushing_list));
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.active_list));
-
- return (0);
-}
-
-/*
- * return required GTT alignment for an object, taking into account potential
- * fence register needs
- */
-bus_size_t
-i915_gem_get_gtt_alignment(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- bus_size_t start, i;
-
- /*
- * Minimum alignment is 4k (GTT page size), but fence registers may
- * modify this
- */
- if (IS_I965G(dev_priv) || obj_priv->tiling_mode == I915_TILING_NONE)
- return (4096);
-
- /*
- * Older chips need to be aligned to the size of the smallest fence
- * register that can contain the object.
- */
- if (IS_I9XX(dev_priv))
- start = 1024 * 1024;
- else
- start = 512 * 1024;
-
- for (i = start; i < obj->size; i <<= 1)
- ;
-
- return (i);
-}
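-
-/*
- * Example of the doubling loop above: a 300KB tiled object on an i9xx
- * chip starts at 1MB and stays there (1MB covers 300KB), while a
- * 1.5MB object doubles once and ends up aligned to 2MB.
- */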
-
-void
-sandybridge_write_fence_reg(struct inteldrm_fence *reg)
-{
- struct drm_obj *obj = reg->obj;
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- int regnum = obj_priv->fence_reg;
- u_int64_t val;
-
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
- 0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
- SANDYBRIDGE_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
-
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
-}
-
-void
-i965_write_fence_reg(struct inteldrm_fence *reg)
-{
- struct drm_obj *obj = reg->obj;
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- int regnum = obj_priv->fence_reg;
- u_int64_t val;
-
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
- 0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
-
- I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
-}
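-
-/*
- * Encoding example for the 965 register above (values illustrative):
- * a 1MB object at gtt_offset 0x00100000 with a 4096-byte stride puts
- * (0x00200000 - 4096) & 0xfffff000 = 0x001ff000 in the high word,
- * 0x00100000 in the low word, and a pitch field of
- * 4096 / 128 - 1 = 31.
- */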
-
-void
-i915_write_fence_reg(struct inteldrm_fence *reg)
-{
- struct drm_obj *obj = reg->obj;
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- bus_size_t fence_reg;
- u_int32_t val;
- u_int32_t pitch_val;
- int regnum = obj_priv->fence_reg;
- int tile_width;
-
- if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- DRM_ERROR("object 0x%lx not 1M or size (0x%zx) aligned\n",
- obj_priv->gtt_offset, obj->size);
- return;
- }
-
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev_priv))
- tile_width = 128;
- else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = obj_priv->stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
-
- /* XXX print more */
- if ((obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev_priv) &&
- pitch_val > I830_FENCE_MAX_PITCH_VAL) ||
- pitch_val > I915_FENCE_MAX_PITCH_VAL)
- printf("%s: invalid pitch provided", __func__);
-
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(obj->size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
-
- if (regnum < 8)
- fence_reg = FENCE_REG_830_0 + (regnum * 4);
- else
- fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
- I915_WRITE(fence_reg, val);
-}
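-
-/*
- * Pitch example for the log2 conversion above: an X-tiled object with
- * a 2048-byte stride and 512-byte tile width spans 2048 / 512 = 4
- * tiles per row, so pitch_val = ffs(4) - 1 = 2.
- */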
-
-void
-i830_write_fence_reg(struct inteldrm_fence *reg)
-{
- struct drm_obj *obj = reg->obj;
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- int regnum = obj_priv->fence_reg;
- u_int32_t pitch_val, val;
-
- if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- DRM_ERROR("object 0x%08x not 512K or size aligned 0x%lx\n",
- obj_priv->gtt_offset, obj->size);
- return;
- }
-
- pitch_val = ffs(obj_priv->stride / 128) - 1;
-
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I830_FENCE_SIZE_BITS(obj->size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
-
- I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
-}
-
-/*
- * i915_gem_get_fence_reg - set up a fence reg for an object
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- *
- * This function walks the fence regs looking for a free one, stealing one
- * if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- */
-int
-i915_gem_get_fence_reg(struct drm_obj *obj, int interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- struct inteldrm_obj *old_obj_priv = NULL;
- struct drm_obj *old_obj = NULL;
- struct inteldrm_fence *reg = NULL;
- int i, ret, avail;
-
- /* If our fence is getting used, just update our place in the LRU */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- mtx_enter(&dev_priv->fence_lock);
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-
- TAILQ_REMOVE(&dev_priv->mm.fence_list, reg, list);
- TAILQ_INSERT_TAIL(&dev_priv->mm.fence_list, reg, list);
- mtx_leave(&dev_priv->fence_lock);
- return (0);
- }
-
- DRM_ASSERT_HELD(obj);
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- DRM_ERROR("allocating a fence for non-tiled object?\n");
- break;
- case I915_TILING_X:
- if (obj_priv->stride == 0)
- return (EINVAL);
- if (obj_priv->stride & (512 - 1))
- DRM_ERROR("object 0x%08x is X tiled but has non-512B"
- " pitch\n", obj_priv->gtt_offset);
- break;
- case I915_TILING_Y:
- if (obj_priv->stride == 0)
- return (EINVAL);
- if (obj_priv->stride & (128 - 1))
- DRM_ERROR("object 0x%08x is Y tiled but has non-128B"
- " pitch\n", obj_priv->gtt_offset);
- break;
- }
-
-again:
- /* First try to find a free reg */
- avail = 0;
- mtx_enter(&dev_priv->fence_lock);
- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (reg->obj == NULL)
- break;
-
- old_obj_priv = (struct inteldrm_obj *)reg->obj;
- if (old_obj_priv->pin_count == 0)
- avail++;
- }
-
- /* None available, try to steal one or wait for a user to finish */
- if (i == dev_priv->num_fence_regs) {
- if (avail == 0) {
- mtx_leave(&dev_priv->fence_lock);
- return (ENOMEM);
- }
-
- TAILQ_FOREACH(reg, &dev_priv->mm.fence_list,
- list) {
- old_obj = reg->obj;
- old_obj_priv = (struct inteldrm_obj *)old_obj;
-
- if (old_obj_priv->pin_count)
- continue;
-
- /* Ref it so that wait_rendering doesn't free it under
- * us. if we can't hold it, it may change state soon
- * so grab the next one.
- */
- drm_ref(&old_obj->uobj);
- if (drm_try_hold_object(old_obj) == 0) {
- drm_unref(&old_obj->uobj);
- continue;
- }
-
- break;
- }
- mtx_leave(&dev_priv->fence_lock);
-
- /* if we tried all of them, give it another whirl. we failed to
- * get a hold this go round.
- */
- if (reg == NULL)
- goto again;
-
- ret = i915_gem_object_put_fence_reg(old_obj, interruptible);
- drm_unhold_and_unref(old_obj);
- if (ret != 0)
- return (ret);
- /* we should have freed one up now, so relock and re-search */
- goto again;
- }
-
- /*
- * Here we will either have found a register in the first
- * loop, or we will have waited for one; in the latter case we
- * will have grabbed the object in question, freed the register,
- * then redone the second loop (having relocked the fence list).
- * Therefore at this point it is impossible to have a null value
- * in reg.
- */
- KASSERT(reg != NULL);
-
- obj_priv->fence_reg = i;
- reg->obj = obj;
- TAILQ_INSERT_TAIL(&dev_priv->mm.fence_list, reg, list);
-
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- sandybridge_write_fence_reg(reg);
- else if (IS_I965G(dev_priv))
- i965_write_fence_reg(reg);
- else if (IS_I9XX(dev_priv))
- i915_write_fence_reg(reg);
- else
- i830_write_fence_reg(reg);
- mtx_leave(&dev_priv->fence_lock);
-
- return 0;
-}
-
-int
-i915_gem_object_put_fence_reg(struct drm_obj *obj, int interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- struct inteldrm_fence *reg;
- int ret;
-
- DRM_ASSERT_HELD(obj);
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
- return (0);
-
- /*
- * If the last execbuffer we did on the object needed a fence then
- * we must emit a flush.
- */
- if (inteldrm_needs_fence(obj_priv)) {
- ret = i915_gem_object_flush_gpu_write_domain(obj, 1,
- interruptible, 0);
- if (ret != 0)
- return (ret);
- }
-
- /* if rendering is queued up that depends on the fence, wait for it */
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- if (reg->last_rendering_seqno != 0) {
- ret = i915_wait_request(dev_priv, reg->last_rendering_seqno,
- interruptible);
- if (ret != 0)
- return (ret);
- }
-
- /* tiling changed, must wipe userspace mappings */
- if ((obj->write_domain | obj->read_domains) & I915_GEM_DOMAIN_GTT) {
- inteldrm_wipe_mappings(obj);
- if (obj->write_domain == I915_GEM_DOMAIN_GTT)
- obj->write_domain = 0;
- }
-
- mtx_enter(&dev_priv->fence_lock);
- if (IS_I965G(dev_priv)) {
- I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- } else {
- u_int32_t fence_reg;
-
- if (obj_priv->fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
- else
- fence_reg = FENCE_REG_945_8 +
- (obj_priv->fence_reg - 8) * 4;
- I915_WRITE(fence_reg, 0);
- }
-
- reg->obj = NULL;
- TAILQ_REMOVE(&dev_priv->mm.fence_list, reg, list);
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
- mtx_leave(&dev_priv->fence_lock);
- atomic_clearbits_int(&obj->do_flags, I915_FENCE_INVALID);
-
- return (0);
-}
-
-int
-inteldrm_fault(struct drm_obj *obj, struct uvm_faultinfo *ufi, off_t offset,
- vaddr_t vaddr, vm_page_t *pps, int npages, int centeridx,
- vm_prot_t access_type, int flags)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- paddr_t paddr;
- int lcv, ret;
- int write = !!(access_type & VM_PROT_WRITE);
- vm_prot_t mapprot;
- boolean_t locked = TRUE;
-
- /* Are we about to suspend? If so, wait until we're done */
- if (dev_priv->sc_flags & INTELDRM_QUIET) {
- /* we're about to sleep, unlock the map etc */
- uvmfault_unlockall(ufi, NULL, &obj->uobj, NULL);
- while (dev_priv->sc_flags & INTELDRM_QUIET)
- tsleep(&dev_priv->flags, 0, "intelflt", 0);
- dev_priv->entries++;
- /*
- * relock so we're in the same state we would be in if we
- * were not quiesced before
- */
- locked = uvmfault_relock(ufi);
- if (locked) {
- drm_lock_obj(obj);
- } else {
- dev_priv->entries--;
- if (dev_priv->sc_flags & INTELDRM_QUIET)
- wakeup(&dev_priv->entries);
- return (VM_PAGER_REFAULT);
- }
- } else {
- dev_priv->entries++;
- }
-
- if (rw_enter(&dev->dev_lock, RW_NOSLEEP | RW_READ) != 0) {
- uvmfault_unlockall(ufi, NULL, &obj->uobj, NULL);
- DRM_READLOCK();
- locked = uvmfault_relock(ufi);
- if (locked)
- drm_lock_obj(obj);
- }
- if (locked)
- drm_hold_object_locked(obj);
- else { /* obj already unlocked */
- dev_priv->entries--;
- if (dev_priv->sc_flags & INTELDRM_QUIET)
- wakeup(&dev_priv->entries);
- return (VM_PAGER_REFAULT);
- }
-
- /* we have a hold set on the object now, we can unlock so that we can
- * sleep in binding and flushing.
- */
- drm_unlock_obj(obj);
-
- if (obj_priv->dmamap != NULL &&
- (obj_priv->gtt_offset & (i915_gem_get_gtt_alignment(obj) - 1) ||
- (!i915_gem_object_fence_offset_ok(obj, obj_priv->tiling_mode)))) {
- /*
- * pinned objects are defined to have a sane alignment which
- * cannot change.
- */
- KASSERT(obj_priv->pin_count == 0);
- if ((ret = i915_gem_object_unbind(obj, 0)))
- goto error;
- }
-
- if (obj_priv->dmamap == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, 0, 0);
- if (ret) {
- printf("%s: failed to bind\n", __func__);
- goto error;
- }
- i915_gem_object_move_to_inactive(obj);
- }
-
- /*
- * We could do this only at bind time, to allow for map_buffer_range
- * style unsynchronised objects (where buffer suballocation
- * is done by the GL application); however, that normally causes
- * coherency problems.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, write, 0);
- if (ret) {
- printf("%s: failed to set to gtt (%d)\n",
- __func__, ret);
- goto error;
- }
-
- mapprot = ufi->entry->protection;
- /*
- * if it's only a read fault, we only put ourselves into the gtt
- * read domain, so make sure we fault again and set ourselves to write.
- * this prevents us needing userland to do domain management and get
- * it wrong, and makes us fully coherent with the gpu re mmap.
- */
- if (write == 0)
- mapprot &= ~VM_PROT_WRITE;
- /* XXX try and be more efficient when we do this */
- for (lcv = 0 ; lcv < npages ; lcv++, offset += PAGE_SIZE,
- vaddr += PAGE_SIZE) {
- if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
- continue;
-
- if (pps[lcv] == PGO_DONTCARE)
- continue;
-
- paddr = dev->agp->base + obj_priv->gtt_offset + offset;
-
- if (pmap_enter(ufi->orig_map->pmap, vaddr, paddr,
- mapprot, PMAP_CANFAIL | mapprot) != 0) {
- drm_unhold_object(obj);
- uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
- NULL, NULL);
- DRM_READUNLOCK();
- dev_priv->entries--;
- if (dev_priv->sc_flags & INTELDRM_QUIET)
- wakeup(&dev_priv->entries);
- uvm_wait("intelflt");
- return (VM_PAGER_REFAULT);
- }
- }
-error:
- drm_unhold_object(obj);
- uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL, NULL);
- DRM_READUNLOCK();
- dev_priv->entries--;
- if (dev_priv->sc_flags & INTELDRM_QUIET)
- wakeup(&dev_priv->entries);
- pmap_update(ufi->orig_map->pmap);
- if (ret == EIO) {
- /*
- * EIO means we're wedged, so upon resetting the gpu we'll
- * be alright and can refault. XXX only on resettable chips.
- */
- ret = VM_PAGER_REFAULT;
- } else if (ret) {
- ret = VM_PAGER_ERROR;
- } else {
- ret = VM_PAGER_OK;
- }
- return (ret);
-}
-
-void
-inteldrm_wipe_mappings(struct drm_obj *obj)
-{
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct vm_page *pg;
-
- DRM_ASSERT_HELD(obj);
- /* make sure any writes hit the bus before we do whatever change
- * that prompted us to kill the mappings.
- */
- DRM_MEMORYBARRIER();
- /* nuke all our mappings. XXX optimise. */
- for (pg = &dev_priv->pgs[atop(obj_priv->gtt_offset)]; pg !=
- &dev_priv->pgs[atop(obj_priv->gtt_offset + obj->size)]; pg++)
- pmap_page_protect(pg, VM_PROT_NONE);
-}
-
-/**
- * Finds free space in the GTT aperture and binds the object there.
- */
-int
-i915_gem_object_bind_to_gtt(struct drm_obj *obj, bus_size_t alignment,
- int interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- int ret;
-
- DRM_ASSERT_HELD(obj);
- if (dev_priv->agpdmat == NULL)
- return (EINVAL);
- if (alignment == 0) {
- alignment = i915_gem_get_gtt_alignment(obj);
- } else if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
- DRM_ERROR("Invalid object alignment requested %u\n", alignment);
- return (EINVAL);
- }
-
- /* Can't bind a purgeable buffer */
- if (i915_obj_purgeable(obj_priv)) {
- printf("tried to bind purgeable buffer");
- return (EINVAL);
- }
-
- if ((ret = bus_dmamap_create(dev_priv->agpdmat, obj->size, 1,
- obj->size, 0, BUS_DMA_WAITOK, &obj_priv->dmamap)) != 0) {
- DRM_ERROR("Failed to create dmamap\n");
- return (ret);
- }
- agp_bus_dma_set_alignment(dev_priv->agpdmat, obj_priv->dmamap,
- alignment);
-
- search_free:
- /*
- * the helper function wires the uao then binds it to the aperture for
- * us, so all we have to do is set up the dmamap then load it.
- */
- ret = drm_gem_load_uao(dev_priv->agpdmat, obj_priv->dmamap, obj->uao,
- obj->size, BUS_DMA_WAITOK | obj_priv->dma_flags,
- &obj_priv->dma_segs);
- /* XXX NOWAIT? */
- if (ret != 0) {
- /* If the gtt is empty and we're still having trouble
- * fitting our object in, we're out of memory.
- */
- if (TAILQ_EMPTY(&dev_priv->mm.inactive_list) &&
- TAILQ_EMPTY(&dev_priv->mm.flushing_list) &&
- TAILQ_EMPTY(&dev_priv->mm.active_list)) {
- DRM_ERROR("GTT full, but LRU list empty\n");
- goto error;
- }
-
- ret = i915_gem_evict_something(dev_priv, obj->size,
- interruptible);
- if (ret != 0)
- goto error;
- goto search_free;
- }
- i915_gem_bit_17_swizzle(obj);
-
- obj_priv->gtt_offset = obj_priv->dmamap->dm_segs[0].ds_addr -
- dev->agp->base;
-
- atomic_inc(&dev->gtt_count);
- atomic_add(obj->size, &dev->gtt_memory);
-
- /* Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- KASSERT((obj->read_domains & I915_GEM_GPU_DOMAINS) == 0);
- KASSERT((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0);
-
- return (0);
-
-error:
- bus_dmamap_destroy(dev_priv->agpdmat, obj_priv->dmamap);
- obj_priv->dmamap = NULL;
- obj_priv->gtt_offset = 0;
- return (ret);
-}
-
-/*
- * Flush the GPU write domain for the object if dirty, then wait for the
- * rendering to complete. When this returns it is safe to unbind from the
- * GTT or access from the CPU.
- */
-int
-i915_gem_object_flush_gpu_write_domain(struct drm_obj *obj, int pipelined,
- int interruptible, int write)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- u_int32_t seqno;
- int ret = 0;
-
- DRM_ASSERT_HELD(obj);
- if ((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0) {
- /*
- * Queue the GPU write cache flushing we need.
- * This call will move stuff from the flushing list to the
- * active list so all we need to do is wait for it.
- */
- (void)i915_gem_flush(dev_priv, 0, obj->write_domain);
- KASSERT(obj->write_domain == 0);
- }
-
- /* wait for queued rendering so we know it's flushed and bo is idle */
- if (pipelined == 0 && inteldrm_is_active(obj_priv)) {
- if (write) {
- seqno = obj_priv->last_rendering_seqno;
- } else {
- seqno = obj_priv->last_write_seqno;
- }
- ret = i915_wait_request(dev_priv, seqno, interruptible);
- }
- return (ret);
-}
-
-/*
- * Moves a single object to the GTT and possibly write domain.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-int
-i915_gem_object_set_to_gtt_domain(struct drm_obj *obj, int write,
- int interruptible)
-{
- struct drm_device *dev = (struct drm_device *)obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- int ret;
-
- DRM_ASSERT_HELD(obj);
- /* Not valid to be called on unbound objects. */
- if (obj_priv->dmamap == NULL)
- return (EINVAL);
-
- /* Wait on any GPU rendering and flushing to occur. */
- if ((ret = i915_gem_object_flush_gpu_write_domain(obj, 0,
- interruptible, write)) != 0)
- return (ret);
-
- if (obj->write_domain == I915_GEM_DOMAIN_CPU) {
- /* clflush the pages, and flush chipset cache */
- bus_dmamap_sync(dev_priv->agpdmat, obj_priv->dmamap, 0,
- obj->size, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- inteldrm_chipset_flush(dev_priv);
- obj->write_domain = 0;
- }
-
- /* We're accessing through the gpu, so grab a new fence register or
- * update the LRU.
- */
- if (obj->do_flags & I915_FENCE_INVALID) {
- ret = i915_gem_object_put_fence_reg(obj, interruptible);
- if (ret)
- return (ret);
- }
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- ret = i915_gem_get_fence_reg(obj, interruptible);
-
- /*
- * If we're writing through the GTT domain then the CPU and GPU caches
- * will need to be invalidated at next use.
- * It should now be out of any other write domains and we can update
- * to the correct ones
- */
- KASSERT((obj->write_domain & ~I915_GEM_DOMAIN_GTT) == 0);
- if (write) {
- obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_GTT;
- atomic_setbits_int(&obj->do_flags, I915_DIRTY);
- } else {
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
- }
-
- return (ret);
-}
-
-/*
- * Moves a single object to the CPU read and possibly write domain.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to return.
- */
-int
-i915_gem_object_set_to_cpu_domain(struct drm_obj *obj, int write,
- int interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- int ret;
-
- DRM_ASSERT_HELD(obj);
- /* Wait on any GPU rendering and flushing to occur. */
- if ((ret = i915_gem_object_flush_gpu_write_domain(obj, 0,
- interruptible, write)) != 0)
- return (ret);
-
- if (obj->write_domain == I915_GEM_DOMAIN_GTT ||
- (write && obj->read_domains & I915_GEM_DOMAIN_GTT)) {
- /*
- * No actual flushing is required for the GTT write domain.
- * Writes to it immediately go to main memory as far as we
- * know, so there's no chipset flush. It also doesn't land
- * in render cache.
- */
- inteldrm_wipe_mappings(obj);
- if (obj->write_domain == I915_GEM_DOMAIN_GTT)
- obj->write_domain = 0;
- }
-
- /* remove the fence register since we're not using it anymore */
- if ((ret = i915_gem_object_put_fence_reg(obj, interruptible)) != 0)
- return (ret);
-
- /* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
- bus_dmamap_sync(dev_priv->agpdmat, obj_priv->dmamap, 0,
- obj->size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
- }
-
- /*
- * It should now be out of any other write domain, and we can update
- * the domain value for our changes.
- */
- KASSERT((obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0);
-
- /*
- * If we're writing through the CPU, then the GPU read domains will
- * need to be invalidated at next use.
- */
- if (write)
- obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
-
- return (0);
-}
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invalidating though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * flush_gpu_write (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read/written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-void
-i915_gem_object_set_to_gpu_domain(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- u_int32_t invalidate_domains = 0;
- u_int32_t flush_domains = 0;
-
- DRM_ASSERT_HELD(obj);
- KASSERT((obj->pending_read_domains & I915_GEM_DOMAIN_CPU) == 0);
- KASSERT(obj->pending_write_domain != I915_GEM_DOMAIN_CPU);
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->pending_write_domain == 0)
- obj->pending_read_domains |= obj->read_domains;
- else
- atomic_setbits_int(&obj->do_flags, I915_DIRTY);
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->write_domain &&
- obj->write_domain != obj->pending_read_domains) {
- flush_domains |= obj->write_domain;
- invalidate_domains |= obj->pending_read_domains &
- ~obj->write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- /* clflush the cpu now, gpu caches get queued. */
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
- bus_dmamap_sync(dev_priv->agpdmat, obj_priv->dmamap, 0,
- obj->size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- }
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) {
- inteldrm_wipe_mappings(obj);
- }
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all of
- * the domain changes in execuffer (which clears object's write
- * domains). So if we have a current write domain that we aren't
- * changing, set pending_write_domain to it.
- */
- if (flush_domains == 0 && obj->pending_write_domain == 0 &&
- (obj->pending_read_domains == obj->write_domain ||
- obj->write_domain == 0))
- obj->pending_write_domain = obj->write_domain;
- obj->read_domains = obj->pending_read_domains;
- obj->pending_read_domains = 0;
-
- dev->invalidate_domains |= invalidate_domains;
- dev->flush_domains |= flush_domains;
-}
-
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
-int
-i915_gem_object_pin_and_relocate(struct drm_obj *obj,
- struct drm_file *file_priv, struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *relocs)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_obj *target_obj;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- bus_space_handle_t bsh;
- int i, ret, needs_fence;
-
- DRM_ASSERT_HELD(obj);
- needs_fence = ((entry->flags & EXEC_OBJECT_NEEDS_FENCE) &&
- obj_priv->tiling_mode != I915_TILING_NONE);
- if (needs_fence)
- atomic_setbits_int(&obj->do_flags, I915_EXEC_NEEDS_FENCE);
-
- /* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (u_int32_t)entry->alignment,
- needs_fence);
- if (ret)
- return ret;
-
- entry->offset = obj_priv->gtt_offset;
-
- /* Apply the relocations, using the GTT aperture to avoid cache
- * flushing requirements.
- */
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
- struct inteldrm_obj *target_obj_priv;
- uint32_t reloc_val, reloc_offset;
-
- target_obj = drm_gem_object_lookup(obj->dev, file_priv,
- reloc->target_handle);
- /* object must have come before us in the list */
- if (target_obj == NULL) {
- i915_gem_object_unpin(obj);
- return (EBADF);
- }
- if ((target_obj->do_flags & I915_IN_EXEC) == 0) {
- printf("%s: object not already in execbuffer\n",
- __func__);
- ret = EBADF;
- goto err;
- }
-
- target_obj_priv = (struct inteldrm_obj *)target_obj;
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_obj_priv->dmamap == 0) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc->target_handle);
- ret = EINVAL;
- goto err;
- }
-
- /* must be in one write domain and one only */
- if (reloc->write_domain & (reloc->write_domain - 1)) {
- ret = EINVAL;
- goto err;
- }
- if (reloc->read_domains & I915_GEM_DOMAIN_CPU ||
- reloc->write_domain & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("relocation with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x", obj,
- reloc->target_handle, (int)reloc->offset,
- reloc->read_domains, reloc->write_domain);
- ret = EINVAL;
- goto err;
- }
-
- if (reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- ret = EINVAL;
- goto err;
- }
-
- target_obj->pending_read_domains |= reloc->read_domains;
- target_obj->pending_write_domain |= reloc->write_domain;
-
- if (reloc->offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset, (int) obj->size);
- ret = EINVAL;
- goto err;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- ret = EINVAL;
- goto err;
- }
-
- if (reloc->delta > target_obj->size) {
- DRM_ERROR("reloc larger than target\n");
- ret = EINVAL;
- goto err;
- }
-
- /* Map the page containing the relocation we're going to
- * perform.
- */
- reloc_offset = obj_priv->gtt_offset + reloc->offset;
- reloc_val = target_obj_priv->gtt_offset + reloc->delta;
-
- if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
- drm_gem_object_unreference(target_obj);
- continue;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, 1, 1);
- if (ret != 0)
- goto err;
-
- if ((ret = agp_map_subregion(dev_priv->agph,
- trunc_page(reloc_offset), PAGE_SIZE, &bsh)) != 0) {
- DRM_ERROR("map failed...\n");
- goto err;
- }
-
- bus_space_write_4(dev_priv->bst, bsh, reloc_offset & PAGE_MASK,
- reloc_val);
-
- reloc->presumed_offset = target_obj_priv->gtt_offset;
-
- agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
- drm_gem_object_unreference(target_obj);
- }
-
- return 0;
-
-err:
- /* we always jump to here mid-loop */
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return (ret);
-}
-
-/** Dispatch a batchbuffer to the ring
- */
-void
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer2 *exec, uint64_t exec_offset)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- uint32_t exec_start, exec_len;
-
- MUTEX_ASSERT_LOCKED(&dev_priv->request_lock);
- exec_start = (uint32_t)exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t)exec->batch_len;
-
- if (IS_I830(dev_priv) || IS_845G(dev_priv)) {
- BEGIN_LP_RING(6);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- OUT_RING(exec_start + exec_len - 4);
- OUT_RING(MI_NOOP);
- } else {
- BEGIN_LP_RING(4);
- if (IS_I965G(dev_priv)) {
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- OUT_RING(MI_BATCH_BUFFER_START |
- MI_BATCH_NON_SECURE_I965);
- else
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- OUT_RING(exec_start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- }
- }
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires (from a subsequent request
- * added). We get back a seqno representing the execution of the
- * current buffer, which we can wait on. We would like to mitigate
- * these interrupts, likely by only creating seqnos occasionally
- * (so that we have *some* interrupts representing completion of
- * buffers that we can wait on when trying to clear up gtt space).
- */
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- /*
- * move_to_active already associated all previous buffers with the
- * seqno that this call will emit, so we don't need the return value.
- * If it fails, the next seqno will take care of it.
- */
- (void)i915_add_request(dev_priv);
-
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-}
-
-/* Throttle our rendering by waiting until the ring has completed our requests
- * emitted over 20 msec ago.
- *
- * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
- */
-int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
-{
-#if 0
- struct inteldrm_file *intel_file = (struct inteldrm_file *)file_priv;
- u_int32_t seqno;
-#endif
- int ret = 0;
-
- return ret;
-}
-
-int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
- u_int32_t buffer_count, struct drm_i915_gem_relocation_entry **relocs)
-{
- u_int32_t reloc_count = 0, reloc_index = 0, i;
- int ret;
-
- *relocs = NULL;
- for (i = 0; i < buffer_count; i++) {
- if (reloc_count + exec_list[i].relocation_count < reloc_count)
- return (EINVAL);
- reloc_count += exec_list[i].relocation_count;
- }
-
- if (reloc_count == 0)
- return (0);
-
- if (SIZE_MAX / reloc_count < sizeof(**relocs))
- return (EINVAL);
- if ((*relocs = drm_alloc(reloc_count * sizeof(**relocs))) == NULL)
- return (ENOMEM);
- for (i = 0; i < buffer_count; i++) {
- if ((ret = copyin((void *)(uintptr_t)exec_list[i].relocs_ptr,
- &(*relocs)[reloc_index], exec_list[i].relocation_count *
- sizeof(**relocs))) != 0) {
- drm_free(*relocs);
- *relocs = NULL;
- return (ret);
- }
- reloc_index += exec_list[i].relocation_count;
- }
-
- return (0);
-}
-
-int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
- u_int32_t buffer_count, struct drm_i915_gem_relocation_entry *relocs)
-{
- u_int32_t reloc_count = 0, i;
- int ret = 0;
-
- if (relocs == NULL)
- return (0);
-
- for (i = 0; i < buffer_count; i++) {
- if ((ret = copyout(&relocs[reloc_count],
- (void *)(uintptr_t)exec_list[i].relocs_ptr,
- exec_list[i].relocation_count * sizeof(*relocs))) != 0)
- break;
- reloc_count += exec_list[i].relocation_count;
- }
-
- drm_free(relocs);
-
- return (ret);
-}
-
-int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_i915_gem_execbuffer2 *args = data;
- struct drm_i915_gem_exec_object2 *exec_list = NULL;
- struct drm_i915_gem_relocation_entry *relocs = NULL;
- struct inteldrm_obj *obj_priv, *batch_obj_priv;
- struct drm_obj **object_list = NULL;
- struct drm_obj *batch_obj, *obj;
- size_t oflow;
- int ret, ret2, i;
- int pinned = 0, pin_tries;
- uint32_t reloc_index;
-
- /*
- * Check for valid execbuffer offset. We can do this early because
- * bound objects are always page aligned, so only the start offset
- * matters. Also check for integer overflow in the batch offset and size
- */
- if ((args->batch_start_offset | args->batch_len) & 0x7 ||
- args->batch_start_offset + args->batch_len < args->batch_len ||
- args->batch_start_offset + args->batch_len <
- args->batch_start_offset)
- return (EINVAL);
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return (EINVAL);
- }
- /* Copy in the exec list from userland, check for overflow */
- oflow = SIZE_MAX / args->buffer_count;
- if (oflow < sizeof(*exec_list) || oflow < sizeof(*object_list))
- return (EINVAL);
- exec_list = drm_alloc(sizeof(*exec_list) * args->buffer_count);
- object_list = drm_alloc(sizeof(*object_list) * args->buffer_count);
- if (exec_list == NULL || object_list == NULL) {
- ret = ENOMEM;
- goto pre_mutex_err;
- }
- ret = copyin((void *)(uintptr_t)args->buffers_ptr, exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0)
- goto pre_mutex_err;
-
- ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
- &relocs);
- if (ret != 0)
- goto pre_mutex_err;
-
- DRM_LOCK();
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-
- /* XXX check these before we copyin... but we do need the lock */
- if (dev_priv->mm.wedged) {
- ret = EIO;
- goto unlock;
- }
-
- if (dev_priv->mm.suspended) {
- ret = EBUSY;
- goto unlock;
- }
-
- /* Look up object handles */
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file_priv,
- exec_list[i].handle);
- obj = object_list[i];
- if (obj == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
- exec_list[i].handle, i);
- ret = EBADF;
- goto err;
- }
- if (obj->do_flags & I915_IN_EXEC) {
- DRM_ERROR("Object %p appears more than once in object_list\n",
- object_list[i]);
- ret = EBADF;
- goto err;
- }
- atomic_setbits_int(&obj->do_flags, I915_IN_EXEC);
- }
-
- /* Pin and relocate */
- for (pin_tries = 0; ; pin_tries++) {
- ret = pinned = 0;
- reloc_index = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- drm_hold_object(object_list[i]);
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv, &exec_list[i], &relocs[reloc_index]);
- if (ret) {
- drm_unhold_object(object_list[i]);
- break;
- }
- pinned++;
- reloc_index += exec_list[i].relocation_count;
- }
- /* success */
- if (ret == 0)
- break;
-
- /* error other than GTT full, or we've already tried again */
- if (ret != ENOSPC || pin_tries >= 1)
- goto err;
-
- /*
- * unpin all of our buffers and unhold them so they can be
- * unbound so we can try and refit everything in the aperture.
- */
- for (i = 0; i < pinned; i++) {
- i915_gem_object_unpin(object_list[i]);
- drm_unhold_object(object_list[i]);
- }
- pinned = 0;
- /* evict everyone we can from the aperture */
- ret = i915_gem_evict_everything(dev_priv, 1);
- if (ret)
- goto err;
- }
-
- /* If we get here all involved objects are referenced, pinned, relocated
- * and held. Now we can finish off the exec processing.
- *
- * First, set the pending read domains for the batch buffer to
- * command.
- */
- batch_obj = object_list[args->buffer_count - 1];
- batch_obj_priv = (struct inteldrm_obj *)batch_obj;
- if (args->batch_start_offset + args->batch_len > batch_obj->size ||
- batch_obj->pending_write_domain) {
- ret = EINVAL;
- goto err;
- }
- batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
-
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-
- /*
- * Zero the global flush/invalidate flags. These will be modified as
- * new domains are computed for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
-
- /* Compute new gpu domains and update invalidate/flush */
- for (i = 0; i < args->buffer_count; i++)
- i915_gem_object_set_to_gpu_domain(object_list[i]);
-
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-
- /* flush and invalidate any domains that need them. */
- (void)i915_gem_flush(dev_priv, dev->invalidate_domains,
- dev->flush_domains);
-
- /*
- * update the write domains, and fence/gpu write accounting information.
- * Also do the move to active list here. The lazy seqno accounting will
- * make sure that they have the correct seqno. If the add_request
- * fails, then we will wait for a later batch (or one added on the
- * wait), which will waste some time, but if we're that low on memory
- * then we could fail in much worse ways.
- */
- mtx_enter(&dev_priv->request_lock); /* to prevent races on next_seqno */
- mtx_enter(&dev_priv->list_lock);
- for (i = 0; i < args->buffer_count; i++) {
- obj = object_list[i];
- obj_priv = (struct inteldrm_obj *)obj;
- drm_lock_obj(obj);
-
- obj->write_domain = obj->pending_write_domain;
- /*
- * if we have a write domain, add us to the gpu write list
- * else we can remove the bit because it has been flushed.
- */
- if (obj->do_flags & I915_GPU_WRITE)
- TAILQ_REMOVE(&dev_priv->mm.gpu_write_list, obj_priv,
- write_list);
- if (obj->write_domain) {
- TAILQ_INSERT_TAIL(&dev_priv->mm.gpu_write_list,
- obj_priv, write_list);
- atomic_setbits_int(&obj->do_flags, I915_GPU_WRITE);
- } else {
- atomic_clearbits_int(&obj->do_flags,
- I915_GPU_WRITE);
- }
- /* if this batchbuffer needs a fence, then the object is
- * counted as fenced exec. else any outstanding fence waits
- * will just wait on the fence last_seqno.
- */
- if (inteldrm_exec_needs_fence(obj_priv)) {
- atomic_setbits_int(&obj->do_flags,
- I915_FENCED_EXEC);
- } else {
- atomic_clearbits_int(&obj->do_flags,
- I915_FENCED_EXEC);
- }
-
- i915_gem_object_move_to_active(object_list[i]);
- drm_unlock_obj(obj);
- }
- mtx_leave(&dev_priv->list_lock);
-
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-
- /* Exec the batchbuffer */
- /*
- * XXX make sure that this may never fail by preallocating the request.
- */
- i915_dispatch_gem_execbuffer(dev, args, batch_obj_priv->gtt_offset);
- mtx_leave(&dev_priv->request_lock);
-
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-
- ret = copyout(exec_list, (void *)(uintptr_t)args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
-
-err:
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i] == NULL)
- break;
-
- atomic_clearbits_int(&object_list[i]->do_flags, I915_IN_EXEC |
- I915_EXEC_NEEDS_FENCE);
- if (i < pinned) {
- i915_gem_object_unpin(object_list[i]);
- drm_unhold_and_unref(object_list[i]);
- } else {
- drm_unref(&object_list[i]->uobj);
- }
- }
-
-unlock:
- DRM_UNLOCK();
-
-pre_mutex_err:
- /* update userland's reloc state. */
- ret2 = i915_gem_put_relocs_to_user(exec_list,
- args->buffer_count, relocs);
- if (ret2 != 0 && ret == 0)
- ret = ret2;
-
- drm_free(object_list);
- drm_free(exec_list);
-
- return ret;
-}
-
-int
-i915_gem_object_pin(struct drm_obj *obj, uint32_t alignment, int needs_fence)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- int ret;
-
- DRM_ASSERT_HELD(obj);
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
- /*
- * if already bound, but alignment is unsuitable, unbind so we can
- * fix it. Similarly if we have constraints due to fence registers,
- * adjust if needed. Note that if we are already pinned we may as well
- * fail because whatever depends on this alignment will render poorly
- * otherwise, so just fail the pin (with a printf so we can fix a
- * wrong userland).
- */
- if (obj_priv->dmamap != NULL &&
- ((alignment && obj_priv->gtt_offset & (alignment - 1)) ||
- obj_priv->gtt_offset & (i915_gem_get_gtt_alignment(obj) - 1) ||
- !i915_gem_object_fence_offset_ok(obj, obj_priv->tiling_mode))) {
- /* if it is already pinned, the alignment was sanitised at that point */
- KASSERT(obj_priv->pin_count == 0);
- if ((ret = i915_gem_object_unbind(obj, 1)))
- return (ret);
- }
-
- if (obj_priv->dmamap == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment, 1);
- if (ret != 0)
- return (ret);
- }
-
- /*
- * due to lazy fence destruction we may have an invalid fence now.
- * So if so, nuke it before we do anything with the gpu.
- * XXX 965+ can put this off.. and be faster
- */
- if (obj->do_flags & I915_FENCE_INVALID) {
- ret = i915_gem_object_put_fence_reg(obj, 1);
- if (ret)
- return (ret);
- }
- /*
- * Pre-965 chips may need a fence register set up in order to
- * handle tiling properly. GTT mapping may have blown it away so
- * restore.
- * With execbuf2 support we don't always need it, but if we do grab
- * it.
- */
- if (needs_fence && obj_priv->tiling_mode != I915_TILING_NONE &&
- (ret = i915_gem_get_fence_reg(obj, 1)) != 0)
- return (ret);
-
- /* If the object is not active and not pending a flush,
- * remove it from the inactive list
- */
- if (++obj_priv->pin_count == 1) {
- atomic_inc(&dev->pin_count);
- atomic_add(obj->size, &dev->pin_memory);
- if (!inteldrm_is_active(obj_priv))
- i915_list_remove(obj_priv);
- }
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-
- return (0);
-}
-
-void
-i915_gem_object_unpin(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
-
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
- KASSERT(obj_priv->pin_count >= 1);
- KASSERT(obj_priv->dmamap != NULL);
- DRM_ASSERT_HELD(obj);
-
- /* If the object is no longer pinned, and is
- * neither active nor being flushed, then stick it on
- * the inactive list
- */
- if (--obj_priv->pin_count == 0) {
- if (!inteldrm_is_active(obj_priv))
- i915_gem_object_move_to_inactive(obj);
- atomic_dec(&dev->pin_count);
- atomic_sub(obj->size, &dev->pin_memory);
- }
- inteldrm_verify_inactive(dev_priv, __FILE__, __LINE__);
-}
-
-int
-i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_i915_gem_pin *args = data;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- int ret = 0;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
- DRM_LOCK();
- drm_hold_object(obj);
-
- obj_priv = (struct inteldrm_obj *)obj;
- if (i915_obj_purgeable(obj_priv)) {
- printf("%s: pinning purgeable object\n", __func__);
- ret = EINVAL;
- goto out;
- }
-
- if (++obj_priv->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment, 1);
- if (ret != 0)
- goto out;
- inteldrm_set_max_obj_size(dev_priv);
- }
-
- /* XXX - flush the CPU caches for pinned objects
- * as the X server doesn't manage domains yet
- */
- i915_gem_object_set_to_gtt_domain(obj, 1, 1);
- args->offset = obj_priv->gtt_offset;
-
-out:
- drm_unhold_and_unref(obj);
- DRM_UNLOCK();
-
- return (ret);
-}
-
-int
-i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_i915_gem_pin *args = data;
- struct inteldrm_obj *obj_priv;
- struct drm_obj *obj;
- int ret = 0;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
-
- DRM_LOCK();
- drm_hold_object(obj);
-
- obj_priv = (struct inteldrm_obj *)obj;
- if (obj_priv->user_pin_count == 0) {
- ret = EINVAL;
- goto out;
- }
-
- if (--obj_priv->user_pin_count == 0) {
- i915_gem_object_unpin(obj);
- inteldrm_set_max_obj_size(dev_priv);
- }
-
-out:
- drm_unhold_and_unref(obj);
- DRM_UNLOCK();
- return (ret);
-}
-
-int
-i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_i915_gem_busy *args = data;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- int ret = 0;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
- args->handle);
- return (EBADF);
- }
-
- obj_priv = (struct inteldrm_obj *)obj;
- args->busy = inteldrm_is_active(obj_priv);
- if (args->busy) {
- /*
- * Unconditionally flush objects write domain if they are
- * busy. The fact userland is calling this ioctl means that
- * it wants to use this buffer sooner rather than later, so
- * flushing now should reduce latency.
- */
- if (obj->write_domain)
- (void)i915_gem_flush(dev_priv, obj->write_domain,
- obj->write_domain);
- /*
- * Update the active list after the flush otherwise this is
- * only updated on a delayed timer. Updating now reduces
- * working set size.
- */
- i915_gem_retire_requests(dev_priv);
- args->busy = inteldrm_is_active(obj_priv);
- }
-
- drm_unref(&obj->uobj);
- return (ret);
-}
-
-int
-i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_madvise *args = data;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- int need, ret = 0;
-
- switch (args->madv) {
- case I915_MADV_DONTNEED:
- need = 0;
- break;
- case I915_MADV_WILLNEED:
- need = 1;
- break;
- default:
- return (EINVAL);
- }
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
-
- drm_hold_object(obj);
- obj_priv = (struct inteldrm_obj *)obj;
-
- /* invalid to madvise on a pinned BO */
- if (obj_priv->pin_count) {
- ret = EINVAL;
- goto out;
- }
-
- if (!i915_obj_purged(obj_priv)) {
- if (need) {
- atomic_clearbits_int(&obj->do_flags,
- I915_DONTNEED);
- } else {
- atomic_setbits_int(&obj->do_flags, I915_DONTNEED);
- }
- }
-
- /* if the object is no longer bound, discard its backing storage */
- if (i915_obj_purgeable(obj_priv) && obj_priv->dmamap == NULL)
- inteldrm_purge_obj(obj);
-
- args->retained = !i915_obj_purged(obj_priv);
-
-out:
- drm_unhold_and_unref(obj);
-
- return (ret);
-}
-
-int
-i915_gem_init_object(struct drm_obj *obj)
-{
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
-
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be flushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
-
- /* normal objects don't need special treatment */
- obj_priv->dma_flags = 0;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
-
- return 0;
-}
-
-/*
- * NOTE: all object unreferences in this driver need to hold the DRM_LOCK(),
- * because if they free the object they poke around in driver structures.
- */
-void
-i915_gem_free_object(struct drm_obj *obj)
-{
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
-
- DRM_ASSERT_HELD(obj);
- while (obj_priv->pin_count > 0)
- i915_gem_object_unpin(obj);
-
- i915_gem_object_unbind(obj, 0);
- drm_free(obj_priv->bit_17);
- obj_priv->bit_17 = NULL;
- /* XXX dmatag went away? */
-}
-
-/* Clear out the inactive list and unbind everything in it. */
-int
-i915_gem_evict_inactive(struct inteldrm_softc *dev_priv, int interruptible)
-{
- struct inteldrm_obj *obj_priv;
- int ret = 0;
-
- mtx_enter(&dev_priv->list_lock);
- while ((obj_priv = TAILQ_FIRST(&dev_priv->mm.inactive_list)) != NULL) {
- if (obj_priv->pin_count != 0) {
- ret = EINVAL;
- DRM_ERROR("Pinned object in unbind list\n");
- break;
- }
- /* reference it so that we can frob it outside the lock */
- drm_ref(&obj_priv->obj.uobj);
- mtx_leave(&dev_priv->list_lock);
-
- drm_hold_object(&obj_priv->obj);
- ret = i915_gem_object_unbind(&obj_priv->obj, interruptible);
- drm_unhold_and_unref(&obj_priv->obj);
-
- mtx_enter(&dev_priv->list_lock);
- if (ret)
- break;
- }
- mtx_leave(&dev_priv->list_lock);
-
- return (ret);
-}
-
-void
-inteldrm_quiesce(struct inteldrm_softc *dev_priv)
-{
- /*
- * Right now we depend on X vt switching, so we should be
- * already suspended, but fallbacks may fault, etc.
- * Since we can't readback the gtt to reset what we have, make
- * sure that everything is unbound.
- */
- KASSERT(dev_priv->mm.suspended);
- KASSERT(dev_priv->ring.ring_obj == NULL);
- atomic_setbits_int(&dev_priv->sc_flags, INTELDRM_QUIET);
- while (dev_priv->entries)
- tsleep(&dev_priv->entries, 0, "intelquiet", 0);
- /*
- * nothing should be dirty WRT the chip, only stuff that's bound
- * for gtt mapping. Nothing should be pinned over vt switch, if it
- * is then rendering corruption will occur due to api misuse, shame.
- */
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.flushing_list));
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.active_list));
- /* Disabled because root could panic the kernel if this was enabled */
- /* KASSERT(dev->pin_count == 0); */
-
- /* can't fail since uninterruptible */
- (void)i915_gem_evict_inactive(dev_priv, 0);
-}
-
-int
-i915_gem_idle(struct inteldrm_softc *dev_priv)
-{
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- int ret;
-
- /* If drm attach failed */
- if (dev == NULL)
- return (0);
-
- DRM_LOCK();
- if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.flushing_list));
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.active_list));
- (void)i915_gem_evict_inactive(dev_priv, 0);
- DRM_UNLOCK();
- return (0);
- }
-
- /*
- * To idle the gpu, flush anything pending then unbind the whole
- * shebang. If we're wedged, assume that the reset workq will clear
- * everything out and continue as normal.
- */
- if ((ret = i915_gem_evict_everything(dev_priv, 1)) != 0 &&
- ret != ENOSPC && ret != EIO) {
- DRM_UNLOCK();
- return (ret);
- }
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- */
- dev_priv->mm.suspended = 1;
- /* if we hung then the timer already fired. */
- timeout_del(&dev_priv->mm.hang_timer);
-
- inteldrm_update_ring(dev_priv);
- i915_gem_cleanup_ringbuffer(dev_priv);
- DRM_UNLOCK();
-
- /* this should be idle now */
- timeout_del(&dev_priv->mm.retire_timer);
-
- return 0;
-}
-
-int
-i915_gem_init_hws(struct inteldrm_softc *dev_priv)
-{
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- int ret;
-
- /* If we need a physical address for the status page, it's already
- * initialized at driver load time.
- */
- if (!I915_NEED_GFX_HWS(dev_priv))
- return 0;
-
- obj = drm_gem_object_alloc(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate status page\n");
- return (ENOMEM);
- }
- obj_priv = (struct inteldrm_obj *)obj;
- drm_hold_object(obj);
- /*
- * snooped gtt mapping please.
- * Normally this flag only applies to dmamem_map, but it's been
- * overloaded for the agp mapping
- */
- obj_priv->dma_flags = BUS_DMA_COHERENT | BUS_DMA_READ;
-
- ret = i915_gem_object_pin(obj, 4096, 0);
- if (ret != 0) {
- drm_unhold_and_unref(obj);
- return ret;
- }
-
- dev_priv->hw_status_page = (void *)vm_map_min(kernel_map);
- obj->uao->pgops->pgo_reference(obj->uao);
- if ((ret = uvm_map(kernel_map, (vaddr_t *)&dev_priv->hw_status_page,
- PAGE_SIZE, obj->uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
- UVM_INH_SHARE, UVM_ADV_RANDOM, 0))) != 0) {
- DRM_ERROR("Failed to map status page.\n");
- obj->uao->pgops->pgo_detach(obj->uao);
- i915_gem_object_unpin(obj);
- drm_unhold_and_unref(obj);
- return (EINVAL);
- }
- drm_unhold_object(obj);
- dev_priv->hws_obj = obj;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, obj_priv->gtt_offset);
- I915_READ(HWS_PGA); /* posting read */
- DRM_DEBUG("hws offset: 0x%08x\n", obj_priv->gtt_offset);
-
- return 0;
-}
-
-void
-i915_gem_cleanup_hws(struct inteldrm_softc *dev_priv)
-{
- struct drm_obj *obj;
-
- if (!I915_NEED_GFX_HWS(dev_priv) || dev_priv->hws_obj == NULL)
- return;
-
- obj = dev_priv->hws_obj;
-
- uvm_unmap(kernel_map, (vaddr_t)dev_priv->hw_status_page,
- (vaddr_t)dev_priv->hw_status_page + PAGE_SIZE);
- dev_priv->hw_status_page = NULL;
- drm_hold_object(obj);
- i915_gem_object_unpin(obj);
- drm_unhold_and_unref(obj);
- dev_priv->hws_obj = NULL;
-
- /* Write high address into HWS_PGA when disabling. */
- I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
-int
-i915_gem_init_ringbuffer(struct inteldrm_softc *dev_priv)
-{
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- int ret;
-
- ret = i915_gem_init_hws(dev_priv);
- if (ret != 0)
- return ret;
-
- obj = drm_gem_object_alloc(dev, 128 * 1024);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- ret = ENOMEM;
- goto delhws;
- }
- drm_hold_object(obj);
- obj_priv = (struct inteldrm_obj *)obj;
-
- ret = i915_gem_object_pin(obj, 4096, 0);
- if (ret != 0)
- goto unref;
-
- /* Set up the kernel mapping for the ring. */
- dev_priv->ring.size = obj->size;
-
- if ((ret = agp_map_subregion(dev_priv->agph, obj_priv->gtt_offset,
- obj->size, &dev_priv->ring.bsh)) != 0) {
- DRM_INFO("can't map ringbuffer\n");
- goto unpin;
- }
- dev_priv->ring.ring_obj = obj;
-
- if ((ret = inteldrm_start_ring(dev_priv)) != 0)
- goto unmap;
-
- drm_unhold_object(obj);
- return (0);
-
-unmap:
- agp_unmap_subregion(dev_priv->agph, dev_priv->ring.bsh, obj->size);
-unpin:
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
- i915_gem_object_unpin(obj);
-unref:
- drm_unhold_and_unref(obj);
-delhws:
- i915_gem_cleanup_hws(dev_priv);
- return (ret);
-}
-
-int
-inteldrm_start_ring(struct inteldrm_softc *dev_priv)
-{
- struct drm_obj *obj = dev_priv->ring.ring_obj;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- u_int32_t head;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* G45 ring initialisation fails to reset head to zero */
- if (head != 0) {
- I915_WRITE(PRB0_HEAD, 0);
- DRM_DEBUG("Forced ring head to zero ctl %08x head %08x"
- "tail %08x start %08x\n", I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD), I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- }
-
- I915_WRITE(PRB0_CTL, ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT | RING_VALID);
-
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- /* If ring head still != 0, the ring is dead */
- if (head != 0) {
- DRM_ERROR("Ring initialisation failed: ctl %08x head %08x"
- "tail %08x start %08x\n", I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD), I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- return (EIO);
- }
-
- /* Update our cache of the ring state */
- inteldrm_update_ring(dev_priv);
-
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- I915_WRITE(MI_MODE, (MI_FLUSH_ENABLE << 16) | MI_FLUSH_ENABLE |
- (VS_TIMER_DISPATCH << 16) | VS_TIMER_DISPATCH);
- else if (IS_I9XX(dev_priv) && !IS_GEN3(dev_priv))
- I915_WRITE(MI_MODE, (VS_TIMER_DISPATCH << 16) |
- VS_TIMER_DISPATCH);
-
- return (0);
-}
-
-void
-i915_gem_cleanup_ringbuffer(struct inteldrm_softc *dev_priv)
-{
- if (dev_priv->ring.ring_obj == NULL)
- return;
- agp_unmap_subregion(dev_priv->agph, dev_priv->ring.bsh,
- dev_priv->ring.ring_obj->size);
- drm_hold_object(dev_priv->ring.ring_obj);
- i915_gem_object_unpin(dev_priv->ring.ring_obj);
- drm_unhold_and_unref(dev_priv->ring.ring_obj);
- dev_priv->ring.ring_obj = NULL;
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
- i915_gem_cleanup_hws(dev_priv);
-}
-
-int
-i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- int ret;
-
- /* XXX until we have support for the rings on sandybridge */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- return (0);
-
- if (dev_priv->mm.wedged) {
- DRM_ERROR("Reenabling wedged hardware, good luck\n");
- dev_priv->mm.wedged = 0;
- }
-
- DRM_LOCK();
- dev_priv->mm.suspended = 0;
-
- ret = i915_gem_init_ringbuffer(dev_priv);
- if (ret != 0) {
- DRM_UNLOCK();
- return (ret);
- }
-
- /* gtt mapping means that the inactive list may not be empty */
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.active_list));
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.flushing_list));
- KASSERT(TAILQ_EMPTY(&dev_priv->mm.request_list));
- DRM_UNLOCK();
-
- drm_irq_install(dev);
-
- return (0);
-}
-
-int
-i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- int ret;
-
- /* don't uninstall if we fail; repeat calls on failure will screw us */
- if ((ret = i915_gem_idle(dev_priv)) == 0)
- drm_irq_uninstall(dev);
- return (ret);
-}
-
-void
-inteldrm_timeout(void *arg)
-{
- struct inteldrm_softc *dev_priv = arg;
-
- if (workq_add_task(dev_priv->workq, 0, i915_gem_retire_work_handler,
- dev_priv, NULL) == ENOMEM)
- DRM_ERROR("failed to run retire handler\n");
-}
-
-/*
- * handle hung hardware, or error interrupts. for now print debug info.
- */
-void
-inteldrm_error(struct inteldrm_softc *dev_priv)
-{
- u_int32_t eir, ipeir;
- u_int8_t reset = GRDOM_RENDER;
- char *errbitstr;
-
- eir = I915_READ(EIR);
- if (eir == 0)
- return;
-
- if (IS_IRONLAKE(dev_priv)) {
- errbitstr = "\20\x05PTEE\x04MPVE\x03CPVE";
- } else if (IS_G4X(dev_priv)) {
- errbitstr = "\20\x10 BCSINSTERR\x06PTEERR\x05MPVERR\x04CPVERR"
- "\x03 BCSPTEERR\x02REFRESHERR\x01INSTERR";
- } else {
- errbitstr = "\20\x5PTEERR\x2REFRESHERR\x1INSTERR";
- }
-
- printf("render error detected, EIR: %b\n", eir, errbitstr);
- if (IS_IRONLAKE(dev_priv) || IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- if (eir & I915_ERROR_PAGE_TABLE) {
- dev_priv->mm.wedged = 1;
- reset = GRDOM_FULL;
- }
- } else {
- if (IS_G4X(dev_priv)) {
- if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
- printf(" IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printf(" IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printf(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
- printf(" INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printf(" INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printf(" ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
- }
- if (eir & GM45_ERROR_PAGE_TABLE) {
- printf(" PGTBL_ER: 0x%08x\n",
- I915_READ(PGTBL_ER));
- dev_priv->mm.wedged = 1;
- reset = GRDOM_FULL;
-
- }
- } else if (IS_I9XX(dev_priv) && eir & I915_ERROR_PAGE_TABLE) {
- printf(" PGTBL_ER: 0x%08x\n", I915_READ(PGTBL_ER));
- dev_priv->mm.wedged = 1;
- reset = GRDOM_FULL;
- }
- if (eir & I915_ERROR_MEMORY_REFRESH) {
- printf("PIPEASTAT: 0x%08x\n",
- I915_READ(_PIPEASTAT));
- printf("PIPEBSTAT: 0x%08x\n",
- I915_READ(_PIPEBSTAT));
- }
- if (eir & I915_ERROR_INSTRUCTION) {
- printf(" INSTPM: 0x%08x\n",
- I915_READ(INSTPM));
- if (!IS_I965G(dev_priv)) {
- ipeir = I915_READ(IPEIR);
-
- printf(" IPEIR: 0x%08x\n",
- I915_READ(IPEIR));
- printf(" IPEHR: 0x%08x\n",
- I915_READ(IPEHR));
- printf(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE));
- printf(" ACTHD: 0x%08x\n",
- I915_READ(ACTHD));
- I915_WRITE(IPEIR, ipeir);
- (void)I915_READ(IPEIR);
- } else {
- ipeir = I915_READ(IPEIR_I965);
-
- printf(" IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printf(" IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printf(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
- printf(" INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printf(" INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printf(" ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
- }
- }
- }
-
- I915_WRITE(EIR, eir);
- eir = I915_READ(EIR);
- /*
- * nasty errors don't clear and need a reset; mask them until we reset,
- * else we'll get infinite interrupt storms.
- */
- if (eir) {
- if (dev_priv->mm.wedged == 0)
- DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
- I915_WRITE(EMR, I915_READ(EMR) | eir);
- if (IS_IRONLAKE(dev_priv) || IS_GEN6(dev_priv) ||
- IS_GEN7(dev_priv)) {
- I915_WRITE(GTIIR, GT_MASTER_ERROR);
- } else {
- I915_WRITE(IIR,
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
- }
- }
- /*
- * if it was a pagetable error, or we were called from hangcheck, then
- * reset the gpu.
- */
- if (dev_priv->mm.wedged && workq_add_task(dev_priv->workq, 0,
- inteldrm_hung, dev_priv, (void *)(uintptr_t)reset) == ENOMEM)
- DRM_INFO("failed to schedule reset task\n");
-}
-
-void
-inteldrm_hung(void *arg, void *reset_type)
-{
- struct inteldrm_softc *dev_priv = arg;
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- struct inteldrm_obj *obj_priv;
- u_int8_t reset = (u_int8_t)(uintptr_t)reset_type;
-
- DRM_LOCK();
- if (HAS_RESET(dev_priv)) {
- DRM_INFO("resetting gpu: ");
- inteldrm_965_reset(dev_priv, reset);
- printf("done!\n");
- } else
- printf("no reset function for chipset.\n");
-
- /*
- * Clear out all of the requests and make everything inactive.
- */
- i915_gem_retire_requests(dev_priv);
-
- /*
- * Clear the active and flushing lists to inactive. Since
- * we've reset the hardware then they're not going to get
- * flushed or completed otherwise. nuke the domains since
- * they're now irrelavent.
- */
- mtx_enter(&dev_priv->list_lock);
- while ((obj_priv = TAILQ_FIRST(&dev_priv->mm.flushing_list)) != NULL) {
- drm_lock_obj(&obj_priv->obj);
- if (obj_priv->obj.write_domain & I915_GEM_GPU_DOMAINS) {
- TAILQ_REMOVE(&dev_priv->mm.gpu_write_list,
- obj_priv, write_list);
- atomic_clearbits_int(&obj_priv->obj.do_flags,
- I915_GPU_WRITE);
- obj_priv->obj.write_domain &= ~I915_GEM_GPU_DOMAINS;
- }
- /* unlocks object and list */
- i915_gem_object_move_to_inactive_locked(&obj_priv->obj);
- mtx_enter(&dev_priv->list_lock);
- }
- mtx_leave(&dev_priv->list_lock);
-
- /* unbind everything */
- (void)i915_gem_evict_inactive(dev_priv, 0);
-
- if (HAS_RESET(dev_priv))
- dev_priv->mm.wedged = 0;
- DRM_UNLOCK();
-}
-
-void
-inteldrm_hangcheck(void *arg)
-{
- struct inteldrm_softc *dev_priv = arg;
- u_int32_t acthd, instdone, instdone1;
-
- /* are we idle? no requests, or ring is empty */
- if (TAILQ_EMPTY(&dev_priv->mm.request_list) ||
- (I915_READ(PRB0_HEAD) & HEAD_ADDR) ==
- (I915_READ(PRB0_TAIL) & TAIL_ADDR)) {
- dev_priv->mm.hang_cnt = 0;
- return;
- }
-
- if (IS_I965G(dev_priv)) {
- acthd = I915_READ(ACTHD_I965);
- instdone = I915_READ(INSTDONE_I965);
- instdone1 = I915_READ(INSTDONE1);
- } else {
- acthd = I915_READ(ACTHD);
- instdone = I915_READ(INSTDONE);
- instdone1 = 0;
- }
-
- /* if we've hit ourselves before and the hardware hasn't moved, hung. */
- if (dev_priv->mm.last_acthd == acthd &&
- dev_priv->mm.last_instdone == instdone &&
- dev_priv->mm.last_instdone1 == instdone1) {
- /* if the hardware hasn't moved twice in a row, then we're hung */
- if (++dev_priv->mm.hang_cnt >= 2) {
- if (!IS_GEN2(dev_priv)) {
- u_int32_t tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- (void)I915_READ(PRB0_CTL);
- goto out;
- }
- }
- dev_priv->mm.hang_cnt = 0;
- /* XXX atomic */
- dev_priv->mm.wedged = 1;
- DRM_INFO("gpu hung!\n");
- /* XXX locking */
- wakeup(dev_priv);
- inteldrm_error(dev_priv);
- return;
- }
- } else {
- dev_priv->mm.hang_cnt = 0;
-
- dev_priv->mm.last_acthd = acthd;
- dev_priv->mm.last_instdone = instdone;
- dev_priv->mm.last_instdone1 = instdone1;
- }
-out:
- /* Set ourselves up again, in case we haven't added another batch */
- timeout_add_msec(&dev_priv->mm.hang_timer, 750);
-}
-
-void
-i915_move_to_tail(struct inteldrm_obj *obj_priv, struct i915_gem_list *head)
-{
- i915_list_remove(obj_priv);
- TAILQ_INSERT_TAIL(head, obj_priv, list);
- obj_priv->current_list = head;
-}
-
-void
-i915_list_remove(struct inteldrm_obj *obj_priv)
-{
- if (obj_priv->current_list != NULL)
- TAILQ_REMOVE(obj_priv->current_list, obj_priv, list);
- obj_priv->current_list = NULL;
-}
-
-/*
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses are in the same cacheline.
- * Performance improvement from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled. However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y. So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip -- Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU also sometimes XORs in higher bits as well, to improve
- * bandwidth doing strided access like we do so frequently in graphics. This
- * is called "Channel XOR Randomization" in the MCH documentation. The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
- *
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
- *
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
- *
- * When bit 17 is XORed in, we simply refuse to tile at all. Bit
- * 17 is not just a page offset, so as we page an object out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
- *
- * Otherwise, if interleaved, we have to tell the 3d driver what the address
- * swizzling it needs to do is, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
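-
- /*
- * In sketch form, the 9_10 swizzle described above means the effective
- * bit 6 of a GTT-relative address is bit 6 XORed with bits 9 and 10
- * (the CPU variants additionally fold in bit 11 or 17). Hypothetical
- * helper, for illustration only:
- */
- static inline u_int64_t
- swizzle_bit6_9_10(u_int64_t addr)
- {
- u_int64_t bit9 = (addr >> 9) & 1;
- u_int64_t bit10 = (addr >> 10) & 1;
-
- /* flip bit 6 whenever bits 9 and 10 differ */
- return (addr ^ ((bit9 ^ bit10) << 6));
- }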
-
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
-/*
- * Check that the MCHBAR on the host bridge is enabled, and if not allocate it.
- * We do not need to actually map it because we access the bar through its
- * mirror on the IGD; however, if it is disabled or not allocated then
- * the mirror does not work. *sigh*.
- *
- * we return a trinary state:
- * 0 = already enabled, or cannot enable
- * 1 = enabled, needs disable
- * 2 = enabled, needs disable and free.
- */
-int
-inteldrm_setup_mchbar(struct inteldrm_softc *dev_priv,
- struct pci_attach_args *bpa)
-{
- u_int64_t mchbar_addr;
- pcireg_t tmp, low, high = 0;
- u_long addr;
- int reg = IS_I965G(dev_priv) ? MCHBAR_I965 : MCHBAR_I915;
- int ret = 1, enabled = 0;
-
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, DEVEN_REG);
- enabled = !!(tmp & DEVEN_MCHBAR_EN);
- } else {
- tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
- enabled = tmp & 1;
- }
-
- if (enabled) {
- return (0);
- }
-
- if (IS_I965G(dev_priv))
- high = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg + 4);
- low = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
- mchbar_addr = ((u_int64_t)high << 32) | low;
-
- /*
- * XXX need to check to see if it's allocated in the pci resources,
- * right now we just check to see if there's any address there
- *
- * if there's no address, then we allocate one.
- * note that we can't just use pci_mapreg_map here since some intel
- * BARs are special in that they set bit 0 to show they're enabled,
- * this is not handled by generic pci code.
- */
- if (mchbar_addr == 0) {
- addr = (u_long)mchbar_addr;
- if (bpa->pa_memex == NULL || extent_alloc(bpa->pa_memex,
- MCHBAR_SIZE, MCHBAR_SIZE, 0, 0, 0, &addr)) {
- return (0); /* just say we don't need to disable */
- } else {
- mchbar_addr = addr;
- ret = 2;
- /* We've allocated it, now fill in the BAR again */
- if (IS_I965G(dev_priv))
- pci_conf_write(bpa->pa_pc, bpa->pa_tag,
- reg + 4, upper_32_bits(mchbar_addr));
- pci_conf_write(bpa->pa_pc, bpa->pa_tag,
- reg, mchbar_addr & 0xffffffff);
- }
- }
- /* set the enable bit */
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- pci_conf_write(bpa->pa_pc, bpa->pa_tag, DEVEN_REG,
- tmp | DEVEN_MCHBAR_EN);
- } else {
- tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
- pci_conf_write(bpa->pa_pc, bpa->pa_tag, reg, tmp | 1);
- }
-
- return (ret);
-}
-
-/*
- * we take the trinary returned from inteldrm_setup_mchbar and clean up after
- * it.
- */
-void
-inteldrm_teardown_mchbar(struct inteldrm_softc *dev_priv,
- struct pci_attach_args *bpa, int disable)
-{
- u_int64_t mchbar_addr;
- pcireg_t tmp, low, high = 0;
- int reg = IS_I965G(dev_priv) ? MCHBAR_I965 : MCHBAR_I915;
-
- switch (disable) {
- case 2:
- if (IS_I965G(dev_priv))
- high = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg + 4);
- low = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
- mchbar_addr = ((u_int64_t)high << 32) | low;
- if (bpa->pa_memex)
- extent_free(bpa->pa_memex, mchbar_addr, MCHBAR_SIZE, 0);
- /* FALLTHROUGH */
- case 1:
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, DEVEN_REG);
- tmp &= ~DEVEN_MCHBAR_EN;
- pci_conf_write(bpa->pa_pc, bpa->pa_tag, DEVEN_REG, tmp);
- } else {
- tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
- tmp &= ~1;
- pci_conf_write(bpa->pa_pc, bpa->pa_tag, reg, tmp);
- }
- break;
- case 0:
- default:
- break;
- }
-}
-
-/**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
- */
-void
-inteldrm_detect_bit_6_swizzle(struct inteldrm_softc *dev_priv,
- struct pci_attach_args *bpa)
-{
- uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- int need_disable;
-
- if (!IS_I9XX(dev_priv)) {
- /* As far as we know, the 865 doesn't have these bit 6
- * swizzling issues.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- /*
- * On ironlake and sandybridge the swizzling is the same
- * no matter what the DRAM config
- */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_MOBILE(dev_priv)) {
- uint32_t dcc;
-
- /* try to enable MCHBAR; a lot of BIOSes disable it */
- need_disable = inteldrm_setup_mchbar(dev_priv, bpa);
-
- /* On 915-945 and GM965, channel interleave by the CPU is
- * determined by DCC. The CPU will alternate based on bit 6
- * in interleaved mode, and the GPU will then also alternate
- * on bit 6, 9, and 10 for X, but the CPU may also optionally
- * alternate based on bit 17 (XOR not disabled and XOR
- * bit == 17).
- */
- dcc = I915_READ(DCC);
- switch (dcc & DCC_ADDRESSING_MODE_MASK) {
- case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- break;
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
- if (dcc & DCC_CHANNEL_XOR_DISABLE) {
- /* This is the base swizzling by the GPU for
- * tiled buffers.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
- /* Bit 11 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
- swizzle_y = I915_BIT_6_SWIZZLE_9_11;
- } else {
- /* Bit 17 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
- swizzle_y = I915_BIT_6_SWIZZLE_9_17;
- }
- break;
- }
- if (dcc == 0xffffffff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
- "Disabling tiling.\n");
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
-
- inteldrm_teardown_mchbar(dev_priv, bpa, need_disable);
- } else {
- /* The 965, G33, and newer, have a very flexible memory
- * configuration. It will enable dual-channel mode
- * (interleaving) on as much memory as it can, and the GPU
- * will additionally sometimes enable different bit 6
- * swizzling for tiled objects from the CPU.
- *
- * Here's what I found on G965:
- *
- * slot fill           memory size    swizzling
- * 0A   0B   1A   1B   1-ch   2-ch
- * 512  0    0    0    512    0       O
- * 512  0    512  0    16     1008    X
- * 512  0    0    512  16     1008    X
- * 0    512  0    512  16     1008    X
- * 1024 1024 1024 0    2048   1024    O
- *
- * We could probably detect this based on either the DRB
- * matching, which was the case for the swizzling required in
- * the table above, or from the 1-ch value being less than
- * the minimum size of a rank.
- */
- if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
- }
-
- dev_priv->mm.bit_6_swizzle_x = swizzle_x;
- dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
-
-int
-inteldrm_swizzle_page(struct vm_page *pg)
-{
- vaddr_t va;
- int i;
- u_int8_t temp[64], *vaddr;
-
-#if defined (__HAVE_PMAP_DIRECT)
- va = pmap_map_direct(pg);
-#else
- va = uvm_km_valloc(kernel_map, PAGE_SIZE);
- if (va == 0)
- return (ENOMEM);
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
- pmap_update(pmap_kernel());
-#endif
- vaddr = (u_int8_t *)va;
-
- for (i = 0; i < PAGE_SIZE; i += 128) {
- memcpy(temp, &vaddr[i], 64);
- memcpy(&vaddr[i], &vaddr[i + 64], 64);
- memcpy(&vaddr[i + 64], temp, 64);
- }
-
-#if defined (__HAVE_PMAP_DIRECT)
- pmap_unmap_direct(va);
-#else
- pmap_kremove(va, PAGE_SIZE);
- pmap_update(pmap_kernel());
- uvm_km_free(kernel_map, va, PAGE_SIZE);
-#endif
- return (0);
-}
-
-void
-i915_gem_bit_17_swizzle(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- struct vm_page *pg;
- bus_dma_segment_t *segp;
- int page_count = obj->size >> PAGE_SHIFT;
- int i, n, ret;
-
- if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17 ||
- obj_priv->bit_17 == NULL)
- return;
-
- segp = &obj_priv->dma_segs[0];
- n = 0;
- for (i = 0; i < page_count; i++) {
- /* compare bit 17 with previous one (in case we swapped).
- * if they don't match we'll have to swizzle the page
- */
- if ((((segp->ds_addr + n) >> 17) & 0x1) !=
- test_bit(i, obj_priv->bit_17)) {
- /* XXX move this to somewhere where we already have pg */
- pg = PHYS_TO_VM_PAGE(segp->ds_addr + n);
- KASSERT(pg != NULL);
- ret = inteldrm_swizzle_page(pg);
- if (ret)
- return;
- atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
- }
-
- n += PAGE_SIZE;
- if (n >= segp->ds_len) {
- n = 0;
- segp++;
- }
- }
- }
-
-void
-i915_gem_save_bit_17_swizzle(struct drm_obj *obj)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
- bus_dma_segment_t *segp;
- int page_count = obj->size >> PAGE_SHIFT, i, n;
-
- if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
- return;
-
- if (obj_priv->bit_17 == NULL) {
- /* round up number of pages to a multiple of 32 so we know what
- * size to make the bitmask. XXX this is wasteful with malloc
- * and a better way should be done
- */
- size_t nb17 = ((page_count + 31) & ~31) / 32;
- obj_priv->bit_17 = drm_alloc(nb17 * sizeof(u_int32_t));
- if (obj_priv->bit_17 == NULL) {
- return;
- }
- }
-
- segp = &obj_priv->dma_segs[0];
- n = 0;
- for (i = 0; i < page_count; i++) {
- if ((segp->ds_addr + n) & (1 << 17))
- set_bit(i, obj_priv->bit_17);
- else
- clear_bit(i, obj_priv->bit_17);
-
- n += PAGE_SIZE;
- if (n >= segp->ds_len) {
- n = 0;
- segp++;
- }
- }
-}
-
-bus_size_t
-i915_get_fence_size(struct inteldrm_softc *dev_priv, bus_size_t size)
-{
- bus_size_t i, start;
-
- if (IS_I965G(dev_priv)) {
- /* 965 can have fences anywhere, so align to gpu-page size */
- return ((size + (4096 - 1)) & ~(4096 - 1));
- } else {
- /*
- * Align the size to a power of two greater than the smallest
- * fence size.
- */
- if (IS_I9XX(dev_priv))
- start = 1024 * 1024;
- else
- start = 512 * 1024;
-
- for (i = start; i < size; i <<= 1)
- ;
-
- return (i);
- }
-}
-
-int
-i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- int tile_width;
-
- /* Linear is always ok */
- if (tiling_mode == I915_TILING_NONE)
- return (1);
-
- if (!IS_I9XX(dev_priv) || (tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev_priv)))
- tile_width = 128;
- else
- tile_width = 512;
-
- /* Check stride and size constraints */
- if (IS_I965G(dev_priv)) {
- /* fence reg has end address, so size is ok */
- if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
- return (0);
- } else if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
- if (stride > 8192)
- return (0);
- if (IS_GEN3(dev_priv)) {
- if (size > I830_FENCE_MAX_SIZE_VAL << 20)
- return (0);
- } else if (size > I830_FENCE_MAX_SIZE_VAL << 19)
- return (0);
- }
-
- /* 965+ just needs multiples of the tile width */
- if (IS_I965G(dev_priv))
- return ((stride & (tile_width - 1)) == 0);
-
- /* Pre-965 needs power-of-two */
- if (stride < tile_width || stride & (stride - 1) ||
- i915_get_fence_size(dev_priv, size) != size)
- return (0);
- return (1);
-}
-
-int
-i915_gem_object_fence_offset_ok(struct drm_obj *obj, int tiling_mode)
-{
- struct drm_device *dev = obj->dev;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct inteldrm_obj *obj_priv = (struct inteldrm_obj *)obj;
-
- if (obj_priv->dmamap == NULL || tiling_mode == I915_TILING_NONE)
- return (1);
-
- if (!IS_I965G(dev_priv)) {
-		if (obj_priv->gtt_offset & (obj->size - 1))
- return (0);
- if (IS_I9XX(dev_priv)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return (0);
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return (0);
- }
- }
- return (1);
-}
-/**
- * Sets the tiling mode of an object, returning the required swizzling of
- * bit 6 of addresses in the object.
- */
-int
-i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_set_tiling *args = data;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- int ret = 0;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
- obj_priv = (struct inteldrm_obj *)obj;
- drm_hold_object(obj);
-
- if (obj_priv->pin_count != 0) {
- ret = EBUSY;
- goto out;
- }
- if (i915_tiling_ok(dev, args->stride, obj->size,
- args->tiling_mode) == 0) {
- ret = EINVAL;
- goto out;
- }
-
- if (args->tiling_mode == I915_TILING_NONE) {
- args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- args->stride = 0;
- } else {
- if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
- else
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
- /* If we can't handle the swizzling, make it untiled. */
- if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
- args->tiling_mode = I915_TILING_NONE;
- args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- args->stride = 0;
- }
- }
-
- if (args->tiling_mode != obj_priv->tiling_mode ||
- args->stride != obj_priv->stride) {
- /*
- * We need to rebind the object if its current allocation no
- * longer meets the alignment restrictions for its new tiling
- * mode. Otherwise we can leave it alone, but must clear any
- * fence register.
- */
- /* fence may no longer be correct, wipe it */
- inteldrm_wipe_mappings(obj);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- atomic_setbits_int(&obj->do_flags,
- I915_FENCE_INVALID);
- obj_priv->tiling_mode = args->tiling_mode;
- obj_priv->stride = args->stride;
- }
-
-out:
- drm_unhold_and_unref(obj);
-
- return (ret);
-}
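
A minimal userland sketch of driving this ioctl via the definitions in
i915_drm.h; "fd" is assumed to be an open DRM device node and "handle"
a valid GEM object handle, with error handling elided:

#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>

#include "i915_drm.h"

int
set_x_tiled(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.tiling_mode = I915_TILING_X;
	args.stride = stride;

	/* the kernel may downgrade tiling_mode; callers should re-check */
	return (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &args));
}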
-
-/**
- * Returns the current tiling mode and required bit 6 swizzling for the object.
- */
-int
-i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_get_tiling *args = data;
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return (EBADF);
- drm_hold_object(obj);
- obj_priv = (struct inteldrm_obj *)obj;
-
- args->tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
- case I915_TILING_X:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
- break;
- case I915_TILING_Y:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
- break;
- case I915_TILING_NONE:
- args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- break;
- default:
- DRM_ERROR("unknown tiling mode\n");
- }
-
- drm_unhold_and_unref(obj);
-
- return 0;
-}
-
-
-/*
- * Reset the chip after a hang (965 only)
- *
- * The procedure that should be followed is relatively simple:
- * - reset the chip using the reset reg
- * - re-init context state
- * - re-init Hardware status page
- * - re-init ringbuffer
- * - re-init interrupt state
- * - re-init display
- */
-void
-inteldrm_965_reset(struct inteldrm_softc *dev_priv, u_int8_t flags)
-{
- pcireg_t reg;
- int i = 0;
-
- /*
-	 * There seems to be something wrong with !full reset modes, so force
- * the whole shebang for now.
- */
- flags = GRDOM_FULL;
-
- if (flags == GRDOM_FULL)
- i915_save_display(dev_priv);
-
- reg = pci_conf_read(dev_priv->pc, dev_priv->tag, I965_GDRST);
- /*
-	 * Set the domains we want to reset, then bit 0 (reset itself),
-	 * then wait for the hardware to clear it.
- */
- pci_conf_write(dev_priv->pc, dev_priv->tag, I965_GDRST,
- reg | (u_int32_t)flags | ((flags == GRDOM_FULL) ? 0x1 : 0x0));
- delay(50);
- /* don't clobber the rest of the register */
- pci_conf_write(dev_priv->pc, dev_priv->tag, I965_GDRST, reg & 0xfe);
-
- /* if this fails we're pretty much fucked, but don't loop forever */
- do {
- delay(100);
- reg = pci_conf_read(dev_priv->pc, dev_priv->tag, I965_GDRST);
- } while ((reg & 0x1) && ++i < 10);
-
- if (reg & 0x1)
- printf("bit 0 not cleared .. ");
-
- /* put everything back together again */
-
- /*
-	 * GTT is already up (we didn't do a pci-level reset, thank god).
- *
- * We don't have to restore the contexts (we don't use them yet).
- * So, if X is running we need to put the ringbuffer back first.
- */
- if (dev_priv->mm.suspended == 0) {
- struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
- if (inteldrm_start_ring(dev_priv) != 0)
- panic("can't restart ring, we're fucked");
-
- /* put the hardware status page back */
- if (I915_NEED_GFX_HWS(dev_priv)) {
- I915_WRITE(HWS_PGA, ((struct inteldrm_obj *)
- dev_priv->hws_obj)->gtt_offset);
- } else {
- I915_WRITE(HWS_PGA,
- dev_priv->hws_dmamem->map->dm_segs[0].ds_addr);
- }
- I915_READ(HWS_PGA); /* posting read */
-
- /* so we remove the handler and can put it back in */
- DRM_UNLOCK();
- drm_irq_uninstall(dev);
- drm_irq_install(dev);
- DRM_LOCK();
- } else
- printf("not restarting ring...\n");
-
-
- if (flags == GRDOM_FULL)
- i915_restore_display(dev_priv);
-}
-
-/*
- * Debug code from here.
- */
-#ifdef WATCH_INACTIVE
-void
-inteldrm_verify_inactive(struct inteldrm_softc *dev_priv, char *file,
- int line)
-{
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
-
- TAILQ_FOREACH(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = (struct drm_obj *)obj_priv;
- if (obj_priv->pin_count || inteldrm_is_active(obj_priv) ||
- obj->write_domain & I915_GEM_GPU_DOMAINS)
-			DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
- obj, obj_priv->pin_count,
- inteldrm_is_active(obj_priv),
- obj->write_domain, file, line);
- }
-}
-#endif /* WATCH_INACTIVE */
-
-#if (INTELDRM_DEBUG > 1)
-
-static const char *get_pin_flag(struct inteldrm_obj *obj_priv)
-{
- if (obj_priv->pin_count > 0)
- return "p";
- else
- return " ";
-}
-
-static const char *get_tiling_flag(struct inteldrm_obj *obj_priv)
-{
- switch (obj_priv->tiling_mode) {
- default:
- case I915_TILING_NONE: return " ";
- case I915_TILING_X: return "X";
- case I915_TILING_Y: return "Y";
- }
-}
-
-void
-i915_gem_seqno_info(int kdev)
-{
- struct drm_device *dev = drm_get_device_from_kdev(kdev);
- struct inteldrm_softc *dev_priv = dev->dev_private;
-
- if (dev_priv->hw_status_page != NULL) {
- printf("Current sequence: %d\n", i915_get_gem_seqno(dev_priv));
- } else {
- printf("Current sequence: hws uninitialized\n");
- }
-}
-
-void
-i915_interrupt_info(int kdev)
-{
- struct drm_device *dev = drm_get_device_from_kdev(kdev);
- struct inteldrm_softc *dev_priv = dev->dev_private;
-
- printf("Interrupt enable: %08x\n",
- I915_READ(IER));
- printf("Interrupt identity: %08x\n",
- I915_READ(IIR));
- printf("Interrupt mask: %08x\n",
- I915_READ(IMR));
- printf("Pipe A stat: %08x\n",
- I915_READ(_PIPEASTAT));
- printf("Pipe B stat: %08x\n",
- I915_READ(_PIPEBSTAT));
- printf("Interrupts received: 0\n");
- if (dev_priv->hw_status_page != NULL) {
- printf("Current sequence: %d\n",
- i915_get_gem_seqno(dev_priv));
- } else {
- printf("Current sequence: hws uninitialized\n");
- }
-}
-
-void
-i915_gem_fence_regs_info(int kdev)
-{
- struct drm_device *dev = drm_get_device_from_kdev(kdev);
- struct inteldrm_softc *dev_priv = dev->dev_private;
- int i;
-
- printf("Reserved fences = %d\n", dev_priv->fence_reg_start);
- printf("Total fences = %d\n", dev_priv->num_fence_regs);
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_obj *obj = dev_priv->fence_regs[i].obj;
-
- if (obj == NULL) {
- printf("Fenced object[%2d] = unused\n", i);
- } else {
- struct inteldrm_obj *obj_priv;
-
- obj_priv = (struct inteldrm_obj *)obj;
- printf("Fenced object[%2d] = %p: %s "
-			    "%08lx %08zx %08x %s %08x %08x %d",
- i, obj, get_pin_flag(obj_priv),
- obj_priv->gtt_offset,
- obj->size, obj_priv->stride,
- get_tiling_flag(obj_priv),
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- if (obj->name)
- printf(" (name: %d)", obj->name);
- printf("\n");
- }
- }
-}
-
-void
-i915_hws_info(int kdev)
-{
- struct drm_device *dev = drm_get_device_from_kdev(kdev);
- struct inteldrm_softc *dev_priv = dev->dev_private;
- int i;
- volatile u32 *hws;
-
- hws = (volatile u32 *)dev_priv->hw_status_page;
- if (hws == NULL)
- return;
-
- for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
- printf("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i * 4,
- hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
- }
-}
-
-static void
-i915_dump_pages(bus_space_tag_t bst, bus_space_handle_t bsh,
- bus_size_t size)
-{
- bus_addr_t offset = 0;
- int i = 0;
-
- /*
-	 * this is a bit odd so I don't have to play with the intel
- * tools too much.
- */
- for (offset = 0; offset < size; offset += 4, i += 4) {
- if (i == PAGE_SIZE)
- i = 0;
- printf("%08x : %08x\n", i, bus_space_read_4(bst, bsh,
- offset));
- }
-}
-
-void
-i915_batchbuffer_info(int kdev)
-{
- struct drm_device *dev = drm_get_device_from_kdev(kdev);
- struct inteldrm_softc *dev_priv = dev->dev_private;
- struct drm_obj *obj;
- struct inteldrm_obj *obj_priv;
- bus_space_handle_t bsh;
- int ret;
-
- TAILQ_FOREACH(obj_priv, &dev_priv->mm.active_list, list) {
- obj = &obj_priv->obj;
- if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- if ((ret = agp_map_subregion(dev_priv->agph,
- obj_priv->gtt_offset, obj->size, &bsh)) != 0) {
- DRM_ERROR("Failed to map pages: %d\n", ret);
- return;
- }
-			printf("--- gtt_offset = 0x%08lx\n",
- obj_priv->gtt_offset);
- i915_dump_pages(dev_priv->bst, bsh, obj->size);
-			/* unmap the object we just dumped, not the ring */
-			agp_unmap_subregion(dev_priv->agph, bsh, obj->size);
- }
- }
-}
-
-void
-i915_ringbuffer_data(int kdev)
-{
- struct drm_device *dev = drm_get_device_from_kdev(kdev);
- struct inteldrm_softc *dev_priv = dev->dev_private;
- bus_size_t off;
-
- if (!dev_priv->ring.ring_obj) {
- printf("No ringbuffer setup\n");
- return;
- }
-
- for (off = 0; off < dev_priv->ring.size; off += 4)
-		printf("%08lx : %08x\n", off, bus_space_read_4(dev_priv->bst,
- dev_priv->ring.bsh, off));
-}
-
-void
-i915_ringbuffer_info(int kdev)
-{
- struct drm_device *dev = drm_get_device_from_kdev(kdev);
- struct inteldrm_softc *dev_priv = dev->dev_private;
- u_int32_t head, tail;
-
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-
- printf("RingHead : %08x\n", head);
- printf("RingTail : %08x\n", tail);
- printf("RingMask : %08x\n", dev_priv->ring.size - 1);
- printf("RingSize : %08lx\n", dev_priv->ring.size);
- printf("Acthd : %08x\n", I915_READ(IS_I965G(dev_priv) ?
- ACTHD_I965 : ACTHD));
-}
-
-#endif
diff --git a/sys/dev/pci/drm/i915_drv.h b/sys/dev/pci/drm/i915_drv.h
deleted file mode 100644
index 8c17a67a048..00000000000
--- a/sys/dev/pci/drm/i915_drv.h
+++ /dev/null
@@ -1,765 +0,0 @@
-/* $OpenBSD: i915_drv.h,v 1.73 2012/09/25 10:19:46 jsg Exp $ */
-/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
- */
-/*
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _I915_DRV_H_
-#define _I915_DRV_H_
-
-#include "i915_reg.h"
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
-
-#define DRIVER_NAME "i915"
-#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080730"
-
-enum pipe {
- PIPE_A = 0,
- PIPE_B,
-};
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: Add Power Management
- * 1.3: Add vblank support
- * 1.4: Fix cmdbuffer path, add heap destroy
- * 1.5: Add vblank pipe configuration
- * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
- * - Support vertical blank on secondary display pipe
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 6
-#define DRIVER_PATCHLEVEL 0
-
-struct inteldrm_ring {
- struct drm_obj *ring_obj;
- bus_space_handle_t bsh;
- bus_size_t size;
- u_int32_t head;
- u_int32_t space;
- u_int32_t tail;
- u_int32_t woffset;
-};
-
-#define I915_FENCE_REG_NONE -1
-
-struct inteldrm_fence {
- TAILQ_ENTRY(inteldrm_fence) list;
- struct drm_obj *obj;
- u_int32_t last_rendering_seqno;
-};
-
-/*
- * lock ordering:
- * exec lock,
- * request lock,
- * list lock.
- *
- * XXX fence lock, object lock
- */
-struct inteldrm_softc {
- struct device dev;
- struct device *drmdev;
- bus_dma_tag_t agpdmat; /* tag from intagp for GEM */
- bus_dma_tag_t dmat;
- bus_space_tag_t bst;
- struct agp_map *agph;
-
- u_long flags;
- u_int16_t pci_device;
- int gen;
-
- pci_chipset_tag_t pc;
- pcitag_t tag;
- pci_intr_handle_t ih;
- void *irqh;
-
- struct vga_pci_bar *regs;
-
- union flush {
- struct {
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- } i9xx;
- struct {
- bus_dma_segment_t seg;
- caddr_t kva;
- } i8xx;
- } ifp;
- struct inteldrm_ring ring;
- struct workq *workq;
- struct vm_page *pgs;
- union hws {
- struct drm_obj *obj;
- struct drm_dmamem *dmamem;
- } hws;
-#define hws_obj hws.obj
-#define hws_dmamem hws.dmamem
- void *hw_status_page;
- size_t max_gem_obj_size; /* XXX */
-
- /* Protects user_irq_refcount and irq_mask reg */
- struct mutex user_irq_lock;
- /* Refcount for user irq, only enabled when needed */
- int user_irq_refcount;
- /* Cached value of IMR to avoid reads in updating the bitfield */
- u_int32_t irq_mask_reg;
- u_int32_t pipestat[2];
- /* these two ironlake only, we should union this with pipestat XXX */
- u_int32_t gt_irq_mask_reg;
- u_int32_t pch_irq_mask_reg;
-
- struct mutex fence_lock;
- struct inteldrm_fence fence_regs[16]; /* 965 */
- int fence_reg_start; /* 4 by default */
- int num_fence_regs; /* 8 pre-965, 16 post */
-
-#define INTELDRM_QUIET 0x01 /* suspend close, get off the hardware */
-#define INTELDRM_WEDGED 0x02 /* chipset hung pending reset */
-#define INTELDRM_SUSPENDED 0x04 /* in vt switch, no commands */
- int sc_flags; /* quiet, suspended, hung */
- /* number of ioctls + faults in flight */
- int entries;
-
- /* protects inactive, flushing, active and exec locks */
- struct mutex list_lock;
-
- /* protects access to request_list */
- struct mutex request_lock;
-
- /* Register state */
- u8 saveLBB;
- u32 saveDSPACNTR;
- u32 saveDSPBCNTR;
- u32 saveDSPARB;
- u32 saveHWS;
- u32 savePIPEACONF;
- u32 savePIPEBCONF;
- u32 savePIPEASRC;
- u32 savePIPEBSRC;
- u32 saveFPA0;
- u32 saveFPA1;
- u32 saveDPLL_A;
- u32 saveDPLL_A_MD;
- u32 saveHTOTAL_A;
- u32 saveHBLANK_A;
- u32 saveHSYNC_A;
- u32 saveVTOTAL_A;
- u32 saveVBLANK_A;
- u32 saveVSYNC_A;
- u32 saveBCLRPAT_A;
- u32 saveTRANSACONF;
- u32 saveTRANS_HTOTAL_A;
- u32 saveTRANS_HBLANK_A;
- u32 saveTRANS_HSYNC_A;
- u32 saveTRANS_VTOTAL_A;
- u32 saveTRANS_VBLANK_A;
- u32 saveTRANS_VSYNC_A;
- u32 savePIPEASTAT;
- u32 saveDSPASTRIDE;
- u32 saveDSPASIZE;
- u32 saveDSPAPOS;
- u32 saveDSPAADDR;
- u32 saveDSPASURF;
- u32 saveDSPATILEOFF;
- u32 savePFIT_PGM_RATIOS;
- u32 saveBLC_HIST_CTL;
- u32 saveBLC_PWM_CTL;
- u32 saveBLC_PWM_CTL2;
- u32 saveBLC_CPU_PWM_CTL;
- u32 saveBLC_CPU_PWM_CTL2;
- u32 saveFPB0;
- u32 saveFPB1;
- u32 saveDPLL_B;
- u32 saveDPLL_B_MD;
- u32 saveHTOTAL_B;
- u32 saveHBLANK_B;
- u32 saveHSYNC_B;
- u32 saveVTOTAL_B;
- u32 saveVBLANK_B;
- u32 saveVSYNC_B;
- u32 saveBCLRPAT_B;
- u32 saveTRANSBCONF;
- u32 saveTRANS_HTOTAL_B;
- u32 saveTRANS_HBLANK_B;
- u32 saveTRANS_HSYNC_B;
- u32 saveTRANS_VTOTAL_B;
- u32 saveTRANS_VBLANK_B;
- u32 saveTRANS_VSYNC_B;
- u32 savePIPEBSTAT;
- u32 saveDSPBSTRIDE;
- u32 saveDSPBSIZE;
- u32 saveDSPBPOS;
- u32 saveDSPBADDR;
- u32 saveDSPBSURF;
- u32 saveDSPBTILEOFF;
- u32 saveVGA0;
- u32 saveVGA1;
- u32 saveVGA_PD;
- u32 saveVGACNTRL;
- u32 saveADPA;
- u32 saveLVDS;
- u32 savePP_ON_DELAYS;
- u32 savePP_OFF_DELAYS;
- u32 saveDVOA;
- u32 saveDVOB;
- u32 saveDVOC;
- u32 savePP_ON;
- u32 savePP_OFF;
- u32 savePP_CONTROL;
- u32 savePP_DIVISOR;
- u32 savePFIT_CONTROL;
- u32 save_palette_a[256];
- u32 save_palette_b[256];
- u32 saveDPFC_CB_BASE;
- u32 saveFBC_CFB_BASE;
- u32 saveFBC_LL_BASE;
- u32 saveFBC_CONTROL;
- u32 saveFBC_CONTROL2;
- u32 saveIER;
- u32 saveIIR;
- u32 saveIMR;
- u32 saveDEIER;
- u32 saveDEIMR;
- u32 saveGTIER;
- u32 saveGTIMR;
- u32 saveFDI_RXA_IMR;
- u32 saveFDI_RXB_IMR;
- u32 saveCACHE_MODE_0;
- u32 saveD_STATE;
- u32 saveDSPCLK_GATE_D;
- u32 saveDSPCLK_GATE;
- u32 saveRENCLK_GATE_D1;
- u32 saveRENCLK_GATE_D2;
- u32 saveRAMCLK_GATE_D;
- u32 saveDEUC;
- u32 saveMI_ARB_STATE;
- u32 saveSWF0[16];
- u32 saveSWF1[16];
- u32 saveSWF2[3];
- u8 saveMSR;
- u8 saveSR[8];
- u8 saveGR[25];
- u8 saveAR_INDEX;
- u8 saveAR[21];
- u8 saveDACMASK;
- u8 saveCR[37];
- uint64_t saveFENCE[16];
- u32 saveCURACNTR;
- u32 saveCURAPOS;
- u32 saveCURABASE;
- u32 saveCURBCNTR;
- u32 saveCURBPOS;
- u32 saveCURBBASE;
- u32 saveCURSIZE;
- u32 saveDP_B;
- u32 saveDP_C;
- u32 saveDP_D;
- u32 savePIPEA_GMCH_DATA_M;
- u32 savePIPEB_GMCH_DATA_M;
- u32 savePIPEA_GMCH_DATA_N;
- u32 savePIPEB_GMCH_DATA_N;
- u32 savePIPEA_DP_LINK_M;
- u32 savePIPEB_DP_LINK_M;
- u32 savePIPEA_DP_LINK_N;
- u32 savePIPEB_DP_LINK_N;
- u32 saveFDI_RXA_CTL;
- u32 saveFDI_TXA_CTL;
- u32 saveFDI_RXB_CTL;
- u32 saveFDI_TXB_CTL;
- u32 savePFA_CTL_1;
- u32 savePFB_CTL_1;
- u32 savePFA_WIN_SZ;
- u32 savePFB_WIN_SZ;
- u32 savePFA_WIN_POS;
- u32 savePFB_WIN_POS;
- u32 savePCH_DREF_CONTROL;
- u32 saveDISP_ARB_CTL;
- u32 savePIPEA_DATA_M1;
- u32 savePIPEA_DATA_N1;
- u32 savePIPEA_LINK_M1;
- u32 savePIPEA_LINK_N1;
- u32 savePIPEB_DATA_M1;
- u32 savePIPEB_DATA_N1;
- u32 savePIPEB_LINK_M1;
- u32 savePIPEB_LINK_N1;
- u32 saveMCHBAR_RENDER_STANDBY;
- u32 savePCH_PORT_HOTPLUG;
-
- struct {
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- TAILQ_HEAD(i915_gem_list, inteldrm_obj) active_list;
-
- /**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * last_rendering_seqno is 0 while an object is in this list
- *
- * A reference is held on the buffer while on this list.
- */
- struct i915_gem_list flushing_list;
-
- /*
- * list of objects currently pending a GPU write flush.
- *
- * All elements on this list will either be on the active
-	 * or flushing list, last_rendering_seqno differentiates the
- * two.
- */
- struct i915_gem_list gpu_write_list;
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct i915_gem_list inactive_list;
-
- /* Fence LRU */
- TAILQ_HEAD(i915_fence, inteldrm_fence) fence_list;
-
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- TAILQ_HEAD(i915_request , inteldrm_request) request_list;
-
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests in a workq.
- */
- struct timeout retire_timer;
- struct timeout hang_timer;
- /* for hangcheck */
- int hang_cnt;
- u_int32_t last_acthd;
- u_int32_t last_instdone;
- u_int32_t last_instdone1;
-
- uint32_t next_gem_seqno;
-
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
- /**
- * Flag if the hardware appears to be wedged.
- *
- * This is set when attempts to idle the device timeout.
-	 * It prevents command submission from occurring and makes
-	 * every pending request fail.
- */
- int wedged;
-
- /** Bit 6 swizzling required for X tiling */
- uint32_t bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- uint32_t bit_6_swizzle_y;
- } mm;
-};
-
-struct inteldrm_file {
- struct drm_file file_priv;
- struct {
- } mm;
-};
-
-/* chip type flags */
-#define CHIP_I830 0x00001
-#define CHIP_I845G 0x00002
-#define CHIP_I85X 0x00004
-#define CHIP_I865G 0x00008
-#define CHIP_I9XX 0x00010
-#define CHIP_I915G 0x00020
-#define CHIP_I915GM 0x00040
-#define CHIP_I945G 0x00080
-#define CHIP_I945GM 0x00100
-#define CHIP_I965 0x00200
-#define CHIP_I965GM 0x00400
-#define CHIP_G33 0x00800
-#define CHIP_GM45 0x01000
-#define CHIP_G4X 0x02000
-#define CHIP_M 0x04000
-#define CHIP_HWS 0x08000
-#define CHIP_GEN2 0x10000
-#define CHIP_GEN3 0x20000
-#define CHIP_GEN4 0x40000
-#define CHIP_GEN6 0x80000
-#define CHIP_PINEVIEW 0x100000
-#define CHIP_IRONLAKE 0x200000
-#define CHIP_IRONLAKE_D 0x400000
-#define CHIP_IRONLAKE_M 0x800000
-#define CHIP_SANDYBRIDGE 0x1000000
-#define CHIP_IVYBRIDGE 0x2000000
-#define CHIP_GEN7 0x4000000
-
-/* flags we use in drm_obj's do_flags */
-#define I915_ACTIVE 0x0010 /* being used by the gpu. */
-#define I915_IN_EXEC 0x0020 /* being processed in execbuffer */
-#define I915_USER_PINNED 0x0040 /* BO has been pinned from userland */
-#define I915_GPU_WRITE 0x0080 /* BO has been not flushed */
-#define I915_DONTNEED 0x0100 /* BO backing pages purgable */
-#define I915_PURGED 0x0200 /* BO backing pages purged */
-#define I915_DIRTY 0x0400 /* BO written to since last bound */
-#define I915_EXEC_NEEDS_FENCE 0x0800 /* being processed but will need fence*/
-#define I915_FENCED_EXEC 0x1000 /* Most recent exec needs fence */
-#define I915_FENCE_INVALID 0x2000 /* fence has been lazily invalidated */
-
-/** driver private structure attached to each drm_gem_object */
-struct inteldrm_obj {
- struct drm_obj obj;
-
- /** This object's place on the active/flushing/inactive lists */
- TAILQ_ENTRY(inteldrm_obj) list;
- TAILQ_ENTRY(inteldrm_obj) write_list;
- struct i915_gem_list *current_list;
- /* GTT binding. */
- bus_dmamap_t dmamap;
- bus_dma_segment_t *dma_segs;
- /* Current offset of the object in GTT space. */
- bus_addr_t gtt_offset;
- u_int32_t *bit_17;
- /* extra flags to bus_dma */
- int dma_flags;
- /* Fence register for this object. needed for tiling. */
- int fence_reg;
- /** refcount for times pinned this object in GTT space */
- int pin_count;
- /* number of times pinned by pin ioctl. */
- u_int user_pin_count;
-
- /** Breadcrumb of last rendering to the buffer. */
- u_int32_t last_rendering_seqno;
- u_int32_t last_write_seqno;
- /** Current tiling mode for the object. */
- u_int32_t tiling_mode;
- u_int32_t stride;
-};
-
-/**
- * Request queue structure.
- *
- * The request queue allows us to note sequence numbers that have been emitted
- * and may be associated with active buffers to be retired.
- *
- * By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
- * an emission time with seqnos for tracking how far ahead of the GPU we are.
- */
-struct inteldrm_request {
- TAILQ_ENTRY(inteldrm_request) list;
- /** GEM sequence number associated with this request. */
- uint32_t seqno;
-};
-
-u_int32_t inteldrm_read_hws(struct inteldrm_softc *, int);
-int inteldrm_wait_ring(struct inteldrm_softc *dev, int n);
-void inteldrm_begin_ring(struct inteldrm_softc *, int);
-void inteldrm_out_ring(struct inteldrm_softc *, u_int32_t);
-void inteldrm_advance_ring(struct inteldrm_softc *);
-void inteldrm_update_ring(struct inteldrm_softc *);
-int inteldrm_pipe_enabled(struct inteldrm_softc *, int);
-int i915_init_phys_hws(struct inteldrm_softc *, bus_dma_tag_t);
-
-/* i915_irq.c */
-
-extern int i915_driver_irq_install(struct drm_device * dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern void i915_user_irq_get(struct inteldrm_softc *);
-extern void i915_user_irq_put(struct inteldrm_softc *);
-
-/* i915_suspend.c */
-
-extern void i915_save_display(struct inteldrm_softc *);
-extern void i915_restore_display(struct inteldrm_softc *);
-extern int i915_save_state(struct inteldrm_softc *);
-extern int i915_restore_state(struct inteldrm_softc *);
-
-/* XXX need bus_space_write_8; emulated with two 32-bit accesses */
-static __inline void
-write64(struct inteldrm_softc *dev_priv, bus_size_t off, u_int64_t reg)
-{
- bus_space_write_4(dev_priv->regs->bst, dev_priv->regs->bsh,
- off, (u_int32_t)reg);
- bus_space_write_4(dev_priv->regs->bst, dev_priv->regs->bsh,
- off + 4, upper_32_bits(reg));
-}
-
-static __inline u_int64_t
-read64(struct inteldrm_softc *dev_priv, bus_size_t off)
-{
- u_int32_t low, high;
-
- low = bus_space_read_4(dev_priv->regs->bst,
- dev_priv->regs->bsh, off);
- high = bus_space_read_4(dev_priv->regs->bst,
- dev_priv->regs->bsh, off + 4);
-
- return ((u_int64_t)low | ((u_int64_t)high << 32));
-}
-
-#define I915_READ64(off) read64(dev_priv, off)
-
-#define I915_WRITE64(off, reg) write64(dev_priv, off, reg)
-
-#define I915_READ(reg) bus_space_read_4(dev_priv->regs->bst, \
- dev_priv->regs->bsh, (reg))
-#define I915_WRITE(reg,val) bus_space_write_4(dev_priv->regs->bst, \
- dev_priv->regs->bsh, (reg), (val))
-#define I915_READ16(reg) bus_space_read_2(dev_priv->regs->bst, \
- dev_priv->regs->bsh, (reg))
-#define I915_WRITE16(reg,val) bus_space_write_2(dev_priv->regs->bst, \
- dev_priv->regs->bsh, (reg), (val))
-#define I915_READ8(reg) bus_space_read_1(dev_priv->regs->bst, \
- dev_priv->regs->bsh, (reg))
-#define I915_WRITE8(reg,val) bus_space_write_1(dev_priv->regs->bst, \
- dev_priv->regs->bsh, (reg), (val))
-
-#define POSTING_READ(reg) (void)I915_READ(reg)
-#define POSTING_READ16(reg) (void)I915_READ16(reg)
-
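POSTING_READ() exists to flush a register write past any posted-write
buffering: write the register, then read it back and discard the value.
A typical use, as seen throughout the interrupt code (fragment only;
dev_priv as elsewhere in this header):

	I915_WRITE(IMR, dev_priv->irq_mask_reg);	/* update the mask */
	POSTING_READ(IMR);				/* force the write out */
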
-#define INTELDRM_VERBOSE 0
-#if INTELDRM_VERBOSE > 0
-#define INTELDRM_VPRINTF(fmt, args...) DRM_INFO(fmt, ##args)
-#else
-#define INTELDRM_VPRINTF(fmt, args...)
-#endif
-
-#define BEGIN_LP_RING(n) inteldrm_begin_ring(dev_priv, n)
-#define OUT_RING(n) inteldrm_out_ring(dev_priv, n)
-#define ADVANCE_LP_RING() inteldrm_advance_ring(dev_priv)
-
-/* MCH IFP BARs */
-#define I915_IFPADDR 0x60
-#define I965_IFPADDR 0x70
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) inteldrm_read_hws(dev_priv, reg)
-#define I915_GEM_HWS_INDEX 0x20
-
-/* Chipset type macros */
-
-#define IS_I830(dev_priv) ((dev_priv)->flags & CHIP_I830)
-#define IS_845G(dev_priv) ((dev_priv)->flags & CHIP_I845G)
-#define IS_I85X(dev_priv) ((dev_priv)->flags & CHIP_I85X)
-#define IS_I865G(dev_priv) ((dev_priv)->flags & CHIP_I865G)
-
-#define IS_I915G(dev_priv) ((dev_priv)->flags & CHIP_I915G)
-#define IS_I915GM(dev_priv) ((dev_priv)->flags & CHIP_I915GM)
-#define IS_I945G(dev_priv) ((dev_priv)->flags & CHIP_I945G)
-#define IS_I945GM(dev_priv) ((dev_priv)->flags & CHIP_I945GM)
-#define IS_I965G(dev_priv) ((dev_priv)->flags & CHIP_I965)
-#define IS_I965GM(dev_priv) ((dev_priv)->flags & CHIP_I965GM)
-
-#define IS_GM45(dev_priv) ((dev_priv)->flags & CHIP_GM45)
-#define IS_G4X(dev_priv) ((dev_priv)->flags & CHIP_G4X)
-
-#define IS_G33(dev_priv) ((dev_priv)->flags & CHIP_G33)
-
-#define IS_I9XX(dev_priv) ((dev_priv)->flags & CHIP_I9XX)
-
-#define IS_IRONLAKE(dev_priv) ((dev_priv)->flags & CHIP_IRONLAKE)
-#define IS_IRONLAKE_D(dev_priv) ((dev_priv)->flags & CHIP_IRONLAKE_D)
-#define IS_IRONLAKE_M(dev_priv) ((dev_priv)->flags & CHIP_IRONLAKE_M)
-
-#define IS_SANDYBRIDGE(dev_priv) ((dev_priv)->flags & CHIP_SANDYBRIDGE)
-#define IS_SANDYBRIDGE_D(dev_priv) ((dev_priv)->flags & CHIP_SANDYBRIDGE_D)
-#define IS_SANDYBRIDGE_M(dev_priv) ((dev_priv)->flags & CHIP_SANDYBRIDGE_M)
-
-#define IS_MOBILE(dev_priv) (dev_priv->flags & CHIP_M)
-
-#define I915_NEED_GFX_HWS(dev_priv) (dev_priv->flags & CHIP_HWS)
-
-#define HAS_RESET(dev_priv)	(IS_I965G(dev_priv) && !IS_GEN6(dev_priv) && \
-				 !IS_GEN7(dev_priv))
-
-#define IS_GEN2(dev_priv) (dev_priv->flags & CHIP_GEN2)
-#define IS_GEN3(dev_priv) (dev_priv->flags & CHIP_GEN3)
-#define IS_GEN4(dev_priv) (dev_priv->flags & CHIP_GEN4)
-#define IS_GEN6(dev_priv) (dev_priv->flags & CHIP_GEN6)
-#define IS_GEN7(dev_priv) (dev_priv->flags & CHIP_GEN7)
-
-#define I915_HAS_FBC(dev) (IS_I965GM(dev) || IS_GM45(dev))
-
-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev) || \
- IS_GEN7(dev))
-
-#define INTEL_INFO(dev) (dev)
-
-/*
- * Interrupts that are always left unmasked.
- *
- * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
- * we leave them always unmasked in IMR and then control enabling them through
- * PIPESTAT alone.
- */
-#define I915_INTERRUPT_ENABLE_FIX \
- (I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-
-/* Interrupts that we mask and unmask at runtime */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
-
-/* These are all of the interrupts used by the driver */
-#define I915_INTERRUPT_ENABLE_MASK \
- (I915_INTERRUPT_ENABLE_FIX | \
- I915_INTERRUPT_ENABLE_VAR)
-
-/*
- * if kms we want pch event, gse, and plane flip masks too
- */
-#define PCH_SPLIT_DISPLAY_INTR_FIX (DE_MASTER_IRQ_CONTROL)
-#define PCH_SPLIT_DISPLAY_INTR_VAR (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK)
-#define PCH_SPLIT_DISPLAY_ENABLE_MASK \
- (PCH_SPLIT_DISPLAY_INTR_FIX | PCH_SPLIT_DISPLAY_INTR_VAR)
-#define PCH_SPLIT_RENDER_INTR_FIX (0)
-#define PCH_SPLIT_RENDER_INTR_VAR (GT_USER_INTERRUPT | GT_MASTER_ERROR)
-#define PCH_SPLIT_RENDER_ENABLE_MASK \
- (PCH_SPLIT_RENDER_INTR_FIX | PCH_SPLIT_RENDER_INTR_VAR)
-/* not yet */
-#define PCH_SPLIT_HOTPLUG_INTR_FIX (0)
-#define PCH_SPLIT_HOTPLUG_INTR_VAR (0)
-#define PCH_SPLIT_HOTPLUG_ENABLE_MASK \
- (PCH_SPLIT_HOTPLUG_INTR_FIX | PCH_SPLIT_HOTPLUG_INTR_VAR)
-
-#define printeir(val) printf("%s: error reg: %b\n", __func__, val, \
- "\20\x10PTEERR\x2REFRESHERR\x1INSTERR")
-
-/*
- * With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changes the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev_priv) (IS_I9XX(dev_priv) && \
- !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
-
-#define PRIMARY_RINGBUFFER_SIZE (128*1024)
-
-/* Inlines */
-
-/**
- * Returns true if seq1 is later than seq2.
- */
-static __inline int
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
- return ((int32_t)(seq1 - seq2) >= 0);
-}
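
The signed subtraction above keeps the comparison correct across 32-bit
wraparound of the sequence counter. A standalone check of that property
(local copy of the helper, illustrative values):

#include <assert.h>
#include <stdint.h>

static int
seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return ((int32_t)(seq1 - seq2) >= 0);
}

int
main(void)
{
	/* the counter wrapped: 0xfffffffe was emitted just before 2 */
	assert(seqno_passed(2, 0xfffffffeU));	/* (int32_t)4 >= 0 */
	assert(!seqno_passed(0xfffffffeU, 2));	/* (int32_t)-4 < 0 */
	return (0);
}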
-
-/*
- * Read sequence number from the hardware status page.
- */
-static __inline u_int32_t
-i915_get_gem_seqno(struct inteldrm_softc *dev_priv)
-{
- return (READ_HWSP(dev_priv, I915_GEM_HWS_INDEX));
-}
-
-static __inline int
-i915_obj_purgeable(struct inteldrm_obj *obj_priv)
-{
- return (obj_priv->obj.do_flags & I915_DONTNEED);
-}
-
-static __inline int
-i915_obj_purged(struct inteldrm_obj *obj_priv)
-{
- return (obj_priv->obj.do_flags & I915_PURGED);
-}
-
-static __inline int
-inteldrm_is_active(struct inteldrm_obj *obj_priv)
-{
- return (obj_priv->obj.do_flags & I915_ACTIVE);
-}
-
-static __inline int
-inteldrm_is_dirty(struct inteldrm_obj *obj_priv)
-{
- return (obj_priv->obj.do_flags & I915_DIRTY);
-}
-
-static __inline int
-inteldrm_exec_needs_fence(struct inteldrm_obj *obj_priv)
-{
- return (obj_priv->obj.do_flags & I915_EXEC_NEEDS_FENCE);
-}
-
-static __inline int
-inteldrm_needs_fence(struct inteldrm_obj *obj_priv)
-{
- return (obj_priv->obj.do_flags & I915_FENCED_EXEC);
-}
-
-#endif
diff --git a/sys/dev/pci/drm/i915_irq.c b/sys/dev/pci/drm/i915_irq.c
deleted file mode 100644
index 99edae49483..00000000000
--- a/sys/dev/pci/drm/i915_irq.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/* $OpenBSD: i915_irq.c,v 1.54 2012/09/25 10:19:46 jsg Exp $ */
-/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-void i915_enable_irq(struct inteldrm_softc *, u_int32_t);
-void i915_disable_irq(struct inteldrm_softc *, u_int32_t);
-void ironlake_enable_graphics_irq(struct inteldrm_softc *, u_int32_t);
-void ironlake_disable_graphics_irq(struct inteldrm_softc *, u_int32_t);
-void ironlake_enable_display_irq(struct inteldrm_softc *, u_int32_t);
-void ironlake_disable_display_irq(struct inteldrm_softc *, u_int32_t);
-void i915_enable_pipestat(struct inteldrm_softc *, int, u_int32_t);
-void i915_disable_pipestat(struct inteldrm_softc *, int, u_int32_t);
-int ironlake_irq_install(struct inteldrm_softc *);
-
-void
-i915_enable_irq(struct inteldrm_softc *dev_priv, u_int32_t mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void)I915_READ(IMR);
- }
-}
-
-void
-i915_disable_irq(struct inteldrm_softc *dev_priv, u_int32_t mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void)I915_READ(IMR);
- }
-}
-
-inline void
-ironlake_enable_graphics_irq(struct inteldrm_softc *dev_priv, u_int32_t mask)
-{
- if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
- /* XXX imr bullshit */
- dev_priv->gt_irq_mask_reg &= ~mask;
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- I915_WRITE(0x20a8, dev_priv->gt_irq_mask_reg);
- (void)I915_READ(0x20a8);
- } else {
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void)I915_READ(GTIMR);
- }
- }
-}
-
-inline void
-ironlake_disable_graphics_irq(struct inteldrm_softc *dev_priv, u_int32_t mask)
-{
- if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
- dev_priv->gt_irq_mask_reg |= mask;
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- I915_WRITE(0x20a8, dev_priv->gt_irq_mask_reg);
- (void)I915_READ(0x20a8);
- } else {
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void)I915_READ(GTIMR);
- }
- }
-}
-
-/* display hotplug and vblank irqs */
-inline void
-ironlake_enable_display_irq(struct inteldrm_softc *dev_priv, u_int32_t mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void)I915_READ(DEIMR);
- }
-}
-
-inline void
-ironlake_disable_display_irq(struct inteldrm_softc *dev_priv, u_int32_t mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void)I915_READ(DEIMR);
- }
-}
-
-void
-i915_enable_pipestat(struct inteldrm_softc *dev_priv, int pipe, u_int32_t mask)
-{
- if ((dev_priv->pipestat[pipe] & mask) != mask) {
- bus_size_t reg = pipe == 0 ? _PIPEASTAT : _PIPEBSTAT;
-
- dev_priv->pipestat[pipe] |= mask;
- /* Enable the interrupt, clear and pending status */
- I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
- (void)I915_READ(reg);
- }
-}
-
-void
-i915_disable_pipestat(struct inteldrm_softc *dev_priv, int pipe, u_int32_t mask)
-{
- if ((dev_priv->pipestat[pipe] & mask) != 0) {
- bus_size_t reg = pipe == 0 ? _PIPEASTAT : _PIPEBSTAT;
-
- dev_priv->pipestat[pipe] &= ~mask;
- I915_WRITE(reg, dev_priv->pipestat[pipe]);
- (void)I915_READ(reg);
- }
-}
-
-/**
- * inteldrm_pipe_enabled - check if a pipe is enabled
- * @dev_priv: inteldrm softc
- * @pipe: pipe to check
- *
- * Reading certain registers when the pipe is disabled can hang the chip.
- * Use this routine to make sure the PLL is running and the pipe is active
- * before reading such registers if unsure.
- */
-int
-inteldrm_pipe_enabled(struct inteldrm_softc *dev_priv, int pipe)
-{
- return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
-}
-
-u_int32_t
-i915_get_vblank_counter(struct drm_device *dev, int pipe)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
- bus_size_t high_frame, low_frame;
- u_int32_t high1, high2, low;
-
- if (inteldrm_pipe_enabled(dev_priv, pipe) == 0) {
- DRM_DEBUG("trying to get vblank count for disabled pipe %d\n",
- pipe);
- return (0);
- }
-
- high_frame = PIPEFRAME(pipe);
- low_frame = PIPEFRAMEPIXEL(pipe);
-
- /* GM45 just had to be different... */
- if (IS_GM45(dev_priv) || IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) ||
- IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- return (I915_READ(PIPE_FRMCOUNT_GM45(pipe)));
- }
-
- /*
- * High & low register fields aren't synchronized, so make sure
- * we get a low value that's stable across two reads of the high
- * register.
- */
- do {
- high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
- low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
- PIPE_FRAME_LOW_SHIFT);
- high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
- } while (high1 != high2);
-
- return ((high1 << 8) | low);
-}
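
The double read of the high register above guards against the low byte
wrapping between reads; the result is a 24-bit frame count, matching
the vb_max of 0xffffff set at IRQ install time. Illustrative assembly
of the count with assumed field values:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t high1 = 0x1234;	/* PIPE_FRAME_HIGH field */
	uint32_t low = 0x56;		/* PIPE_FRAME_LOW field */

	/* same combination as i915_get_vblank_counter() above */
	printf("frame count: 0x%06x\n", (high1 << 8) | low);
	return (0);
}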
-
-void
-i915_user_irq_get(struct inteldrm_softc *dev_priv)
-{
- if (++dev_priv->user_irq_refcount == 1) {
- if (HAS_PCH_SPLIT(dev_priv))
- ironlake_enable_graphics_irq(dev_priv,
- GT_USER_INTERRUPT);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
- }
-}
-
-void
-i915_user_irq_put(struct inteldrm_softc *dev_priv)
-{
- if (--dev_priv->user_irq_refcount == 0) {
- if (HAS_PCH_SPLIT(dev_priv))
- ironlake_disable_graphics_irq(dev_priv,
- GT_USER_INTERRUPT);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
- }
-}
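
The get/put pair above is meant to bracket any code that sleeps waiting
on a sequence number, so the user interrupt is unmasked only while
someone cares about it. A sketch of the intended caller (hypothetical
fragment; the real wait loop lives in the GEM code):

	i915_user_irq_get(dev_priv);	/* unmask the user interrupt */
	/* ... sleep until i915_get_gem_seqno() passes the target ... */
	i915_user_irq_put(dev_priv);	/* mask it again once done */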
-
-int
-i915_enable_vblank(struct drm_device *dev, int pipe)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
-
- if (inteldrm_pipe_enabled(dev_priv, pipe) == 0)
- return (EINVAL);
-
- mtx_enter(&dev_priv->user_irq_lock);
- if (IS_GEN7(dev_priv))
- ironlake_enable_display_irq(dev_priv,
- DE_PIPEA_VBLANK_IVB << (5 * pipe));
- else if (HAS_PCH_SPLIT(dev_priv))
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- else
- i915_enable_pipestat(dev_priv, pipe, (IS_I965G(dev_priv) ?
- PIPE_START_VBLANK_INTERRUPT_ENABLE :
- PIPE_VBLANK_INTERRUPT_ENABLE));
- mtx_leave(&dev_priv->user_irq_lock);
-
- return (0);
-}
-
-void
-i915_disable_vblank(struct drm_device *dev, int pipe)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
-
- mtx_enter(&dev_priv->user_irq_lock);
- if (IS_GEN7(dev_priv))
- ironlake_disable_display_irq(dev_priv,
- DE_PIPEA_VBLANK_IVB << (pipe * 5));
- else if (HAS_PCH_SPLIT(dev_priv))
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- else
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_ENABLE |
- PIPE_VBLANK_INTERRUPT_ENABLE);
- mtx_leave(&dev_priv->user_irq_lock);
-}
-
-/* drm_dma.h hooks */
-int
-i915_driver_irq_install(struct drm_device *dev)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
-
- dev->vblank->vb_max = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) || IS_GEN6(dev_priv) ||
- IS_GEN7(dev_priv))
- dev->vblank->vb_max = 0xffffffff;
-
- I915_WRITE(HWSTAM, 0xeffe);
-
- if (HAS_PCH_SPLIT(dev_priv))
- return (ironlake_irq_install(dev_priv));
-
- I915_WRITE(_PIPEASTAT, 0);
- I915_WRITE(_PIPEBSTAT, 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
- (void)I915_READ(IER);
-
-
- /*
- * Enable some error detection, note the instruction error mask
- * bit is reserved, so we leave it masked.
- */
- I915_WRITE(EMR, IS_G4X(dev_priv) ?
- ~(GM45_ERROR_PAGE_TABLE | GM45_ERROR_MEM_PRIV |
- GM45_ERROR_CP_PRIV | I915_ERROR_MEMORY_REFRESH) :
- ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
-
- /*
-	 * Disable pipe interrupt enables and clear pending pipe status,
-	 * then add back the enabled interrupts from previous iterations
-	 * (say in the reset case, where we want vblank interrupts etc. to
-	 * be switched back on if they were running).
- */
- I915_WRITE(_PIPEASTAT, (I915_READ(_PIPEASTAT) & 0x8000ffff) |
- dev_priv->pipestat[0]);
- I915_WRITE(_PIPEBSTAT, (I915_READ(_PIPEBSTAT) & 0x8000ffff) |
- dev_priv->pipestat[1]);
- /* Clear pending interrupt status */
- I915_WRITE(IIR, I915_READ(IIR));
-
- I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void)I915_READ(IER);
-
- return (0);
-}
-
-int
-ironlake_irq_install(struct inteldrm_softc *dev_priv)
-{
- /* mask and ack everything before we turn anything on. */
- /*
-	 * XXX this is a legacy of the old preinstall/postinstall split.
- * I wonder if we could avoid this now...
- */
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- (void)I915_READ(DEIER);
-
- /* GT */
-	I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- (void)I915_READ(GTIER);
-
- /*
-	 * Everything is turned off now and everything acked;
-	 * now we can set everything up.
- */
-
- I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- I915_WRITE(DEIER, PCH_SPLIT_DISPLAY_ENABLE_MASK);
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- I915_WRITE(GTIER, PCH_SPLIT_RENDER_ENABLE_MASK);
-
- /* south display irq -- hotplug off for now */
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
- I915_WRITE(SDEIER, PCH_SPLIT_HOTPLUG_ENABLE_MASK);
- (void)I915_READ(SDEIER);
-
- return (0);
-}
-
-void
-i915_driver_irq_uninstall(struct drm_device *dev)
-{
- struct inteldrm_softc *dev_priv = dev->dev_private;
-
- I915_WRITE(HWSTAM, 0xffffffff);
-
- if (HAS_PCH_SPLIT(dev_priv)) {
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- I915_WRITE(DEIIR, I915_READ(DEIIR));
-
-	I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- } else {
- I915_WRITE(_PIPEASTAT, 0);
- I915_WRITE(_PIPEBSTAT, 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
-
- I915_WRITE(_PIPEASTAT, I915_READ(_PIPEASTAT) & 0x8000ffff);
- I915_WRITE(_PIPEBSTAT, I915_READ(_PIPEBSTAT) & 0x8000ffff);
- I915_WRITE(IIR, I915_READ(IIR));
- }
-}
diff --git a/sys/dev/pci/drm/i915_suspend.c b/sys/dev/pci/drm/i915_suspend.c
deleted file mode 100644
index ce6471cdce6..00000000000
--- a/sys/dev/pci/drm/i915_suspend.c
+++ /dev/null
@@ -1,927 +0,0 @@
-/*
- *
- * Copyright 2008 (c) Intel Corporation
- * Jesse Barnes <jbarnes@virtuousgeek.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-int i915_pipe_enabled(struct inteldrm_softc *, enum pipe);
-void i915_save_palette(struct inteldrm_softc *, enum pipe);
-void i915_restore_palette(struct inteldrm_softc *, enum pipe);
-u_int8_t i915_read_indexed(struct inteldrm_softc *, u_int16_t,
- u_int16_t, u_int8_t);
-void i915_write_indexed(struct inteldrm_softc *, u_int16_t,
- u_int16_t, u_int8_t, u_int8_t);
-void i915_write_ar(struct inteldrm_softc *, u_int16_t, u_int8_t,
- u_int8_t, u_int16_t);
-u_int8_t i915_read_ar(struct inteldrm_softc *, u_int16_t,
- u_int8_t, u_int16_t);
-void i915_save_vga(struct inteldrm_softc *);
-void i915_restore_vga(struct inteldrm_softc *);
-void i915_save_modeset_reg(struct inteldrm_softc *);
-void i915_restore_modeset_reg(struct inteldrm_softc *);
-
-int
-i915_pipe_enabled(struct inteldrm_softc *dev, enum pipe pipe)
-{
- struct inteldrm_softc *dev_priv = dev;
- u32 dpll_reg;
-
- /* On IVB, 3rd pipe shares PLL with another one */
- if (pipe > 1)
- return 0;
-
- if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
- else
- dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
- return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-void
-i915_save_palette(struct inteldrm_softc *dev, enum pipe pipe)
-{
- struct inteldrm_softc *dev_priv = dev;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
- else
- array = dev_priv->save_palette_b;
-
- for (i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-void
-i915_restore_palette(struct inteldrm_softc *dev, enum pipe pipe)
-{
- struct inteldrm_softc *dev_priv = dev;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
- else
- array = dev_priv->save_palette_b;
-
- for (i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
-
-u_int8_t
-i915_read_indexed(struct inteldrm_softc *dev_priv, u_int16_t index_port,
- u_int16_t data_port, u_int8_t reg)
-{
- I915_WRITE8(index_port, reg);
- return I915_READ8(data_port);
-}
-
-u_int8_t
-i915_read_ar(struct inteldrm_softc *dev_priv, u_int16_t st01,
- u_int8_t reg, u_int16_t palette_enable)
-{
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
- return I915_READ8(VGA_AR_DATA_READ);
-}
-
-void
-i915_write_ar(struct inteldrm_softc *dev_priv, u_int16_t st01, u_int8_t reg,
- u_int8_t val, u_int16_t palette_enable)
-{
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
- I915_WRITE8(VGA_AR_DATA_WRITE, val);
-}
-
-void
-i915_write_indexed(struct inteldrm_softc *dev_priv, u_int16_t index_port,
- u_int16_t data_port, u_int8_t reg, u_int8_t val)
-{
- I915_WRITE8(index_port, reg);
- I915_WRITE8(data_port, val);
-}
-
-void
-i915_save_vga(struct inteldrm_softc *dev)
-{
- struct inteldrm_softc *dev_priv = dev;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* VGA color palette registers */
- dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
-
- /* MSR bits */
- dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* CRT controller regs */
- i915_write_indexed(dev, cr_index, cr_data, 0x11,
- i915_read_indexed(dev, cr_index, cr_data, 0x11) &
- (~0x80));
- for (i = 0; i <= 0x24; i++)
- dev_priv->saveCR[i] =
- i915_read_indexed(dev, cr_index, cr_data, i);
- /* Make sure we don't turn off CR group 0 writes */
- dev_priv->saveCR[0x11] &= ~0x80;
-
- /* Attribute controller registers */
- I915_READ8(st01);
- dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
- for (i = 0; i <= 0x14; i++)
- dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
- I915_READ8(st01);
-
- /* Graphics controller registers */
- for (i = 0; i < 9; i++)
- dev_priv->saveGR[i] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
-
- dev_priv->saveGR[0x10] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->saveGR[0x11] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->saveGR[0x18] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-
- /* Sequencer registers */
- for (i = 0; i < 8; i++)
- dev_priv->saveSR[i] =
- i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
-}
-
-void
-i915_restore_vga(struct inteldrm_softc *dev)
-{
- struct inteldrm_softc *dev_priv = dev;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* Sequencer registers, don't write SR07 */
- for (i = 0; i < 7; i++)
- i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->saveSR[i]);
-
- /* CRT controller regs */
- /* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
- for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
-
- /* Graphics controller regs */
- for (i = 0; i < 9; i++)
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->saveGR[i]);
-
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->saveGR[0x10]);
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->saveGR[0x11]);
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->saveGR[0x18]);
-
- /* Attribute controller registers */
- I915_READ8(st01); /* switch back to index mode */
- for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
- I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
- I915_READ8(st01);
-
- /* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
-}
-
-void
-i915_save_modeset_reg(struct inteldrm_softc *dev)
-{
- struct inteldrm_softc *dev_priv = dev;
- int i;
-
- /* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->saveCURABASE = I915_READ(_CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
- } else {
- dev_priv->saveFPA0 = I915_READ(_FPA0);
- dev_priv->saveFPA1 = I915_READ(_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
- } else {
- dev_priv->saveFPB0 = I915_READ(_FPB0);
- dev_priv->saveFPB1 = I915_READ(_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
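- /* FALLTHROUGH: gen 3 also saves the eight base fence registers below */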
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
- }
-}
-
-void
-i915_restore_modeset_reg(struct inteldrm_softc *dev)
-{
- struct inteldrm_softc *dev_priv = dev;
- int dpll_a_reg, fpa0_reg, fpa1_reg;
- int dpll_b_reg, fpb0_reg, fpb1_reg;
- int i;
-
- /* XXX until we have FDI link training */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- return;
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
- break;
- }
-
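- /* Select PCH or CPU offsets for the DPLL and FP divisor registers */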
- if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = _PCH_DPLL_A;
- dpll_b_reg = _PCH_DPLL_B;
- fpa0_reg = _PCH_FPA0;
- fpb0_reg = _PCH_FPB0;
- fpa1_reg = _PCH_FPA1;
- fpb1_reg = _PCH_FPB1;
- } else {
- dpll_a_reg = _DPLL_A;
- dpll_b_reg = _DPLL_B;
- fpa0_reg = _FPA0;
- fpb0_reg = _FPB0;
- fpa1_reg = _FPA1;
- fpb1_reg = _FPB1;
- }
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- DRM_UDELAY(150);
- }
- I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
- /* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- DRM_UDELAY(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- POSTING_READ(_DPLL_A_MD);
- }
- DRM_UDELAY(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
-
- I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
-
- I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
-
- I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
- }
-
- I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
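- /* Rewriting the base address makes the plane latch the restored state */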
- I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- DRM_UDELAY(150);
- }
- I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
- /* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- DRM_UDELAY(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- POSTING_READ(_DPLL_B_MD);
- }
- DRM_UDELAY(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
-
- I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
-
- I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
-
- I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
- }
-
- I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
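- /* Rewriting the base address makes the plane latch the restored state */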
- I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
-
- /* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-}
-
-void
-i915_save_display(struct inteldrm_softc *dev)
-{
- struct inteldrm_softc *dev_priv = dev;
-
- /* Display arbitration control */
- dev_priv->saveDSPARB = I915_READ(DSPARB);
-
- /* Only meaningful in non-KMS mode; not saved when KMS is in use */
- i915_save_modeset_reg(dev);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveADPA = I915_READ(PCH_ADPA);
- } else {
- dev_priv->saveADPA = I915_READ(ADPA);
- }
-
- /* LVDS state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- dev_priv->saveLVDS = I915_READ(PCH_LVDS);
- } else {
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
- if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->saveLVDS = I915_READ(LVDS);
- }
-
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
- } else {
- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
- }
-
- /* FIXME: save TV & SDVO state */
-
- /* Only save FBC state on platforms that support FBC */
- if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
- } else if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
- } else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
- }
- }
-
- /* VGA state */
- dev_priv->saveVGA0 = I915_READ(VGA0);
- dev_priv->saveVGA1 = I915_READ(VGA1);
- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (HAS_PCH_SPLIT(dev))
- dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
- else
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
-
- i915_save_vga(dev);
-}
-
-void
-i915_restore_display(struct inteldrm_softc *dev)
-{
- struct inteldrm_softc *dev_priv = dev;
-
- /* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
- /* Only meaningful in non-KMS mode; not restored when KMS is in use */
- i915_restore_modeset_reg(dev);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->saveADPA);
-
- /* LVDS state */
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
- } else if (IS_MOBILE(dev) && !IS_I830(dev)) {
- I915_WRITE(LVDS, dev_priv->saveLVDS);
- }
-
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
- I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
- I915_WRITE(RSTDBYCTL, dev_priv->saveMCHBAR_RENDER_STANDBY);
- } else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
- }
- /* FIXME: restore TV & SDVO state */
-
- /* Only restore FBC state on platforms that support FBC */
- if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
- } else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
- } else {
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
- }
- }
- /* VGA state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
- else
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
-
- I915_WRITE(VGA0, dev_priv->saveVGA0);
- I915_WRITE(VGA1, dev_priv->saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
- POSTING_READ(VGA_PD);
- DRM_UDELAY(150);
-
- i915_restore_vga(dev);
-}
-
-int
-i915_save_state(struct inteldrm_softc *dev)
-{
- struct inteldrm_softc *dev_priv = dev;
- int i;
-
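- /* LBB: legacy backlight brightness register, in PCI config space */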
- dev_priv->saveLBB = pci_conf_read(dev_priv->pc, dev_priv->tag, LBB);
-
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
-
- i915_save_display(dev);
-
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDEIER = I915_READ(DEIER);
- dev_priv->saveDEIMR = I915_READ(DEIMR);
- dev_priv->saveGTIER = I915_READ(GTIER);
- dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->saveMCHBAR_RENDER_STANDBY = I915_READ(RSTDBYCTL);
- dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
- }
-
- /* XXX gen6_disable_rps(dev) */
-
- /* Clock gating state */
- if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->saveDSPCLK_GATE_D = I915_READ(PCH_DSPCLK_GATE_D);
- dev_priv->saveDSPCLK_GATE = I915_READ(ILK_DSPCLK_GATE);
- } else if (IS_G4X(dev_priv)) {
- dev_priv->saveRENCLK_GATE_D1 = I915_READ(RENCLK_GATE_D1);
- dev_priv->saveRENCLK_GATE_D2 = I915_READ(RENCLK_GATE_D2);
- dev_priv->saveRAMCLK_GATE_D = I915_READ(RAMCLK_GATE_D);
- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
- } else if (IS_I965GM(dev_priv)) {
- dev_priv->saveRENCLK_GATE_D1 = I915_READ(RENCLK_GATE_D1);
- dev_priv->saveRENCLK_GATE_D2 = I915_READ(RENCLK_GATE_D2);
- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
- dev_priv->saveRAMCLK_GATE_D = I915_READ(RAMCLK_GATE_D);
- dev_priv->saveDEUC = I915_READ16(DEUC);
- } else if (IS_I965G(dev_priv)) {
- dev_priv->saveRENCLK_GATE_D1 = I915_READ(RENCLK_GATE_D1);
- dev_priv->saveRENCLK_GATE_D2 = I915_READ(RENCLK_GATE_D2);
- } else if (IS_I9XX(dev_priv)) {
- dev_priv->saveD_STATE = I915_READ(D_STATE);
- } else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) {
- dev_priv->saveRENCLK_GATE_D1 = I915_READ(RENCLK_GATE_D1);
- } else if (IS_I830(dev_priv)) {
- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
- }
-
- /* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-
- /* Memory arbitration state */
- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-
- /* Scratch space */
- for (i = 0; i < 16; i++) {
- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
- }
- for (i = 0; i < 3; i++)
- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
-
- return 0;
-}
-
-int
-i915_restore_state(struct inteldrm_softc *dev)
-{
- struct inteldrm_softc *dev_priv = dev;
- int i;
-
- pci_conf_write(dev_priv->pc, dev_priv->tag, LBB, dev_priv->saveLBB);
-
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
- i915_restore_display(dev);
-
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->saveDEIER);
- I915_WRITE(DEIMR, dev_priv->saveDEIMR);
- I915_WRITE(GTIER, dev_priv->saveGTIER);
- I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
- } else {
- I915_WRITE(IER, dev_priv->saveIER);
- I915_WRITE(IMR, dev_priv->saveIMR);
- }
-
- /* Clock gating state */
- if (HAS_PCH_SPLIT(dev_priv)) {
- I915_WRITE(PCH_DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
- I915_WRITE(ILK_DSPCLK_GATE, dev_priv->saveDSPCLK_GATE);
- } else if (IS_G4X(dev_priv)) {
- I915_WRITE(RENCLK_GATE_D1, dev_priv->saveRENCLK_GATE_D1);
- I915_WRITE(RENCLK_GATE_D2, dev_priv->saveRENCLK_GATE_D2);
- I915_WRITE(RAMCLK_GATE_D, dev_priv->saveRAMCLK_GATE_D);
- I915_WRITE(DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
- } else if (IS_I965GM(dev_priv)) {
- I915_WRITE(RENCLK_GATE_D1, dev_priv->saveRENCLK_GATE_D1);
- I915_WRITE(RENCLK_GATE_D2, dev_priv->saveRENCLK_GATE_D2);
- I915_WRITE(DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
- I915_WRITE(RAMCLK_GATE_D, dev_priv->saveRAMCLK_GATE_D);
- I915_WRITE16(DEUC, dev_priv->saveDEUC);
- } else if (IS_I965G(dev_priv)) {
- I915_WRITE(RENCLK_GATE_D1, dev_priv->saveRENCLK_GATE_D1);
- I915_WRITE(RENCLK_GATE_D2, dev_priv->saveRENCLK_GATE_D2);
- } else if (IS_I9XX(dev_priv)) {
- I915_WRITE(D_STATE, dev_priv->saveD_STATE);
- } else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) {
- I915_WRITE(RENCLK_GATE_D1, dev_priv->saveRENCLK_GATE_D1);
- } else if (IS_I830(dev_priv)) {
- I915_WRITE(DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
- }
-
- /* Cache mode state */
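- /* CACHE_MODE_0 and MI_ARB_STATE are masked registers: the high word write-enables the low word */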
- I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
-
- /* Memory arbitration state */
- I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
-
- for (i = 0; i < 16; i++) {
- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
- }
- for (i = 0; i < 3; i++)
- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
-
- return 0;
-}
diff --git a/sys/dev/pci/drm/radeon_drv.c b/sys/dev/pci/drm/radeon_drv.c
index 6a0bf3652c8..7a31210bd24 100644
--- a/sys/dev/pci/drm/radeon_drv.c
+++ b/sys/dev/pci/drm/radeon_drv.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_drv.c,v 1.62 2013/01/04 05:31:35 jsg Exp $ */
+/* $OpenBSD: radeon_drv.c,v 1.63 2013/03/18 12:36:51 jsg Exp $ */
/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*-
* Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
*/
@@ -686,7 +686,7 @@ const static struct drm_pcidev radeondrm_pciidlist[] = {
{0, 0, 0}
};
-static const struct drm_driver_info radeondrm_driver = {
+static struct drm_driver_info radeondrm_driver = {
.buf_priv_size = sizeof(drm_radeon_buf_priv_t),
.file_priv_size = sizeof(struct drm_radeon_file),
.firstopen = radeon_driver_firstopen,
@@ -802,6 +802,9 @@ radeondrm_attach(struct device *parent, struct device *self, void *aux)
TAILQ_INIT(&dev_priv->fb_heap);
dev_priv->drmdev = drm_attach_pci(&radeondrm_driver, pa, is_agp, self);
+
+ if (drm_vblank_init((struct drm_device *)dev_priv->drmdev, 2))
+ printf(": drm_vblank_init failed\n");
}
int
diff --git a/sys/dev/pci/drm/radeon_irq.c b/sys/dev/pci/drm/radeon_irq.c
index b8276837532..85c894d51c2 100644
--- a/sys/dev/pci/drm/radeon_irq.c
+++ b/sys/dev/pci/drm/radeon_irq.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: radeon_irq.c,v 1.27 2011/06/19 12:04:20 oga Exp $ */
+/* $OpenBSD: radeon_irq.c,v 1.28 2013/03/18 12:36:51 jsg Exp $ */
/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
/*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
@@ -349,7 +349,7 @@ radeon_driver_irq_install(struct drm_device * dev)
atomic_set(&dev_priv->swi_emitted, 0);
- dev->vblank->vb_max = 0x001fffff;
+ dev->max_vblank_count = 0x001fffff;
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);