[PC-BSD Commits] r15683 - pcbsd/current/build-files/src-patches

svn at pcbsd.org
Mon Mar 5 07:41:43 PST 2012


Author: kris
Date: 2012-03-05 15:41:25 +0000 (Mon, 05 Mar 2012)
New Revision: 15683

Modified:
   pcbsd/current/build-files/src-patches/patch-intel-kms
Log:

Update the Intel KMS patch to 13.3



Modified: pcbsd/current/build-files/src-patches/patch-intel-kms
===================================================================
--- pcbsd/current/build-files/src-patches/patch-intel-kms	2012-03-05 10:14:48 UTC (rev 15682)
+++ pcbsd/current/build-files/src-patches/patch-intel-kms	2012-03-05 15:41:25 UTC (rev 15683)
@@ -1,546 +1,8 @@
-Index: sys/modules/drm/drm/Makefile
-===================================================================
-diff --git sys/modules/drm/drm/Makefile sys/modules/drm/drm/Makefile
---- sys/modules/drm/drm/Makefile	(revision 230124)
-+++ sys/modules/drm/drm/Makefile	(working copy)
-@@ -8,22 +8,32 @@
- 	drm_auth.c \
- 	drm_bufs.c \
- 	drm_context.c \
-+	drm_crtc.c \
-+	drm_crtc_helper.c \
- 	drm_dma.c \
-+	drm_dp_iic_helper.c \
- 	drm_drawable.c \
- 	drm_drv.c \
-+	drm_edid.c \
-+	drm_fb_helper.c \
- 	drm_fops.c \
-+	drm_gem.c \
-+	drm_gem_names.c \
- 	drm_hashtab.c \
- 	drm_ioctl.c \
- 	drm_irq.c \
-+	drm_linux_list_sort.c \
- 	drm_lock.c \
- 	drm_memory.c \
- 	drm_mm.c \
-+	drm_modes.c \
- 	drm_pci.c \
- 	drm_scatter.c \
- 	drm_sman.c \
-+	drm_stub.c \
- 	drm_sysctl.c \
- 	drm_vm.c
- 
--SRCS	+=device_if.h bus_if.h pci_if.h opt_drm.h
-+SRCS	+=device_if.h bus_if.h pci_if.h device_if.h iicbus_if.h opt_drm.h
- 
- .include <bsd.kmod.mk>
-Index: sys/modules/drm/i915/Makefile
-===================================================================
-diff --git sys/modules/drm/i915/Makefile sys/modules/drm/i915/Makefile
---- sys/modules/drm/i915/Makefile	(revision 230124)
-+++ sys/modules/drm/i915/Makefile	(working copy)
-@@ -2,7 +2,34 @@
- 
- .PATH:	${.CURDIR}/../../../dev/drm
- KMOD	= i915
--SRCS	= i915_dma.c i915_drv.c i915_irq.c i915_mem.c i915_suspend.c
--SRCS	+=device_if.h bus_if.h pci_if.h opt_drm.h
-+SRCS	= \
-+	i915_debug.c \
-+	i915_dma.c \
-+	i915_drv.c \
-+	i915_gem.c \
-+	i915_gem_execbuffer.c \
-+	i915_gem_evict.c \
-+	i915_gem_gtt.c \
-+	i915_gem_tiling.c \
-+	i915_irq.c \
-+	i915_mem.c \
-+	i915_suspend.c \
-+	intel_bios.c \
-+	intel_crt.c \
-+	intel_display.c \
-+	intel_dp.c \
-+	intel_fb.c \
-+	intel_hdmi.c \
-+	intel_iic.c \
-+	intel_lvds.c \
-+	intel_modes.c \
-+	intel_opregion.c \
-+	intel_overlay.c \
-+	intel_panel.c \
-+	intel_ringbuffer.c \
-+	intel_sdvo.c \
-+	intel_sprite.c \
-+	intel_tv.c
-+SRCS	+= device_if.h bus_if.h pci_if.h iicbus_if.h iicbb_if.h opt_drm.h
- 
- .include <bsd.kmod.mk>
-Index: sys/modules/agp/Makefile
-===================================================================
-diff --git sys/modules/agp/Makefile sys/modules/agp/Makefile
---- sys/modules/agp/Makefile	(revision 230124)
-+++ sys/modules/agp/Makefile	(working copy)
-@@ -33,4 +33,16 @@
- 		agp_unbind_memory	\
- 		agp_memory_info
- 
-+
-+.if ${MACHINE_CPUARCH} == "i386"  || ${MACHINE_CPUARCH} == "amd64"
-+EXPORT_SYMS+=	intel_gtt_clear_range	\
-+		intel_gtt_insert_pages	\
-+		intel_gtt_get		\
-+		intel_gtt_chipset_flush	\
-+		intel_gtt_unmap_memory	\
-+		intel_gtt_map_memory	\
-+		intel_gtt_insert_sg_entries \
-+		intel_gtt_get_bridge_device
-+.endif
-+
- .include <bsd.kmod.mk>
-Index: sys/vm/vm_pager.c
-===================================================================
-diff --git sys/vm/vm_pager.c sys/vm/vm_pager.c
---- sys/vm/vm_pager.c	(revision 230124)
-+++ sys/vm/vm_pager.c	(working copy)
-@@ -159,7 +159,8 @@
- 	&devicepagerops,	/* OBJT_DEVICE */
- 	&physpagerops,		/* OBJT_PHYS */
- 	&deadpagerops,		/* OBJT_DEAD */
--	&sgpagerops		/* OBJT_SG */
-+	&sgpagerops,		/* OBJT_SG */
-+	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
- };
- 
- static const int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
-Index: sys/vm/vm_pager.h
-===================================================================
-diff --git sys/vm/vm_pager.h sys/vm/vm_pager.h
---- sys/vm/vm_pager.h	(revision 230124)
-+++ sys/vm/vm_pager.h	(working copy)
-@@ -71,6 +71,7 @@
- extern struct pagerops devicepagerops;
- extern struct pagerops physpagerops;
- extern struct pagerops sgpagerops;
-+extern struct pagerops mgtdevicepagerops;
- 
- /*
-  * get/put return values
-Index: sys/vm/vm_pageout.c
-===================================================================
-diff --git sys/vm/vm_pageout.c sys/vm/vm_pageout.c
---- sys/vm/vm_pageout.c	(revision 230124)
-+++ sys/vm/vm_pageout.c	(working copy)
-@@ -800,6 +800,11 @@
- 		if (m->flags & PG_MARKER)
- 			continue;
- 
-+		KASSERT((m->flags & PG_FICTITIOUS) == 0,
-+		    ("Fictitious page %p cannot be in inactive queue", m));
-+		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-+		    ("Unmanaged page %p cannot be in inactive queue", m));
-+
- 		/*
- 		 * Lock the page.
- 		 */
-@@ -1138,6 +1143,10 @@
- 			m = next;
- 			continue;
- 		}
-+		KASSERT((m->flags & PG_FICTITIOUS) == 0,
-+		    ("Fictitious page %p cannot be in active queue", m));
-+		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-+		    ("Unmanaged page %p cannot be in active queue", m));
- 		if (!vm_pageout_page_lock(m, &next)) {
- 			vm_page_unlock(m);
- 			m = next;
-Index: sys/vm/vm_phys.c
-===================================================================
-diff --git sys/vm/vm_phys.c sys/vm/vm_phys.c
---- sys/vm/vm_phys.c	(revision 230124)
-+++ sys/vm/vm_phys.c	(working copy)
-@@ -83,6 +83,15 @@
- 
- static int vm_phys_nsegs;
- 
-+#define VM_PHYS_FICTITIOUS_NSEGS	8
-+static struct vm_phys_fictitious_seg {
-+	vm_paddr_t	start;
-+	vm_paddr_t	end;
-+	vm_page_t	first_page;
-+} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
-+static struct mtx vm_phys_fictitious_reg_mtx;
-+MALLOC_DEFINE(M_FICT_PAGES, "", "");
-+
- static struct vm_freelist
-     vm_phys_free_queues[VM_RAW_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
- static struct vm_freelist
-@@ -362,6 +371,8 @@
- 	for (flind = 0; flind < vm_nfreelists; flind++)
- 		vm_phys_lookup_lists[0][flind] = &vm_phys_free_queues[flind];
- #endif
-+
-+	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
- }
- 
- /*
-@@ -526,6 +537,110 @@
- 	return (NULL);
- }
- 
-+vm_page_t
-+vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
-+{
-+	struct vm_phys_fictitious_seg *seg;
-+	vm_page_t m;
-+	int segind;
-+
-+	m = NULL;
-+	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
-+		seg = &vm_phys_fictitious_segs[segind];
-+		if (pa >= seg->start && pa < seg->end) {
-+			m = &seg->first_page[atop(pa - seg->start)];
-+			KASSERT((m->flags & PG_FICTITIOUS) != 0,
-+			    ("%p not fictitious", m));
-+			break;
-+		}
-+	}
-+	return (m);
-+}
-+
-+int
-+vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
-+    vm_memattr_t memattr)
-+{
-+	struct vm_phys_fictitious_seg *seg;
-+	vm_page_t fp;
-+	long i, page_count;
-+	int segind;
-+#ifdef VM_PHYSSEG_DENSE
-+	long pi;
-+	boolean_t malloced;
-+#endif
-+
-+	page_count = (end - start) / PAGE_SIZE;
-+
-+#ifdef VM_PHYSSEG_DENSE
-+	pi = atop(start);
-+	if (pi >= first_page && atop(end) < vm_page_array_size) {
-+		fp = &vm_page_array[pi - first_page];
-+		malloced = FALSE;
-+	} else
-+#endif
-+	{
-+		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
-+		    M_WAITOK | M_ZERO);
-+		malloced = TRUE;
-+	}
-+	for (i = 0; i < page_count; i++) {
-+		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
-+		pmap_page_init(&fp[i]);
-+		fp[i].oflags &= ~(VPO_BUSY | VPO_UNMANAGED);
-+	}
-+	mtx_lock(&vm_phys_fictitious_reg_mtx);
-+	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
-+		seg = &vm_phys_fictitious_segs[segind];
-+		if (seg->start == 0 && seg->end == 0) {
-+			seg->start = start;
-+			seg->end = end;
-+			seg->first_page = fp;
-+			mtx_unlock(&vm_phys_fictitious_reg_mtx);
-+			return (0);
-+		}
-+	}
-+	mtx_unlock(&vm_phys_fictitious_reg_mtx);
-+#ifdef VM_PHYSSEG_DENSE
-+	if (malloced)
-+#endif
-+		free(fp, M_FICT_PAGES);
-+	return (EBUSY);
-+}
-+
-+void
-+vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
-+{
-+	struct vm_phys_fictitious_seg *seg;
-+	vm_page_t fp;
-+	int segind;
-+#ifdef VM_PHYSSEG_DENSE
-+	long pi;
-+#endif
-+
-+#ifdef VM_PHYSSEG_DENSE
-+	pi = atop(start);
-+#endif
-+
-+	mtx_lock(&vm_phys_fictitious_reg_mtx);
-+	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
-+		seg = &vm_phys_fictitious_segs[segind];
-+		if (seg->start == start && seg->end == end) {
-+			seg->start = seg->end = 0;
-+			fp = seg->first_page;
-+			seg->first_page = NULL;
-+			mtx_unlock(&vm_phys_fictitious_reg_mtx);
-+#ifdef VM_PHYSSEG_DENSE
-+			if (pi < first_page || atop(end) >= vm_page_array_size)
-+#endif
-+				free(fp, M_FICT_PAGES);
-+			return;
-+		}
-+	}
-+	mtx_unlock(&vm_phys_fictitious_reg_mtx);
-+	KASSERT(0, ("Unregistering not registered fictitious range"));
-+}
-+
- /*
-  * Find the segment containing the given physical address.
-  */
-Index: sys/vm/vm_phys.h
-===================================================================
-diff --git sys/vm/vm_phys.h sys/vm/vm_phys.h
---- sys/vm/vm_phys.h	(revision 230124)
-+++ sys/vm/vm_phys.h	(working copy)
-@@ -55,6 +55,10 @@
-     unsigned long alignment, unsigned long boundary);
- vm_page_t vm_phys_alloc_freelist_pages(int flind, int pool, int order);
- vm_page_t vm_phys_alloc_pages(int pool, int order);
-+int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
-+    vm_memattr_t memattr);
-+void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);
-+vm_page_t vm_phys_fictitious_to_vm_page(vm_paddr_t pa);
- vm_paddr_t vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment);
- void vm_phys_free_pages(vm_page_t m, int order);
- void vm_phys_init(void);
-Index: sys/vm/vm_object.c
-===================================================================
-diff --git sys/vm/vm_object.c sys/vm/vm_object.c
---- sys/vm/vm_object.c	(revision 230124)
-+++ sys/vm/vm_object.c	(working copy)
-@@ -1852,8 +1852,10 @@
- 		}
- 		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
- 			goto again;
--		KASSERT((p->flags & PG_FICTITIOUS) == 0,
--		    ("vm_object_page_remove: page %p is fictitious", p));
-+		KASSERT((p->flags & PG_FICTITIOUS) == 0 ||
-+		    (p->oflags & VPO_UNMANAGED) == 0,
-+		    ("vm_object_page_remove: page %p is fictitious unmanaged",
-+		    p));
- 		if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
- 			if ((options & OBJPR_NOTMAPPED) == 0)
- 				pmap_remove_write(p);
-Index: sys/vm/device_pager.c
-===================================================================
-diff --git sys/vm/device_pager.c sys/vm/device_pager.c
---- sys/vm/device_pager.c	(revision 230124)
-+++ sys/vm/device_pager.c	(working copy)
-@@ -76,6 +76,14 @@
- 	.pgo_haspage =	dev_pager_haspage,
- };
- 
-+struct pagerops mgtdevicepagerops = {
-+	.pgo_alloc =	dev_pager_alloc,
-+	.pgo_dealloc =	dev_pager_dealloc,
-+	.pgo_getpages =	dev_pager_getpages,
-+	.pgo_putpages =	dev_pager_putpages,
-+	.pgo_haspage =	dev_pager_haspage,
-+};
-+
- static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
-     vm_ooffset_t foff, struct ucred *cred, u_short *color);
- static void old_dev_pager_dtor(void *handle);
-@@ -115,7 +123,7 @@
- 	vm_pindex_t pindex;
- 	u_short color;
- 
--	if (tp != OBJT_DEVICE)
-+	if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
- 		return (NULL);
- 
- 	/*
-@@ -240,6 +248,11 @@
- 	}
- 
- 	if (error == VM_PAGER_OK) {
-+		KASSERT((object->type == OBJT_DEVICE &&
-+		     (ma[reqpage]->oflags & VPO_UNMANAGED) != 0) ||
-+		    (object->type == OBJT_MGTDEVICE &&
-+		     (ma[reqpage]->oflags & VPO_UNMANAGED) == 0),
-+		    ("Wrong page type %p %p", ma[reqpage], object));
- 		TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
- 		    ma[reqpage], pageq);
- 	}
-Index: sys/vm/vm_fault.c
-===================================================================
-diff --git sys/vm/vm_fault.c sys/vm/vm_fault.c
---- sys/vm/vm_fault.c	(revision 230124)
-+++ sys/vm/vm_fault.c	(working copy)
-@@ -1035,7 +1035,8 @@
- 			break;
- 		}
- 		if (m->valid == VM_PAGE_BITS_ALL &&
--		    (m->flags & PG_FICTITIOUS) == 0)
-+		    (m->flags & PG_FICTITIOUS) == 0 &&
-+		    (m->oflags & VPO_UNMANAGED) == 0)
- 			pmap_enter_quick(pmap, addr, m, entry->protection);
- 		VM_OBJECT_UNLOCK(lobject);
- 	}
-Index: sys/vm/vm_page.c
-===================================================================
-diff --git sys/vm/vm_page.c sys/vm/vm_page.c
---- sys/vm/vm_page.c	(revision 230124)
-+++ sys/vm/vm_page.c	(working copy)
-@@ -122,7 +122,7 @@
- struct vpglocks	pa_lock[PA_LOCK_COUNT];
- 
- vm_page_t vm_page_array = 0;
--int vm_page_array_size = 0;
-+long vm_page_array_size = 0;
- long first_page = 0;
- int vm_page_zero_count = 0;
- 
-@@ -632,6 +632,30 @@
- 		mtx_unlock(mtx);
- }
- 
-+vm_page_t
-+PHYS_TO_VM_PAGE(vm_paddr_t pa)
-+{
-+	vm_page_t m;
-+
-+#ifdef VM_PHYSSEG_SPARSE
-+	m = vm_phys_paddr_to_vm_page(pa);
-+	if (m == NULL)
-+		m = vm_phys_fictitious_to_vm_page(pa);
-+	return (m);
-+#elif defined(VM_PHYSSEG_DENSE)
-+	long pi;
-+
-+	pi = atop(pa);
-+	if (pi >= first_page && pi < vm_page_array_size) {
-+		m = &vm_page_array[pi - first_page];
-+		return (m);
-+	}
-+	return (vm_phys_fictitious_to_vm_page(pa));
-+#else
-+#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
-+#endif
-+}
-+
- /*
-  *	vm_page_getfake:
-  *
-@@ -645,6 +669,17 @@
- 	vm_page_t m;
- 
- 	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
-+	vm_page_initfake(m, paddr, memattr);
-+	return (m);
-+}
-+
-+void
-+vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
-+{
-+
-+	if ((m->flags & PG_FICTITIOUS) != 0)
-+		return;
-+
- 	m->phys_addr = paddr;
- 	m->queue = PQ_NONE;
- 	/* Fictitious pages don't use "segind". */
-@@ -653,7 +688,6 @@
- 	m->oflags = VPO_BUSY | VPO_UNMANAGED;
- 	m->wire_count = 1;
- 	pmap_page_set_memattr(m, memattr);
--	return (m);
- }
- 
- /*
-@@ -665,9 +699,14 @@
- vm_page_putfake(vm_page_t m)
- {
- 
--	KASSERT((m->flags & PG_FICTITIOUS) != 0,
--	    ("vm_page_putfake: bad page %p", m));
--	uma_zfree(fakepg_zone, m);
-+	if ((m->oflags & VPO_UNMANAGED) == 0) {
-+		pmap_remove_all(m);
-+		vm_page_lock(m);
-+		vm_page_remove(m);
-+		vm_page_unlock(m);
-+	} else {
-+		uma_zfree(fakepg_zone, m);
-+	}
- }
- 
- /*
-Index: sys/vm/vm_page.h
-===================================================================
-diff --git sys/vm/vm_page.h sys/vm/vm_page.h
---- sys/vm/vm_page.h	(revision 230124)
-+++ sys/vm/vm_page.h	(working copy)
-@@ -295,7 +295,7 @@
- extern int vm_page_zero_count;
- 
- extern vm_page_t vm_page_array;		/* First resident page in table */
--extern int vm_page_array_size;		/* number of vm_page_t's */
-+extern long vm_page_array_size;		/* number of vm_page_t's */
- extern long first_page;			/* first physical page number */
- 
- #define	VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)
-@@ -304,20 +304,8 @@
- 
- vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
- 
--static __inline vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
-+vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
- 
--static __inline vm_page_t
--PHYS_TO_VM_PAGE(vm_paddr_t pa)
--{
--#ifdef VM_PHYSSEG_SPARSE
--	return (vm_phys_paddr_to_vm_page(pa));
--#elif defined(VM_PHYSSEG_DENSE)
--	return (&vm_page_array[atop(pa) - first_page]);
--#else
--#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
--#endif
--}
--
- extern struct vpglocks vm_page_queue_lock;
- 
- #define	vm_page_queue_mtx	vm_page_queue_lock.data
-@@ -372,6 +360,7 @@
- void vm_page_deactivate (vm_page_t);
- vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
- vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
-+void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
- void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
- vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
- vm_page_t vm_page_next(vm_page_t m);
-Index: sys/vm/vm.h
-===================================================================
-diff --git sys/vm/vm.h sys/vm/vm.h
---- sys/vm/vm.h	(revision 230124)
-+++ sys/vm/vm.h	(working copy)
-@@ -83,7 +83,7 @@
- #define	VM_PROT_DEFAULT		VM_PROT_ALL
- 
- enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_PHYS,
--		OBJT_DEAD, OBJT_SG };
-+		OBJT_DEAD, OBJT_SG, OBJT_MGTDEVICE };
- typedef u_char objtype_t;
- 
- union vm_map_object;
-Index: sys/i386/i386/pmap.c
-===================================================================
-diff --git sys/i386/i386/pmap.c sys/i386/i386/pmap.c
---- sys/i386/i386/pmap.c	(revision 230124)
-+++ sys/i386/i386/pmap.c	(working copy)
-@@ -2220,7 +2220,8 @@
+diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
+index 5ab11fd..8171ee4 100644
+--- sys/amd64/amd64/pmap.c
++++ sys/amd64/amd64/pmap.c
+@@ -2137,7 +2137,8 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
  				PMAP_UNLOCK(pmap);
  		}
  		if (TAILQ_EMPTY(&m->md.pv_list) &&
@@ -549,196 +11,8 @@
 +		     TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)))
  			vm_page_aflag_clear(m, PGA_WRITEABLE);
  	}
- 	sched_unpin();
-@@ -2454,15 +2455,12 @@
- static void
- pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
- {
--	struct md_page *pvh;
- 
- 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- 	pmap_pvh_free(&m->md, pmap, va);
--	if (TAILQ_EMPTY(&m->md.pv_list)) {
--		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
--		if (TAILQ_EMPTY(&pvh->pv_list))
--			vm_page_aflag_clear(m, PGA_WRITEABLE);
--	}
-+	if (TAILQ_EMPTY(&m->md.pv_list) && ((m->flags & PG_FICTITIOUS) != 0 ||
-+	    TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)))
-+		vm_page_aflag_clear(m, PGA_WRITEABLE);
  }
- 
- /*
-@@ -2932,6 +2930,8 @@
- 	free = NULL;
- 	vm_page_lock_queues();
- 	sched_pin();
-+	if ((m->flags & PG_FICTITIOUS) != 0)
-+		goto small_mappings;
- 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
- 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
- 		va = pv->pv_va;
-@@ -2941,6 +2941,7 @@
- 		(void)pmap_demote_pde(pmap, pde, va);
- 		PMAP_UNLOCK(pmap);
- 	}
-+small_mappings:
- 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
- 		pmap = PV_PMAP(pv);
- 		PMAP_LOCK(pmap);
-@@ -3456,7 +3457,8 @@
- 			}
- 			if ((origpte & PG_MANAGED) != 0 &&
- 			    TAILQ_EMPTY(&om->md.pv_list) &&
--			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
-+			    ((om->flags & PG_FICTITIOUS) != 0 ||
-+			     TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
- 				vm_page_aflag_clear(om, PGA_WRITEABLE);
- 			if (invlva)
- 				pmap_invalidate_page(pmap, va);
-@@ -3469,7 +3471,8 @@
- 	 * populated, then attempt promotion.
- 	 */
- 	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
--	    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
-+	    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
-+	    (m->flags & PG_FICTITIOUS) == 0)
- 		pmap_promote_pde(pmap, pde, va);
- 
- 	sched_unpin();
-@@ -4109,7 +4112,7 @@
- 		if (loops >= 16)
- 			break;
- 	}
--	if (!rv && loops < 16) {
-+	if (!rv && (m->flags & PG_FICTITIOUS) == 0 && loops < 16) {
- 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
- 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
- 			if (PV_PMAP(pv) == pmap) {
-@@ -4185,7 +4188,8 @@
- 		return (FALSE);
- 	vm_page_lock_queues();
- 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
--	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
-+	    ((m->flags & PG_FICTITIOUS) == 0 &&
-+	     !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
- 	vm_page_unlock_queues();
- 	return (rv);
- }
-@@ -4258,7 +4262,8 @@
- 				    m, (uintmax_t)m->phys_addr,
- 				    (uintmax_t)tpte));
- 
--				KASSERT(m < &vm_page_array[vm_page_array_size],
-+				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
-+					m < &vm_page_array[vm_page_array_size],
- 					("pmap_remove_pages: bad tpte %#jx",
- 					(uintmax_t)tpte));
- 
-@@ -4355,7 +4360,8 @@
- 		return (FALSE);
- 	vm_page_lock_queues();
- 	rv = pmap_is_modified_pvh(&m->md) ||
--	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
-+	    ((m->flags & PG_FICTITIOUS) == 0 &&
-+	     pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
- 	vm_page_unlock_queues();
- 	return (rv);
- }
-@@ -4428,7 +4434,8 @@
- 	    ("pmap_is_referenced: page %p is not managed", m));
- 	vm_page_lock_queues();
- 	rv = pmap_is_referenced_pvh(&m->md) ||
--	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
-+	    ((m->flags & PG_FICTITIOUS) == 0 &&
-+	     pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
- 	vm_page_unlock_queues();
- 	return (rv);
- }
-@@ -4488,6 +4495,8 @@
- 		return;
- 	vm_page_lock_queues();
- 	sched_pin();
-+	if ((m->flags & PG_FICTITIOUS) != 0)
-+		goto small_mappings;
- 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
- 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
- 		va = pv->pv_va;
-@@ -4498,6 +4507,7 @@
- 			(void)pmap_demote_pde(pmap, pde, va);
- 		PMAP_UNLOCK(pmap);
- 	}
-+small_mappings:
- 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- 		pmap = PV_PMAP(pv);
- 		PMAP_LOCK(pmap);
-@@ -4555,6 +4565,8 @@
- 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
- 	vm_page_lock_queues();
- 	sched_pin();
-+	if ((m->flags & PG_FICTITIOUS) != 0)
-+		goto small_mappings;
- 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
- 		va = pv->pv_va;
- 		pmap = PV_PMAP(pv);
-@@ -4585,6 +4597,7 @@
- 		}
- 		PMAP_UNLOCK(pmap);
- 	}
-+small_mappings:
- 	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
- 		pvf = pv;
- 		do {
-@@ -4641,6 +4654,8 @@
- 		return;
- 	vm_page_lock_queues();
- 	sched_pin();
-+	if ((m->flags & PG_FICTITIOUS) != 0)
-+		goto small_mappings;
- 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
- 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
- 		va = pv->pv_va;
-@@ -4678,6 +4693,7 @@
- 		}
- 		PMAP_UNLOCK(pmap);
- 	}
-+small_mappings:
- 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- 		pmap = PV_PMAP(pv);
- 		PMAP_LOCK(pmap);
-@@ -4719,6 +4735,8 @@
- 	    ("pmap_clear_reference: page %p is not managed", m));
- 	vm_page_lock_queues();
- 	sched_pin();
-+	if ((m->flags & PG_FICTITIOUS) != 0)
-+		goto small_mappings;
- 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
- 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
- 		va = pv->pv_va;
-@@ -4742,6 +4760,7 @@
- 		}
- 		PMAP_UNLOCK(pmap);
- 	}
-+small_mappings:
- 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- 		pmap = PV_PMAP(pv);
- 		PMAP_LOCK(pmap);
-Index: sys/amd64/amd64/pmap.c
-===================================================================
-diff --git sys/amd64/amd64/pmap.c sys/amd64/amd64/pmap.c
---- sys/amd64/amd64/pmap.c	(revision 230124)
-+++ sys/amd64/amd64/pmap.c	(working copy)
-@@ -2136,7 +2136,8 @@
- 				PMAP_UNLOCK(pmap);
- 		}
- 		if (TAILQ_EMPTY(&m->md.pv_list) &&
--		    TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list))
-+		    ((m->flags & PG_FICTITIOUS) != 0 ||
-+		     TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)))
- 			vm_page_aflag_clear(m, PGA_WRITEABLE);
- 	}
- }
-@@ -2384,15 +2385,12 @@
+@@ -2385,15 +2386,12 @@ pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
  static void
  pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
  {
@@ -757,7 +31,7 @@
  }
  
  /*
-@@ -2851,6 +2849,8 @@
+@@ -2852,6 +2850,8 @@ pmap_remove_all(vm_page_t m)
  	    ("pmap_remove_all: page %p is not managed", m));
  	free = NULL;
  	vm_page_lock_queues();
@@ -766,7 +40,7 @@
  	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
  	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
  		pmap = PV_PMAP(pv);
-@@ -2860,6 +2860,7 @@
+@@ -2861,6 +2861,7 @@ pmap_remove_all(vm_page_t m)
  		(void)pmap_demote_pde(pmap, pde, va);
  		PMAP_UNLOCK(pmap);
  	}
@@ -774,7 +48,7 @@
  	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
  		pmap = PV_PMAP(pv);
  		PMAP_LOCK(pmap);
-@@ -3338,7 +3339,8 @@
+@@ -3339,7 +3340,8 @@ validate:
  			}
  			if ((origpte & PG_MANAGED) != 0 &&
  			    TAILQ_EMPTY(&om->md.pv_list) &&
@@ -784,7 +58,7 @@
  				vm_page_aflag_clear(om, PGA_WRITEABLE);
  			if (invlva)
  				pmap_invalidate_page(pmap, va);
-@@ -3351,7 +3353,8 @@
+@@ -3352,7 +3354,8 @@ validate:
  	 * populated, then attempt promotion.
  	 */
  	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
@@ -794,7 +68,7 @@
  		pmap_promote_pde(pmap, pde, va);
  
  	vm_page_unlock_queues();
-@@ -3971,7 +3974,7 @@
+@@ -3972,7 +3975,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
  		if (loops >= 16)
  			break;
  	}
@@ -803,7 +77,7 @@
  		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
  		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
  			if (PV_PMAP(pv) == pmap) {
-@@ -4045,7 +4048,8 @@
+@@ -4046,7 +4049,8 @@ pmap_page_is_mapped(vm_page_t m)
  		return (FALSE);
  	vm_page_lock_queues();
  	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -813,7 +87,7 @@
  	vm_page_unlock_queues();
  	return (rv);
  }
-@@ -4118,7 +4122,8 @@
+@@ -4119,7 +4123,8 @@ pmap_remove_pages(pmap_t pmap)
  				    m, (uintmax_t)m->phys_addr,
  				    (uintmax_t)tpte));
  
@@ -823,7 +97,7 @@
  					("pmap_remove_pages: bad tpte %#jx",
  					(uintmax_t)tpte));
  
-@@ -4213,7 +4218,8 @@
+@@ -4214,7 +4219,8 @@ pmap_is_modified(vm_page_t m)
  		return (FALSE);
  	vm_page_lock_queues();
  	rv = pmap_is_modified_pvh(&m->md) ||
@@ -833,7 +107,7 @@
  	vm_page_unlock_queues();
  	return (rv);
  }
-@@ -4284,7 +4290,8 @@
+@@ -4285,7 +4291,8 @@ pmap_is_referenced(vm_page_t m)
  	    ("pmap_is_referenced: page %p is not managed", m));
  	vm_page_lock_queues();
  	rv = pmap_is_referenced_pvh(&m->md) ||
@@ -843,7 +117,7 @@
  	vm_page_unlock_queues();
  	return (rv);
  }
-@@ -4341,6 +4348,8 @@
+@@ -4342,6 +4349,8 @@ pmap_remove_write(vm_page_t m)
  	    (m->aflags & PGA_WRITEABLE) == 0)
  		return;
  	vm_page_lock_queues();
@@ -852,7 +126,7 @@
  	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
  	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
  		pmap = PV_PMAP(pv);
-@@ -4351,6 +4360,7 @@
+@@ -4352,6 +4361,7 @@ pmap_remove_write(vm_page_t m)
  			(void)pmap_demote_pde(pmap, pde, va);
  		PMAP_UNLOCK(pmap);
  	}
@@ -860,19 +134,19 @@
  	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
  		pmap = PV_PMAP(pv);
  		PMAP_LOCK(pmap);
-@@ -4399,8 +4409,10 @@
+@@ -4400,8 +4410,10 @@ pmap_ts_referenced(vm_page_t m)
  
  	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
  	    ("pmap_ts_referenced: page %p is not managed", m));
-+	vm_page_lock_queues();
+-	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ 	vm_page_lock_queues();
 +	if ((m->flags & PG_FICTITIOUS) != 0)
 +		goto small_mappings;
- 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
--	vm_page_lock_queues();
++	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
  	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
  		pmap = PV_PMAP(pv);
  		PMAP_LOCK(pmap);
-@@ -4431,6 +4443,7 @@
+@@ -4432,6 +4444,7 @@ pmap_ts_referenced(vm_page_t m)
  		}
  		PMAP_UNLOCK(pmap);
  	}
@@ -880,7 +154,7 @@
  	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
  		pvf = pv;
  		do {
-@@ -4485,6 +4498,8 @@
+@@ -4486,6 +4499,8 @@ pmap_clear_modify(vm_page_t m)
  	if ((m->aflags & PGA_WRITEABLE) == 0)
  		return;
  	vm_page_lock_queues();
@@ -889,7 +163,7 @@
  	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
  	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
  		pmap = PV_PMAP(pv);
-@@ -4517,6 +4532,7 @@
+@@ -4518,6 +4533,7 @@ pmap_clear_modify(vm_page_t m)
  		}
  		PMAP_UNLOCK(pmap);
  	}
@@ -897,7 +171,7 @@
  	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
  		pmap = PV_PMAP(pv);
  		PMAP_LOCK(pmap);
-@@ -4551,6 +4567,8 @@
+@@ -4552,6 +4568,8 @@ pmap_clear_reference(vm_page_t m)
  	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
  	    ("pmap_clear_reference: page %p is not managed", m));
  	vm_page_lock_queues();
@@ -906,7 +180,7 @@
  	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
  	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
  		pmap = PV_PMAP(pv);
-@@ -4574,6 +4592,7 @@
+@@ -4575,6 +4593,7 @@ pmap_clear_reference(vm_page_t m)
  		}
  		PMAP_UNLOCK(pmap);
  	}
@@ -914,52 +188,47 @@
  	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
  		pmap = PV_PMAP(pv);
  		PMAP_LOCK(pmap);
-Index: sys/sys/agpio.h
-===================================================================
-diff --git sys/sys/agpio.h sys/sys/agpio.h
---- sys/sys/agpio.h	(revision 230124)
-+++ sys/sys/agpio.h	(working copy)
-@@ -88,6 +88,7 @@
- #define AGPIOC_DEALLOCATE _IOW (AGPIOC_BASE, 7, int)
- #define AGPIOC_BIND       _IOW (AGPIOC_BASE, 8, agp_bind)
- #define AGPIOC_UNBIND     _IOW (AGPIOC_BASE, 9, agp_unbind)
-+#define AGPIOC_CHIPSET_FLUSH _IO (AGPIOC_BASE, 10)
+diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
+index 6e5aa35..ccd7f51 100644
+--- sys/dev/agp/agp.c
++++ sys/dev/agp/agp.c
+@@ -239,7 +239,8 @@ agp_generic_attach(device_t dev)
+ 		if (memsize <= agp_max[i][0])
+ 			break;
+ 	}
+-	if (i == agp_max_size) i = agp_max_size - 1;
++	if (i == agp_max_size)
++		i = agp_max_size - 1;
+ 	sc->as_maxmem = agp_max[i][1] << 20U;
  
- typedef struct _agp_version {
- 	u_int16_t major;
-Index: sys/dev/agp/agp_if.m
-===================================================================
-diff --git sys/dev/agp/agp_if.m sys/dev/agp/agp_if.m
---- sys/dev/agp/agp_if.m	(revision 230124)
-+++ sys/dev/agp/agp_if.m	(working copy)
-@@ -36,6 +36,14 @@
- #
- INTERFACE agp;
+ 	/*
+@@ -803,6 +804,13 @@ agp_unbind_user(device_t dev, agp_unbind *unbind)
+ }
  
-+CODE {
-+	static int
-+	null_agp_chipset_flush(device_t dev)
-+	{
-+		return (ENXIO);
-+	}
-+};
+ static int
++agp_chipset_flush(device_t dev)
++{
 +
- #
- # Return the current aperture size.
- #
-@@ -132,3 +140,7 @@
- 	device_t	dev;
- 	struct agp_memory *handle;
- };
++	return (AGP_CHIPSET_FLUSH(dev));
++}
 +
-+METHOD int chipset_flush {
-+	device_t	dev;
-+} DEFAULT null_agp_chipset_flush;
-Index: sys/dev/agp/agp_i810.c
-===================================================================
-diff --git sys/dev/agp/agp_i810.c sys/dev/agp/agp_i810.c
---- sys/dev/agp/agp_i810.c	(revision 230124)
-+++ sys/dev/agp/agp_i810.c	(working copy)
++static int
+ agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
+ {
+ 	device_t dev = kdev->si_drv1;
+@@ -869,6 +877,8 @@ agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread
+ 	case AGPIOC_UNBIND:
+ 		return agp_unbind_user(dev, (agp_unbind *)data);
+ 
++	case AGPIOC_CHIPSET_FLUSH:
++		return agp_chipset_flush(dev);
+ 	}
+ 
+ 	return EINVAL;
+diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
+index ff3ad1c..ec397b7 100644
+--- sys/dev/agp/agp_i810.c
++++ sys/dev/agp/agp_i810.c
 @@ -1,8 +1,12 @@
  /*-
   * Copyright (c) 2000 Doug Rabson
@@ -983,7 +252,7 @@
   */
  
  #include <sys/cdefs.h>
-@@ -35,10 +42,17 @@
+@@ -35,10 +42,17 @@ __FBSDID("$FreeBSD$");
  
  #include "opt_bus.h"
  
@@ -1001,7 +270,7 @@
  #include <sys/module.h>
  #include <sys/bus.h>
  #include <sys/lock.h>
-@@ -47,8 +61,10 @@
+@@ -47,8 +61,10 @@ __FBSDID("$FreeBSD$");
  
  #include <dev/agp/agppriv.h>
  #include <dev/agp/agpreg.h>
@@ -1012,7 +281,7 @@
  
  #include <vm/vm.h>
  #include <vm/vm_object.h>
-@@ -63,6 +79,82 @@
+@@ -63,6 +79,82 @@ __FBSDID("$FreeBSD$");
  
  MALLOC_DECLARE(M_AGP);
  
@@ -1095,7 +364,7 @@
  enum {
  	CHIP_I810,	/* i810/i815 */
  	CHIP_I830,	/* 830M/845G */
-@@ -72,6 +164,7 @@
+@@ -72,6 +164,7 @@ enum {
  	CHIP_G33,	/* G33/Q33/Q35 */
  	CHIP_IGD,	/* Pineview */
  	CHIP_G4X,	/* G45/Q45 */
@@ -1103,7 +372,7 @@
  };
  
  /* The i810 through i855 have the registers at BAR 1, and the GATT gets
-@@ -96,21 +189,298 @@
+@@ -96,19 +189,296 @@ static struct resource_spec agp_i965_res_spec[] = {
  	{ -1, 0 }
  };
  
@@ -1136,8 +405,8 @@
 +	struct resource *sc_flush_page_res;
 +	void *sc_flush_page_vaddr;
 +	int sc_bios_allocated_flush_page;
- };
- 
++};
++
 +static device_t intel_agp;
 +
 +struct agp_i810_driver {
@@ -1402,12 +671,10 @@
 +	.chipset_flush_setup = agp_i810_chipset_flush_setup,
 +	.chipset_flush_teardown = agp_i810_chipset_flush_teardown,
 +	.chipset_flush = agp_i810_chipset_flush,
-+};
-+
+ };
+ 
  /* For adding new devices, devid is the id of the graphics controller
-  * (pci:0:2:0, for example).  The placeholder (usually at pci:0:2:1) for the
-  * second head should never be added.  The bridge_offset is the offset to
-@@ -118,75 +488,232 @@
+@@ -118,75 +488,232 @@ struct agp_i810_softc {
   */
  static const struct agp_i810_match {
  	int devid;
@@ -1707,7 +974,7 @@
  };
  
  static const struct agp_i810_match*
-@@ -196,17 +723,17 @@
+@@ -196,17 +723,17 @@ agp_i810_match(device_t dev)
  
  	if (pci_get_class(dev) != PCIC_DISPLAY
  	    || pci_get_subclass(dev) != PCIS_DISPLAY_VGA)
@@ -1729,7 +996,7 @@
  }
  
  /*
-@@ -215,28 +742,8 @@
+@@ -215,28 +742,8 @@ agp_i810_match(device_t dev)
  static device_t
  agp_i810_find_bridge(device_t dev)
  {
@@ -1740,14 +1007,14 @@
 -  
 -	match = agp_i810_match(dev);
 -	devid = match->devid - match->bridge_offset;
- 
+-
 -	if (device_get_children(device_get_parent(device_get_parent(dev)),
 -	    &children, &nchildren))
 -		return 0;
 -
 -	for (i = 0; i < nchildren; i++) {
 -		child = children[i];
--
+ 
 -		if (pci_get_devid(child) == devid) {
 -			free(children, M_TEMP);
 -			return child;
@@ -1759,7 +1026,7 @@
  }
  
  static void
-@@ -249,92 +756,116 @@
+@@ -249,92 +756,116 @@ agp_i810_identify(driver_t *driver, device_t parent)
  }
  
  static int
@@ -1902,13 +1169,8 @@
 -			return ENXIO;
 -		}
 -		break;
-+	err = match->driver->check_active(bdev);
-+	if (err != 0) {
-+		if (bootverbose)
-+			printf("i810: disabled, not probing\n");
-+		return (err);
- 	}
- 
+-	}
+-
 -	if (match->devid == 0x35828086) {
 -		switch (pci_read_config(dev, AGP_I85X_CAPID, 1)) {
 -		case AGP_I855_GME:
@@ -1934,15 +1196,20 @@
 -		}
 -	} else {
 -		device_set_desc(dev, match->name);
--	}
--
++	err = match->driver->check_active(bdev);
++	if (err != 0) {
++		if (bootverbose)
++			printf("i810: disabled, not probing\n");
++		return (err);
+ 	}
+ 
 -	return BUS_PROBE_DEFAULT;
 +	match->driver->set_desc(dev, match);
 +	return (BUS_PROBE_DEFAULT);
  }
  
  static void
-@@ -344,391 +875,637 @@
+@@ -344,391 +875,637 @@ agp_i810_dump_regs(device_t dev)
  
  	device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n",
  	    bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL));
@@ -1954,6 +1221,28 @@
 -	case CHIP_I810:
 -		device_printf(dev, "AGP_I810_MISCC: 0x%04x\n",
 -		    pci_read_config(sc->bdev, AGP_I810_MISCC, 2));
+-		break;
+-	case CHIP_I830:
+-		device_printf(dev, "AGP_I830_GCC1: 0x%02x\n",
+-		    pci_read_config(sc->bdev, AGP_I830_GCC1, 1));
+-		break;
+-	case CHIP_I855:
+-		device_printf(dev, "AGP_I855_GCC1: 0x%02x\n",
+-		    pci_read_config(sc->bdev, AGP_I855_GCC1, 1));
+-		break;
+-	case CHIP_I915:
+-	case CHIP_I965:
+-	case CHIP_G33:
+-	case CHIP_IGD:
+-	case CHIP_G4X:
+-		device_printf(dev, "AGP_I855_GCC1: 0x%02x\n",
+-		    pci_read_config(sc->bdev, AGP_I855_GCC1, 1));
+-		device_printf(dev, "AGP_I915_MSAC: 0x%02x\n",
+-		    pci_read_config(sc->bdev, AGP_I915_MSAC, 1));
+-		break;
+-	}
+-	device_printf(dev, "Aperture resource size: %d bytes\n",
+-	    AGP_GET_APERTURE(dev));
 +static void
 +agp_i830_dump_regs(device_t dev)
 +{
@@ -1963,24 +1252,53 @@
 +	    bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL));
 +	device_printf(dev, "AGP_I830_GCC1: 0x%02x\n",
 +	    pci_read_config(sc->bdev, AGP_I830_GCC1, 1));
-+}
-+
+ }
+ 
+-static int
+-agp_i810_attach(device_t dev)
 +static void
 +agp_i855_dump_regs(device_t dev)
-+{
-+	struct agp_i810_softc *sc = device_get_softc(dev);
-+
+ {
+ 	struct agp_i810_softc *sc = device_get_softc(dev);
+-	struct agp_gatt *gatt;
+-	const struct agp_i810_match *match;
+-	int error;
+ 
+-	sc->bdev = agp_i810_find_bridge(dev);
+-	if (!sc->bdev)
+-		return ENOENT;
 +	device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n",
 +	    bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL));
 +	device_printf(dev, "AGP_I855_GCC1: 0x%02x\n",
 +	    pci_read_config(sc->bdev, AGP_I855_GCC1, 1));
 +}
-+
+ 
+-	match = agp_i810_match(dev);
+-	sc->chiptype = match->chiptype;
 +static void
 +agp_i915_dump_regs(device_t dev)
 +{
 +	struct agp_i810_softc *sc = device_get_softc(dev);
-+
+ 
+-	switch (sc->chiptype) {
+-	case CHIP_I810:
+-	case CHIP_I830:
+-	case CHIP_I855:
+-		sc->sc_res_spec = agp_i810_res_spec;
+-		agp_set_aperture_resource(dev, AGP_APBASE);
+-		break;
+-	case CHIP_I915:
+-	case CHIP_G33:
+-	case CHIP_IGD:
+-		sc->sc_res_spec = agp_i915_res_spec;
+-		agp_set_aperture_resource(dev, AGP_I915_GMADR);
+-		break;
+-	case CHIP_I965:
+-	case CHIP_G4X:
+-		sc->sc_res_spec = agp_i965_res_spec;
+-		agp_set_aperture_resource(dev, AGP_I915_GMADR);
+-		break;
+-	}
 +	device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n",
 +	    bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL));
 +	device_printf(dev, "AGP_I855_GCC1: 0x%02x\n",
@@ -1988,12 +1306,23 @@
 +	device_printf(dev, "AGP_I915_MSAC: 0x%02x\n",
 +	    pci_read_config(sc->bdev, AGP_I915_MSAC, 1));
 +}
-+
+ 
+-	error = agp_generic_attach(dev);
+-	if (error)
+-		return error;
 +static void
 +agp_i965_dump_regs(device_t dev)
 +{
 +	struct agp_i810_softc *sc = device_get_softc(dev);
-+
+ 
+-	if (sc->chiptype != CHIP_I965 && sc->chiptype != CHIP_G33 &&
+-	    sc->chiptype != CHIP_IGD && sc->chiptype != CHIP_G4X &&
+-	    ptoa((vm_paddr_t)Maxmem) > 0xfffffffful)
+-	{
+-		device_printf(dev, "agp_i810.c does not support physical "
+-		    "memory above 4GB.\n");
+-		return ENOENT;
+-	}
 +	device_printf(dev, "AGP_I965_PGTBL_CTL2: %08x\n",
 +	    bus_read_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2));
 +	device_printf(dev, "AGP_I855_GCC1: 0x%02x\n",
@@ -2001,50 +1330,120 @@
 +	device_printf(dev, "AGP_I965_MSAC: 0x%02x\n",
 +	    pci_read_config(sc->bdev, AGP_I965_MSAC, 1));
 +}
-+
+ 
+-	if (bus_alloc_resources(dev, sc->sc_res_spec, sc->sc_res)) {
+-		agp_generic_detach(dev);
+-		return ENODEV;
+-	}
 +static void
 +agp_sb_dump_regs(device_t dev)
 +{
 +	struct agp_i810_softc *sc = device_get_softc(dev);
-+
+ 
+-	sc->initial_aperture = AGP_GET_APERTURE(dev);
 +	device_printf(dev, "AGP_SNB_GFX_MODE: %08x\n",
 +	    bus_read_4(sc->sc_res[0], AGP_SNB_GFX_MODE));
 +	device_printf(dev, "AGP_SNB_GCC1: 0x%04x\n",
 +	    pci_read_config(sc->bdev, AGP_SNB_GCC1, 2));
 +}
-+
+ 
+-	gatt = malloc( sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
+-	if (!gatt) {
+-		bus_release_resources(dev, sc->sc_res_spec, sc->sc_res);
+- 		agp_generic_detach(dev);
+- 		return ENOMEM;
+-	}
+-	sc->gatt = gatt;
 +static int
 +agp_i810_get_stolen_size(device_t dev)
 +{
 +	struct agp_i810_softc *sc;
-+
+ 
+-	gatt->ag_entries = AGP_GET_APERTURE(dev) >> AGP_PAGE_SHIFT;
 +	sc = device_get_softc(dev);
 +	sc->stolen = 0;
 +	sc->stolen_size = 0;
 +	return (0);
 +}
-+
+ 
+-	if ( sc->chiptype == CHIP_I810 ) {
+-		/* Some i810s have on-chip memory called dcache */
+-		if (bus_read_1(sc->sc_res[0], AGP_I810_DRT) &
+-		    AGP_I810_DRT_POPULATED)
+-			sc->dcache_size = 4 * 1024 * 1024;
+-		else
+-			sc->dcache_size = 0;
+-
+-		/* According to the specs the gatt on the i810 must be 64k */
+-		gatt->ag_virtual = contigmalloc( 64 * 1024, M_AGP, 0, 
+-					0, ~0, PAGE_SIZE, 0);
+-		if (!gatt->ag_virtual) {
+-			if (bootverbose)
+-				device_printf(dev, "contiguous allocation failed\n");
+-			bus_release_resources(dev, sc->sc_res_spec,
+-			    sc->sc_res);
+-			free(gatt, M_AGP);
+-			agp_generic_detach(dev);
+-			return ENOMEM;
+-		}
+-		bzero(gatt->ag_virtual, gatt->ag_entries * sizeof(u_int32_t));
+-	
+-		gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
+-		agp_flush_cache();
+-		/* Install the GATT. */
+-		bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL,
+-		    gatt->ag_physical | 1);
+-	} else if ( sc->chiptype == CHIP_I830 ) {
+-		/* The i830 automatically initializes the 128k gatt on boot. */
+-		unsigned int gcc1, pgtblctl;
+-		
+-		gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 1);
+-		switch (gcc1 & AGP_I830_GCC1_GMS) {
+-			case AGP_I830_GCC1_GMS_STOLEN_512:
+-				sc->stolen = (512 - 132) * 1024 / 4096;
+-				break;
+-			case AGP_I830_GCC1_GMS_STOLEN_1024: 
+-				sc->stolen = (1024 - 132) * 1024 / 4096;
+-				break;
+-			case AGP_I830_GCC1_GMS_STOLEN_8192: 
+-				sc->stolen = (8192 - 132) * 1024 / 4096;
+-				break;
+-			default:
+-				sc->stolen = 0;
+-				device_printf(dev, "unknown memory configuration, disabling\n");
+-				bus_release_resources(dev, sc->sc_res_spec,
+-				    sc->sc_res);
+-				free(gatt, M_AGP);
+-				agp_generic_detach(dev);
+-				return EINVAL;
+-		}
 +static int
 +agp_i830_get_stolen_size(device_t dev)
 +{
 +	struct agp_i810_softc *sc;
 +	unsigned int gcc1;
-+
+ 
+-		/* GATT address is already in there, make sure it's enabled */
+-		pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL);
+-		pgtblctl |= 1;
+-		bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl);
 +	sc = device_get_softc(dev);
-+
+ 
+-		gatt->ag_physical = pgtblctl & ~1;
+-	} else if (sc->chiptype == CHIP_I855 || sc->chiptype == CHIP_I915 ||
+-	    sc->chiptype == CHIP_I965 || sc->chiptype == CHIP_G33 ||
+-	    sc->chiptype == CHIP_IGD || sc->chiptype == CHIP_G4X) {
+-		unsigned int gcc1, pgtblctl, stolen, gtt_size;
 +	gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 1);
 +	switch (gcc1 & AGP_I830_GCC1_GMS) {
 +	case AGP_I830_GCC1_GMS_STOLEN_512:
 +		sc->stolen = (512 - 132) * 1024 / 4096;
 +		sc->stolen_size = 512 * 1024;
- 		break;
--	case CHIP_I830:
--		device_printf(dev, "AGP_I830_GCC1: 0x%02x\n",
--		    pci_read_config(sc->bdev, AGP_I830_GCC1, 1));
++		break;
 +	case AGP_I830_GCC1_GMS_STOLEN_1024: 
 +		sc->stolen = (1024 - 132) * 1024 / 4096;
 +		sc->stolen_size = 1024 * 1024;
- 		break;
++		break;
 +	case AGP_I830_GCC1_GMS_STOLEN_8192: 
 +		sc->stolen = (8192 - 132) * 1024 / 4096;
 +		sc->stolen_size = 8192 * 1024;
@@ -2056,7 +1455,13 @@
 +	}
 +	return (0);
 +}
-+
+ 
+-		/* Stolen memory is set up at the beginning of the aperture by
+-		 * the BIOS, consisting of the GATT followed by 4kb for the
+-		 * BIOS display.
+-		 */
+-		switch (sc->chiptype) {
+-		case CHIP_I855:
 +static int
 +agp_i915_get_stolen_size(device_t dev)
 +{
@@ -2071,68 +1476,214 @@
 +	 * BIOS display.
 +	 */
 +	switch (sc->match->driver->chiptype) {
- 	case CHIP_I855:
--		device_printf(dev, "AGP_I855_GCC1: 0x%02x\n",
--		    pci_read_config(sc->bdev, AGP_I855_GCC1, 1));
++	case CHIP_I855:
 +		gtt_size = 128;
- 		break;
- 	case CHIP_I915:
++		break;
++	case CHIP_I915:
 +		gtt_size = 256;
 +		break;
- 	case CHIP_I965:
++	case CHIP_I965:
 +		switch (bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL) &
 +			AGP_I810_PGTBL_SIZE_MASK) {
 +		case AGP_I810_PGTBL_SIZE_128KB:
-+			gtt_size = 128;
-+			break;
+ 			gtt_size = 128;
+ 			break;
+-		case CHIP_I915:
 +		case AGP_I810_PGTBL_SIZE_256KB:
-+			gtt_size = 256;
-+			break;
+ 			gtt_size = 256;
+ 			break;
+-		case CHIP_I965:
+-			switch (bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL) &
+-			    AGP_I810_PGTBL_SIZE_MASK) {
+-			case AGP_I810_PGTBL_SIZE_128KB:
+-				gtt_size = 128;
+-				break;
+-			case AGP_I810_PGTBL_SIZE_256KB:
+-				gtt_size = 256;
+-				break;
+-			case AGP_I810_PGTBL_SIZE_512KB:
+-				gtt_size = 512;
+-				break;
+-			case AGP_I965_PGTBL_SIZE_1MB:
+-				gtt_size = 1024;
+-				break;
+-			case AGP_I965_PGTBL_SIZE_2MB:
+-				gtt_size = 2048;
+-				break;
+-			case AGP_I965_PGTBL_SIZE_1_5MB:
+-				gtt_size = 1024 + 512;
+-				break;
+-			default:
+-				device_printf(dev, "Bad PGTBL size\n");
+-				bus_release_resources(dev, sc->sc_res_spec,
+-				    sc->sc_res);
+-				free(gatt, M_AGP);
+-				agp_generic_detach(dev);
+-				return EINVAL;
+-			}
 +		case AGP_I810_PGTBL_SIZE_512KB:
 +			gtt_size = 512;
 +			break;
 +		case AGP_I965_PGTBL_SIZE_1MB:
 +			gtt_size = 1024;
-+			break;
+ 			break;
+-		case CHIP_G33:
+-			gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 2);
+-			switch (gcc1 & AGP_G33_MGGC_GGMS_MASK) {
+-			case AGP_G33_MGGC_GGMS_SIZE_1M:
+-				gtt_size = 1024;
+-				break;
+-			case AGP_G33_MGGC_GGMS_SIZE_2M:
+-				gtt_size = 2048;
+-				break;
+-			default:
+-				device_printf(dev, "Bad PGTBL size\n");
+-				bus_release_resources(dev, sc->sc_res_spec,
+-				    sc->sc_res);
+-				free(gatt, M_AGP);
+-				agp_generic_detach(dev);
+-				return EINVAL;
+-			}
 +		case AGP_I965_PGTBL_SIZE_2MB:
 +			gtt_size = 2048;
-+			break;
+ 			break;
+-		case CHIP_IGD:
+-		case CHIP_G4X:
+-			gtt_size = 0;
 +		case AGP_I965_PGTBL_SIZE_1_5MB:
 +			gtt_size = 1024 + 512;
-+			break;
-+		default:
+ 			break;
+ 		default:
+-			device_printf(dev, "Bad chiptype\n");
+-			bus_release_resources(dev, sc->sc_res_spec,
+-			    sc->sc_res);
+-			free(gatt, M_AGP);
+-			agp_generic_detach(dev);
+-			return EINVAL;
 +			device_printf(dev, "Bad PGTBL size\n");
 +			return (EINVAL);
-+		}
+ 		}
+-
+-		/* GCC1 is called MGGC on i915+ */
+-		gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 1);
+-		switch (gcc1 & AGP_I855_GCC1_GMS) {
+-		case AGP_I855_GCC1_GMS_STOLEN_1M:
+-			stolen = 1024;
+-			break;
+-		case AGP_I855_GCC1_GMS_STOLEN_4M:
+-			stolen = 4 * 1024;
 +		break;
- 	case CHIP_G33:
++	case CHIP_G33:
 +		gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 2);
 +		switch (gcc1 & AGP_G33_MGGC_GGMS_MASK) {
 +		case AGP_G33_MGGC_GGMS_SIZE_1M:
 +			gtt_size = 1024;
-+			break;
+ 			break;
+-		case AGP_I855_GCC1_GMS_STOLEN_8M:
+-			stolen = 8 * 1024;
+-			break;
+-		case AGP_I855_GCC1_GMS_STOLEN_16M:
+-			stolen = 16 * 1024;
+-			break;
+-		case AGP_I855_GCC1_GMS_STOLEN_32M:
+-			stolen = 32 * 1024;
+-			break;
+-		case AGP_I915_GCC1_GMS_STOLEN_48M:
+-			if (sc->chiptype == CHIP_I915 ||
+-			    sc->chiptype == CHIP_I965 ||
+-			    sc->chiptype == CHIP_G33 ||
+-			    sc->chiptype == CHIP_IGD ||
+-			    sc->chiptype == CHIP_G4X) {
+-				stolen = 48 * 1024;
+-			} else {
+-				stolen = 0;
+-			}
+-			break;
+-		case AGP_I915_GCC1_GMS_STOLEN_64M:
+-			if (sc->chiptype == CHIP_I915 ||
+-			    sc->chiptype == CHIP_I965 ||
+-			    sc->chiptype == CHIP_G33 ||
+-			    sc->chiptype == CHIP_IGD ||
+-			    sc->chiptype == CHIP_G4X) {
+-				stolen = 64 * 1024;
+-			} else {
+-				stolen = 0;
+-			}
+-			break;
+-		case AGP_G33_GCC1_GMS_STOLEN_128M:
+-			if (sc->chiptype == CHIP_I965 ||
+-			    sc->chiptype == CHIP_G33 ||
+-			    sc->chiptype == CHIP_IGD ||
+-			    sc->chiptype == CHIP_G4X) {
+-				stolen = 128 * 1024;
+-			} else {
+-				stolen = 0;
+-			}
+-			break;
+-		case AGP_G33_GCC1_GMS_STOLEN_256M:
+-			if (sc->chiptype == CHIP_I965 ||
+-			    sc->chiptype == CHIP_G33 ||
+-			    sc->chiptype == CHIP_IGD ||
+-			    sc->chiptype == CHIP_G4X) {
+-				stolen = 256 * 1024;
+-			} else {
+-				stolen = 0;
+-			}
+-			break;
+-		case AGP_G4X_GCC1_GMS_STOLEN_96M:
+-			if (sc->chiptype == CHIP_I965 ||
+-			    sc->chiptype == CHIP_G4X) {
+-				stolen = 96 * 1024;
+-			} else {
+-				stolen = 0;
+-			}
+-			break;
+-		case AGP_G4X_GCC1_GMS_STOLEN_160M:
+-			if (sc->chiptype == CHIP_I965 ||
+-			    sc->chiptype == CHIP_G4X) {
+-				stolen = 160 * 1024;
+-			} else {
+-				stolen = 0;
+-			}
+-			break;
+-		case AGP_G4X_GCC1_GMS_STOLEN_224M:
+-			if (sc->chiptype == CHIP_I965 ||
+-			    sc->chiptype == CHIP_G4X) {
+-				stolen = 224 * 1024;
+-			} else {
+-				stolen = 0;
+-			}
+-			break;
+-		case AGP_G4X_GCC1_GMS_STOLEN_352M:
+-			if (sc->chiptype == CHIP_I965 ||
+-			    sc->chiptype == CHIP_G4X) {
+-				stolen = 352 * 1024;
+-			} else {
+-				stolen = 0;
+-			}
 +		case AGP_G33_MGGC_GGMS_SIZE_2M:
 +			gtt_size = 2048;
-+			break;
-+		default:
+ 			break;
+ 		default:
+-			device_printf(dev, "unknown memory configuration, "
+-			    "disabling\n");
+-			bus_release_resources(dev, sc->sc_res_spec,
+-			    sc->sc_res);
+-			free(gatt, M_AGP);
+-			agp_generic_detach(dev);
+-			return EINVAL;
 +			device_printf(dev, "Bad PGTBL size\n");
 +			return (EINVAL);
-+		}
+ 		}
 +		break;
- 	case CHIP_IGD:
- 	case CHIP_G4X:
--		device_printf(dev, "AGP_I855_GCC1: 0x%02x\n",
--		    pci_read_config(sc->bdev, AGP_I855_GCC1, 1));
--		device_printf(dev, "AGP_I915_MSAC: 0x%02x\n",
--		    pci_read_config(sc->bdev, AGP_I915_MSAC, 1));
++	case CHIP_IGD:
++	case CHIP_G4X:
 +		gtt_size = 0;
- 		break;
++		break;
 +	default:
 +		device_printf(dev, "Bad chiptype\n");
 +		return (EINVAL);
- 	}
--	device_printf(dev, "Aperture resource size: %d bytes\n",
--	    AGP_GET_APERTURE(dev));
++	}
 +
 +	/* GCC1 is called MGGC on i915+ */
 +	gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 1);
@@ -2196,28 +1747,27 @@
 +		device_printf(dev, "unknown memory configuration, disabling\n");
 +		return (EINVAL);
 +	}
-+
+ 
+-		gtt_size += 4;
 +	gtt_size += 4;
 +	sc->stolen_size = stolen * 1024;
 +	sc->stolen = (stolen - gtt_size) * 1024 / 4096;
-+
+ 
+-		sc->stolen = (stolen - gtt_size) * 1024 / 4096;
 +	return (0);
- }
++}
  
- static int
--agp_i810_attach(device_t dev)
+-		/* GATT address is already in there, make sure it's enabled */
+-		pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL);
+-		pgtblctl |= 1;
+-		bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl);
++static int
 +agp_sb_get_stolen_size(device_t dev)
- {
--	struct agp_i810_softc *sc = device_get_softc(dev);
--	struct agp_gatt *gatt;
--	const struct agp_i810_match *match;
--	int error;
++{
 +	struct agp_i810_softc *sc;
 +	uint16_t gmch_ctl;
  
--	sc->bdev = agp_i810_find_bridge(dev);
--	if (!sc->bdev)
--		return ENOENT;
+-		gatt->ag_physical = pgtblctl & ~1;
 +	sc = device_get_softc(dev);
 +	gmch_ctl = pci_read_config(sc->bdev, AGP_SNB_GCC1, 2);
 +	switch (gmch_ctl & AGP_SNB_GMCH_GMS_STOLEN_MASK) {
@@ -2269,26 +1819,18 @@
 +	case AGP_SNB_GMCH_GMS_STOLEN_512M:
 +		sc->stolen_size = 512 * 1024 * 1024;
 +		break;
-+	}
+ 	}
 +	sc->stolen = (sc->stolen_size - 4) / 4096;
 +	return (0);
 +}
- 
--	match = agp_i810_match(dev);
--	sc->chiptype = match->chiptype;
++
 +static int
 +agp_i810_get_gtt_mappable_entries(device_t dev)
 +{
 +	struct agp_i810_softc *sc;
 +	uint32_t ap;
 +	uint16_t miscc;
- 
--	switch (sc->chiptype) {
--	case CHIP_I810:
--	case CHIP_I830:
--	case CHIP_I855:
--		sc->sc_res_spec = agp_i810_res_spec;
--		agp_set_aperture_resource(dev, AGP_APBASE);
++
 +	sc = device_get_softc(dev);
 +	miscc = pci_read_config(sc->bdev, AGP_I810_MISCC, 2);
 +	if ((miscc & AGP_I810_MISCC_WINSIZE) == AGP_I810_MISCC_WINSIZE_32)
@@ -2332,7 +1874,7 @@
 +agp_i810_get_gtt_total_entries(device_t dev)
 +{
 +	struct agp_i810_softc *sc;
-+
+ 
 +	sc = device_get_softc(dev);
 +	sc->gtt_total_entries = sc->gtt_mappable_entries;
 +	return (0);
@@ -2351,22 +1893,13 @@
 +	switch (pgetbl_ctl & AGP_I810_PGTBL_SIZE_MASK) {
 +	case AGP_I810_PGTBL_SIZE_128KB:
 +		sc->gtt_total_entries = 128 * 1024 / 4;
- 		break;
--	case CHIP_I915:
--	case CHIP_G33:
--	case CHIP_IGD:
--		sc->sc_res_spec = agp_i915_res_spec;
--		agp_set_aperture_resource(dev, AGP_I915_GMADR);
++		break;
 +	case AGP_I810_PGTBL_SIZE_256KB:
 +		sc->gtt_total_entries = 256 * 1024 / 4;
- 		break;
--	case CHIP_I965:
--	case CHIP_G4X:
--		sc->sc_res_spec = agp_i965_res_spec;
--		agp_set_aperture_resource(dev, AGP_I915_GMADR);
++		break;
 +	case AGP_I810_PGTBL_SIZE_512KB:
 +		sc->gtt_total_entries = 512 * 1024 / 4;
- 		break;
++		break;
 +	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
 +	case AGP_I810_PGTBL_SIZE_1MB:
 +		sc->gtt_total_entries = 1024 * 1024 / 4;
@@ -2380,26 +1913,16 @@
 +	default:
 +		device_printf(dev, "Unknown page table size\n");
 +		error = ENXIO;
- 	}
++	}
 +	return (error);
 +}
- 
--	error = agp_generic_attach(dev);
--	if (error)
--		return error;
++
 +static void
 +agp_gen5_adjust_pgtbl_size(device_t dev, uint32_t sz)
 +{
 +	struct agp_i810_softc *sc;
 +	uint32_t pgetbl_ctl, pgetbl_ctl2;
- 
--	if (sc->chiptype != CHIP_I965 && sc->chiptype != CHIP_G33 &&
--	    sc->chiptype != CHIP_IGD && sc->chiptype != CHIP_G4X &&
--	    ptoa((vm_paddr_t)Maxmem) > 0xfffffffful)
--	{
--		device_printf(dev, "agp_i810.c does not support physical "
--		    "memory above 4GB.\n");
--		return ENOENT;
++
 +	sc = device_get_softc(dev);
 +
 +	/* Disable per-process page table. */
@@ -2438,11 +1961,8 @@
 +	default:
 +		device_printf(dev, "Unknown page table size\n");
 +		return (ENXIO);
- 	}
- 
--	if (bus_alloc_resources(dev, sc->sc_res_spec, sc->sc_res)) {
--		agp_generic_detach(dev);
--		return ENODEV;
++	}
++
 +	return (agp_i965_get_gtt_total_entries(dev));
 +}
 +
@@ -2466,21 +1986,15 @@
 +	case AGP_SNB_GTT_SIZE_2M:
 +		sc->gtt_total_entries = 2 * 1024 * 1024 / 4;
 +		break;
- 	}
++	}
 +	return (0);
 +}
- 
--	sc->initial_aperture = AGP_GET_APERTURE(dev);
++
 +static int
 +agp_i810_install_gatt(device_t dev)
 +{
 +	struct agp_i810_softc *sc;
- 
--	gatt = malloc( sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
--	if (!gatt) {
--		bus_release_resources(dev, sc->sc_res_spec, sc->sc_res);
-- 		agp_generic_detach(dev);
-- 		return ENOMEM;
++
 +	sc = device_get_softc(dev);
 +
 +	/* Some i810s have on-chip memory called dcache. */
@@ -2497,10 +2011,8 @@
 +		if (bootverbose)
 +			device_printf(dev, "contiguous allocation failed\n");
 +		return (ENOMEM);
- 	}
--	sc->gatt = gatt;
- 
--	gatt->ag_entries = AGP_GET_APERTURE(dev) >> AGP_PAGE_SHIFT;
++	}
++
 +	bzero(sc->gatt->ag_virtual, sc->gatt->ag_entries * sizeof(u_int32_t));
 +	sc->gatt->ag_physical = vtophys((vm_offset_t)sc->gatt->ag_virtual);
 +	agp_flush_cache();
@@ -2509,33 +2021,13 @@
 +	    sc->gatt->ag_physical | 1);
 +	return (0);
 +}
- 
--	if ( sc->chiptype == CHIP_I810 ) {
--		/* Some i810s have on-chip memory called dcache */
--		if (bus_read_1(sc->sc_res[0], AGP_I810_DRT) &
--		    AGP_I810_DRT_POPULATED)
--			sc->dcache_size = 4 * 1024 * 1024;
--		else
--			sc->dcache_size = 0;
++
 +static int
 +agp_i830_install_gatt(device_t dev)
 +{
 +	struct agp_i810_softc *sc;
 +	uint32_t pgtblctl;
- 
--		/* According to the specs the gatt on the i810 must be 64k */
--		gatt->ag_virtual = contigmalloc( 64 * 1024, M_AGP, 0, 
--					0, ~0, PAGE_SIZE, 0);
--		if (!gatt->ag_virtual) {
--			if (bootverbose)
--				device_printf(dev, "contiguous allocation failed\n");
--			bus_release_resources(dev, sc->sc_res_spec,
--			    sc->sc_res);
--			free(gatt, M_AGP);
--			agp_generic_detach(dev);
--			return ENOMEM;
--		}
--		bzero(gatt->ag_virtual, gatt->ag_entries * sizeof(u_int32_t));
++
 +	sc = device_get_softc(dev);
 +
 +	/*
@@ -2545,241 +2037,30 @@
 +	pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL);
 +	pgtblctl |= 1;
 +	bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl);
- 	
--		gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
--		agp_flush_cache();
--		/* Install the GATT. */
--		bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL,
--		    gatt->ag_physical | 1);
--	} else if ( sc->chiptype == CHIP_I830 ) {
--		/* The i830 automatically initializes the 128k gatt on boot. */
--		unsigned int gcc1, pgtblctl;
--		
--		gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 1);
--		switch (gcc1 & AGP_I830_GCC1_GMS) {
--			case AGP_I830_GCC1_GMS_STOLEN_512:
--				sc->stolen = (512 - 132) * 1024 / 4096;
--				break;
--			case AGP_I830_GCC1_GMS_STOLEN_1024: 
--				sc->stolen = (1024 - 132) * 1024 / 4096;
--				break;
--			case AGP_I830_GCC1_GMS_STOLEN_8192: 
--				sc->stolen = (8192 - 132) * 1024 / 4096;
--				break;
--			default:
--				sc->stolen = 0;
--				device_printf(dev, "unknown memory configuration, disabling\n");
--				bus_release_resources(dev, sc->sc_res_spec,
--				    sc->sc_res);
--				free(gatt, M_AGP);
--				agp_generic_detach(dev);
--				return EINVAL;
--		}
++	
 +	sc->gatt->ag_physical = pgtblctl & ~1;
 +	return (0);
 +}
- 
--		/* GATT address is already in there, make sure it's enabled */
--		pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL);
--		pgtblctl |= 1;
--		bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl);
++
 +static int
 +agp_i810_attach(device_t dev)
 +{
 +	struct agp_i810_softc *sc;
 +	int error;
- 
--		gatt->ag_physical = pgtblctl & ~1;
--	} else if (sc->chiptype == CHIP_I855 || sc->chiptype == CHIP_I915 ||
--	    sc->chiptype == CHIP_I965 || sc->chiptype == CHIP_G33 ||
--	    sc->chiptype == CHIP_IGD || sc->chiptype == CHIP_G4X) {
--		unsigned int gcc1, pgtblctl, stolen, gtt_size;
++
 +	sc = device_get_softc(dev);
 +	sc->bdev = agp_i810_find_bridge(dev);
 +	if (sc->bdev == NULL)
 +		return (ENOENT);
- 
--		/* Stolen memory is set up at the beginning of the aperture by
--		 * the BIOS, consisting of the GATT followed by 4kb for the
--		 * BIOS display.
--		 */
--		switch (sc->chiptype) {
--		case CHIP_I855:
--			gtt_size = 128;
--			break;
--		case CHIP_I915:
--			gtt_size = 256;
--			break;
--		case CHIP_I965:
--			switch (bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL) &
--			    AGP_I810_PGTBL_SIZE_MASK) {
--			case AGP_I810_PGTBL_SIZE_128KB:
--				gtt_size = 128;
--				break;
--			case AGP_I810_PGTBL_SIZE_256KB:
--				gtt_size = 256;
--				break;
--			case AGP_I810_PGTBL_SIZE_512KB:
--				gtt_size = 512;
--				break;
--			case AGP_I965_PGTBL_SIZE_1MB:
--				gtt_size = 1024;
--				break;
--			case AGP_I965_PGTBL_SIZE_2MB:
--				gtt_size = 2048;
--				break;
--			case AGP_I965_PGTBL_SIZE_1_5MB:
--				gtt_size = 1024 + 512;
--				break;
--			default:
--				device_printf(dev, "Bad PGTBL size\n");
--				bus_release_resources(dev, sc->sc_res_spec,
--				    sc->sc_res);
--				free(gatt, M_AGP);
--				agp_generic_detach(dev);
--				return EINVAL;
--			}
--			break;
--		case CHIP_G33:
--			gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 2);
--			switch (gcc1 & AGP_G33_MGGC_GGMS_MASK) {
--			case AGP_G33_MGGC_GGMS_SIZE_1M:
--				gtt_size = 1024;
--				break;
--			case AGP_G33_MGGC_GGMS_SIZE_2M:
--				gtt_size = 2048;
--				break;
--			default:
--				device_printf(dev, "Bad PGTBL size\n");
--				bus_release_resources(dev, sc->sc_res_spec,
--				    sc->sc_res);
--				free(gatt, M_AGP);
--				agp_generic_detach(dev);
--				return EINVAL;
--			}
--			break;
--		case CHIP_IGD:
--		case CHIP_G4X:
--			gtt_size = 0;
--			break;
--		default:
--			device_printf(dev, "Bad chiptype\n");
--			bus_release_resources(dev, sc->sc_res_spec,
--			    sc->sc_res);
--			free(gatt, M_AGP);
--			agp_generic_detach(dev);
--			return EINVAL;
--		}
++
 +	sc->match = agp_i810_match(dev);
- 
--		/* GCC1 is called MGGC on i915+ */
--		gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 1);
--		switch (gcc1 & AGP_I855_GCC1_GMS) {
--		case AGP_I855_GCC1_GMS_STOLEN_1M:
--			stolen = 1024;
--			break;
--		case AGP_I855_GCC1_GMS_STOLEN_4M:
--			stolen = 4 * 1024;
--			break;
--		case AGP_I855_GCC1_GMS_STOLEN_8M:
--			stolen = 8 * 1024;
--			break;
--		case AGP_I855_GCC1_GMS_STOLEN_16M:
--			stolen = 16 * 1024;
--			break;
--		case AGP_I855_GCC1_GMS_STOLEN_32M:
--			stolen = 32 * 1024;
--			break;
--		case AGP_I915_GCC1_GMS_STOLEN_48M:
--			if (sc->chiptype == CHIP_I915 ||
--			    sc->chiptype == CHIP_I965 ||
--			    sc->chiptype == CHIP_G33 ||
--			    sc->chiptype == CHIP_IGD ||
--			    sc->chiptype == CHIP_G4X) {
--				stolen = 48 * 1024;
--			} else {
--				stolen = 0;
--			}
--			break;
--		case AGP_I915_GCC1_GMS_STOLEN_64M:
--			if (sc->chiptype == CHIP_I915 ||
--			    sc->chiptype == CHIP_I965 ||
--			    sc->chiptype == CHIP_G33 ||
--			    sc->chiptype == CHIP_IGD ||
--			    sc->chiptype == CHIP_G4X) {
--				stolen = 64 * 1024;
--			} else {
--				stolen = 0;
--			}
--			break;
--		case AGP_G33_GCC1_GMS_STOLEN_128M:
--			if (sc->chiptype == CHIP_I965 ||
--			    sc->chiptype == CHIP_G33 ||
--			    sc->chiptype == CHIP_IGD ||
--			    sc->chiptype == CHIP_G4X) {
--				stolen = 128 * 1024;
--			} else {
--				stolen = 0;
--			}
--			break;
--		case AGP_G33_GCC1_GMS_STOLEN_256M:
--			if (sc->chiptype == CHIP_I965 ||
--			    sc->chiptype == CHIP_G33 ||
--			    sc->chiptype == CHIP_IGD ||
--			    sc->chiptype == CHIP_G4X) {
--				stolen = 256 * 1024;
--			} else {
--				stolen = 0;
--			}
--			break;
--		case AGP_G4X_GCC1_GMS_STOLEN_96M:
--			if (sc->chiptype == CHIP_I965 ||
--			    sc->chiptype == CHIP_G4X) {
--				stolen = 96 * 1024;
--			} else {
--				stolen = 0;
--			}
--			break;
--		case AGP_G4X_GCC1_GMS_STOLEN_160M:
--			if (sc->chiptype == CHIP_I965 ||
--			    sc->chiptype == CHIP_G4X) {
--				stolen = 160 * 1024;
--			} else {
--				stolen = 0;
--			}
--			break;
--		case AGP_G4X_GCC1_GMS_STOLEN_224M:
--			if (sc->chiptype == CHIP_I965 ||
--			    sc->chiptype == CHIP_G4X) {
--				stolen = 224 * 1024;
--			} else {
--				stolen = 0;
--			}
--			break;
--		case AGP_G4X_GCC1_GMS_STOLEN_352M:
--			if (sc->chiptype == CHIP_I965 ||
--			    sc->chiptype == CHIP_G4X) {
--				stolen = 352 * 1024;
--			} else {
--				stolen = 0;
--			}
--			break;
--		default:
--			device_printf(dev, "unknown memory configuration, "
--			    "disabling\n");
--			bus_release_resources(dev, sc->sc_res_spec,
--			    sc->sc_res);
--			free(gatt, M_AGP);
--			agp_generic_detach(dev);
--			return EINVAL;
--		}
++
 +	agp_set_aperture_resource(dev, sc->match->driver->gen <= 2 ?
 +	    AGP_APBASE : AGP_I915_GMADR);
 +	error = agp_generic_attach(dev);
 +	if (error)
 +		return (error);
- 
--		gtt_size += 4;
++
 +	if (ptoa((vm_paddr_t)Maxmem) >
 +	    (1ULL << sc->match->driver->busdma_addr_mask_sz) - 1) {
 +		device_printf(dev, "agp_i810 does not support physical "
@@ -2787,22 +2068,16 @@
 +		    sc->match->driver->busdma_addr_mask_sz) - 1);
 +		return (ENOENT);
 +	}
- 
--		sc->stolen = (stolen - gtt_size) * 1024 / 4096;
++
 +	if (bus_alloc_resources(dev, sc->match->driver->res_spec, sc->sc_res)) {
 +		agp_generic_detach(dev);
 +		return (ENODEV);
 +	}
- 
--		/* GATT address is already in there, make sure it's enabled */
--		pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL);
--		pgtblctl |= 1;
--		bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl);
++
 +	sc->initial_aperture = AGP_GET_APERTURE(dev);
 +	sc->gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_WAITOK);
 +	sc->gatt->ag_entries = AGP_GET_APERTURE(dev) >> AGP_PAGE_SHIFT;
- 
--		gatt->ag_physical = pgtblctl & ~1;
++
 +	if ((error = sc->match->driver->get_stolen_size(dev)) != 0 ||
 +	    (error = sc->match->driver->install_gatt(dev)) != 0 ||
 +	    (error = sc->match->driver->get_gtt_mappable_entries(dev)) != 0 ||
@@ -2813,8 +2088,8 @@
 +		free(sc->gatt, M_AGP);
 +		agp_generic_detach(dev);
 +		return (error);
- 	}
- 
++	}
++
 +	intel_agp = dev;
  	device_printf(dev, "aperture size is %dM",
  	    sc->initial_aperture / 1024 / 1024);
@@ -2838,13 +2113,13 @@
 +agp_i810_deinstall_gatt(device_t dev)
 +{
 +	struct agp_i810_softc *sc;
- 
--	return 0;
++
 +	sc = device_get_softc(dev);
 +	bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, 0);
 +	contigfree(sc->gatt->ag_virtual, 64 * 1024, M_AGP);
- }
++}
  
+-	return 0;
 +static void
 +agp_i830_deinstall_gatt(device_t dev)
 +{
@@ -2855,8 +2130,8 @@
 +	pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL);
 +	pgtblctl &= ~1;
 +	bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl);
-+}
-+
+ }
+ 
  static int
  agp_i810_detach(device_t dev)
  {
@@ -2876,9 +2151,9 @@
 -		bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl);
 -	}
 +	sc->match->driver->deinstall_gatt(dev);
++
++	sc->match->driver->chipset_flush_teardown(dev);
  
-+	sc->match->driver->chipset_flush_teardown(dev);
-+
  	/* Put the aperture back the way it started. */
  	AGP_SET_APERTURE(dev, sc->initial_aperture);
  
@@ -2896,7 +2171,7 @@
  }
  
  static int
-@@ -758,145 +1535,314 @@
+@@ -758,143 +1535,312 @@ agp_i810_resume(device_t dev)
  static int
  agp_i810_set_aperture(device_t dev, u_int32_t aperture)
  {
@@ -2982,12 +2257,11 @@
 +		gcc1 |= AGP_I830_GCC1_GMASIZE_64;
 +	else
 +		gcc1 |= AGP_I830_GCC1_GMASIZE_128;
- 
--	return 0;
++
 +	pci_write_config(sc->bdev, AGP_I830_GCC1, gcc1, 2);
 +	return (0);
- }
- 
++}
++
 +static int
 +agp_i915_set_aperture(device_t dev, u_int32_t aperture)
 +{
@@ -2999,11 +2273,12 @@
 +agp_i810_method_set_aperture(device_t dev, u_int32_t aperture)
 +{
 +	struct agp_i810_softc *sc;
-+
+ 
+-	return 0;
 +	sc = device_get_softc(dev);
 +	return (sc->match->driver->set_aperture(dev, aperture));
-+}
-+
+ }
+ 
  /**
 - * Writes a GTT entry mapping the page at the given offset from the beginning
 - * of the aperture to the given physical address.
@@ -3039,31 +2314,8 @@
 -		 */
 -		KASSERT((pte & 0x0000000f00000000ull) == 0,
 -		    (">4GB physical address in agp"));
--	}
 +	sc = device_get_softc(dev);
- 
--	switch (sc->chiptype) {
--	case CHIP_I810:
--	case CHIP_I830:
--	case CHIP_I855:
--		bus_write_4(sc->sc_res[0],
--		    AGP_I810_GTT + (offset >> AGP_PAGE_SHIFT) * 4, pte);
--		break;
--	case CHIP_I915:
--	case CHIP_G33:
--	case CHIP_IGD:
--		bus_write_4(sc->sc_res[1],
--		    (offset >> AGP_PAGE_SHIFT) * 4, pte);
--		break;
--	case CHIP_I965:
--		bus_write_4(sc->sc_res[0],
--		    (offset >> AGP_PAGE_SHIFT) * 4 + (512 * 1024), pte);
--		break;
--	case CHIP_G4X:
--		bus_write_4(sc->sc_res[0],
--		    (offset >> AGP_PAGE_SHIFT) * 4 + (2 * 1024 * 1024), pte);
--		break;
--	}
++
 +	pte = (u_int32_t)physical | I810_PTE_VALID;
 +	if (flags == AGP_DCACHE_MEMORY)
 +		pte |= I810_PTE_LOCAL;
@@ -3071,8 +2323,8 @@
 +		pte |= I830_PTE_SYSTEM_CACHED;
 +	bus_write_4(sc->sc_res[0], AGP_I810_GTT + index * 4, pte);
 +	CTR2(KTR_AGP_I810, "810_pte %x %x", index, pte);
- }
- 
++}
++
 +static void
 +agp_i830_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical,
 +    int flags)
@@ -3166,55 +2418,56 @@
 +	CTR2(KTR_AGP_I810, "sb_pte %x %x", index, pte);
 +}
 +
- static int
- agp_i810_bind_page(device_t dev, vm_offset_t offset, vm_offset_t physical)
- {
- 	struct agp_i810_softc *sc = device_get_softc(dev);
++static int
++agp_i810_bind_page(device_t dev, vm_offset_t offset, vm_offset_t physical)
++{
++	struct agp_i810_softc *sc = device_get_softc(dev);
 +	u_int index;
- 
- 	if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) {
--		device_printf(dev, "failed: offset is 0x%08jx, shift is %d, entries is %d\n", (intmax_t)offset, AGP_PAGE_SHIFT, sc->gatt->ag_entries);
--		return EINVAL;
++
++	if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) {
 +		device_printf(dev, "failed: offset is 0x%08jx, "
 +		    "shift is %d, entries is %d\n", (intmax_t)offset,
 +		    AGP_PAGE_SHIFT, sc->gatt->ag_entries);
 +		return (EINVAL);
  	}
--
--	if ( sc->chiptype != CHIP_I810 ) {
--		if ( (offset >> AGP_PAGE_SHIFT) < sc->stolen ) {
--			device_printf(dev, "trying to bind into stolen memory");
--			return EINVAL;
--		}
 +	index = offset >> AGP_PAGE_SHIFT;
 +	if (sc->stolen != 0 && index < sc->stolen) {
 +		device_printf(dev, "trying to bind into stolen memory\n");
 +		return (EINVAL);
- 	}
--
--	agp_i810_write_gtt_entry(dev, offset, physical, 1);
--
--	return 0;
++	}
 +	sc->match->driver->install_gtt_pte(dev, index, physical, 0);
 +	return (0);
- }
++}
  
- static int
- agp_i810_unbind_page(device_t dev, vm_offset_t offset)
- {
--	struct agp_i810_softc *sc = device_get_softc(dev);
+-	switch (sc->chiptype) {
+-	case CHIP_I810:
+-	case CHIP_I830:
+-	case CHIP_I855:
+-		bus_write_4(sc->sc_res[0],
+-		    AGP_I810_GTT + (offset >> AGP_PAGE_SHIFT) * 4, pte);
+-		break;
+-	case CHIP_I915:
+-	case CHIP_G33:
+-	case CHIP_IGD:
+-		bus_write_4(sc->sc_res[1],
+-		    (offset >> AGP_PAGE_SHIFT) * 4, pte);
+-		break;
+-	case CHIP_I965:
+-		bus_write_4(sc->sc_res[0],
+-		    (offset >> AGP_PAGE_SHIFT) * 4 + (512 * 1024), pte);
+-		break;
+-	case CHIP_G4X:
+-		bus_write_4(sc->sc_res[0],
+-		    (offset >> AGP_PAGE_SHIFT) * 4 + (2 * 1024 * 1024), pte);
+-		break;
++static int
++agp_i810_unbind_page(device_t dev, vm_offset_t offset)
++{
 +	struct agp_i810_softc *sc;
 +	u_int index;
- 
++
 +	sc = device_get_softc(dev);
- 	if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT))
--		return EINVAL;
--
--	if ( sc->chiptype != CHIP_I810 ) {
--		if ( (offset >> AGP_PAGE_SHIFT) < sc->stolen ) {
--			device_printf(dev, "trying to unbind from stolen memory");
--			return EINVAL;
--		}
++	if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT))
 +		return (EINVAL);
 +	index = offset >> AGP_PAGE_SHIFT;
 +	if (sc->stolen != 0 && index < sc->stolen) {
@@ -3223,27 +2476,39 @@
  	}
 +	sc->match->driver->install_gtt_pte(dev, index, 0, 0);
 +	return (0);
-+}
+ }
  
--	agp_i810_write_gtt_entry(dev, offset, 0, 0);
+-static int
+-agp_i810_bind_page(device_t dev, vm_offset_t offset, vm_offset_t physical)
 +static u_int32_t
 +agp_i810_read_gtt_pte(device_t dev, u_int index)
-+{
+ {
+-	struct agp_i810_softc *sc = device_get_softc(dev);
 +	struct agp_i810_softc *sc;
 +	u_int32_t pte;
  
--	return 0;
+-	if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) {
+-		device_printf(dev, "failed: offset is 0x%08jx, shift is %d, entries is %d\n", (intmax_t)offset, AGP_PAGE_SHIFT, sc->gatt->ag_entries);
+-		return EINVAL;
+-	}
 +	sc = device_get_softc(dev);
 +	pte = bus_read_4(sc->sc_res[0], AGP_I810_GTT + index * 4);
 +	return (pte);
- }
++}
  
+-	if ( sc->chiptype != CHIP_I810 ) {
+-		if ( (offset >> AGP_PAGE_SHIFT) < sc->stolen ) {
+-			device_printf(dev, "trying to bind into stolen memory");
+-			return EINVAL;
+-		}
+-	}
 +static u_int32_t
 +agp_i915_read_gtt_pte(device_t dev, u_int index)
 +{
 +	struct agp_i810_softc *sc;
 +	u_int32_t pte;
-+
+ 
+-	agp_i810_write_gtt_entry(dev, offset, physical, 1);
 +	sc = device_get_softc(dev);
 +	pte = bus_read_4(sc->sc_res[1], index * 4);
 +	return (pte);
@@ -3254,23 +2519,35 @@
 +{
 +	struct agp_i810_softc *sc;
 +	u_int32_t pte;
-+
+ 
+-	return 0;
 +	sc = device_get_softc(dev);
 +	pte = bus_read_4(sc->sc_res[0], index * 4 + (512 * 1024));
 +	return (pte);
-+}
-+
+ }
+ 
+-static int
+-agp_i810_unbind_page(device_t dev, vm_offset_t offset)
 +static u_int32_t
 +agp_g4x_read_gtt_pte(device_t dev, u_int index)
-+{
+ {
+-	struct agp_i810_softc *sc = device_get_softc(dev);
 +	struct agp_i810_softc *sc;
 +	u_int32_t pte;
-+
+ 
+-	if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT))
+-		return EINVAL;
 +	sc = device_get_softc(dev);
 +	pte = bus_read_4(sc->sc_res[0], index * 4 + (2 * 1024 * 1024));
 +	return (pte);
 +}
-+
+ 
+-	if ( sc->chiptype != CHIP_I810 ) {
+-		if ( (offset >> AGP_PAGE_SHIFT) < sc->stolen ) {
+-			device_printf(dev, "trying to unbind from stolen memory");
+-			return EINVAL;
+-		}
+-	}
 +static vm_paddr_t
 +agp_i810_read_gtt_pte_paddr(device_t dev, u_int index)
 +{
@@ -3296,24 +2573,24 @@
 +	res = (pte & ~PAGE_MASK) | ((pte & 0xf0) << 28);
 +	return (res);
 +}
-+
+ 
+-	agp_i810_write_gtt_entry(dev, offset, 0, 0);
 +static vm_paddr_t
 +agp_sb_read_gtt_pte_paddr(device_t dev, u_int index)
 +{
 +	struct agp_i810_softc *sc;
 +	u_int32_t pte;
 +	vm_paddr_t res;
-+
+ 
+-	return 0;
 +	sc = device_get_softc(dev);
 +	pte = sc->match->driver->read_gtt_pte(dev, index);
 +	res = (pte & ~PAGE_MASK) | ((pte & 0xff0) << 28);
 +	return (res);
-+}
-+
+ }
+ 
  /*
-  * Writing via memory mapped registers already flushes all TLBs.
-  */
-@@ -909,29 +1855,30 @@
+@@ -909,29 +1855,30 @@ static int
  agp_i810_enable(device_t dev, u_int32_t mode)
  {
  
@@ -3353,7 +2630,7 @@
  	} else if (type == 2) {
  		/*
  		 * Type 2 is the contiguous physical memory type, that hands
-@@ -942,13 +1889,13 @@
+@@ -942,13 +1889,13 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
  		 */
  		if (size != AGP_PAGE_SIZE) {
  			if (sc->argb_cursor != NULL)
@@ -3369,7 +2646,7 @@
  		}
  	}
  
-@@ -958,7 +1905,7 @@
+@@ -958,7 +1905,7 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
  	mem->am_type = type;
  	if (type != 1 && (type != 2 || size == AGP_PAGE_SIZE))
  		mem->am_obj = vm_object_allocate(OBJT_DEFAULT,
@@ -3378,7 +2655,7 @@
  	else
  		mem->am_obj = 0;
  
-@@ -968,8 +1915,6 @@
+@@ -968,8 +1915,6 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
  			 * Allocate and wire down the page now so that we can
  			 * get its physical address.
  			 */
@@ -3387,7 +2664,7 @@
  			VM_OBJECT_LOCK(mem->am_obj);
  			m = vm_page_grab(mem->am_obj, 0, VM_ALLOC_NOBUSY |
  			    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
-@@ -981,33 +1926,33 @@
+@@ -981,33 +1926,33 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
  			 */
  			mem->am_physical = vtophys(sc->argb_cursor);
  		}
@@ -3415,9 +2692,9 @@
  	if (mem->am_is_bound)
 -		return EBUSY;
 +		return (EBUSY);
++
++	sc = device_get_softc(dev);
  
-+	sc = device_get_softc(dev);
-+
  	if (mem->am_type == 2) {
  		if (mem->am_size == AGP_PAGE_SIZE) {
  			/*
@@ -3428,7 +2705,7 @@
  			VM_OBJECT_LOCK(mem->am_obj);
  			m = vm_page_lookup(mem->am_obj, 0);
  			vm_page_lock(m);
-@@ -1025,14 +1970,13 @@
+@@ -1025,14 +1970,13 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
  	if (mem->am_obj)
  		vm_object_deallocate(mem->am_obj);
  	free(mem, M_AGP);
@@ -3446,7 +2723,7 @@
  	vm_offset_t i;
  
  	/* Do some sanity checks first. */
-@@ -1040,76 +1984,78 @@
+@@ -1040,76 +1984,78 @@ agp_i810_bind_memory(device_t dev, struct agp_memory *mem,
  	    offset + mem->am_size > AGP_GET_APERTURE(dev)) {
  		device_printf(dev, "binding memory at bad offset %#x\n",
  		    (int)offset);
@@ -3531,11 +2808,11 @@
  
  	if (mem->am_type != 1)
 -		return agp_generic_unbind_memory(dev, mem);
+-
+-	if ( sc->chiptype != CHIP_I810 )
+-		return EINVAL;
 +		return (agp_generic_unbind_memory(dev, mem));
  
--	if ( sc->chiptype != CHIP_I810 )
--		return EINVAL;
--
 +	if (sc->match->driver->chiptype != CHIP_I810)
 +		return (EINVAL);
  	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) {
@@ -3550,7 +2827,7 @@
  }
  
  static device_method_t agp_i810_methods[] = {
-@@ -1123,7 +2069,7 @@
+@@ -1123,7 +2069,7 @@ static device_method_t agp_i810_methods[] = {
  
  	/* AGP interface */
  	DEVMETHOD(agp_get_aperture,	agp_generic_get_aperture),
@@ -3559,7 +2836,7 @@
  	DEVMETHOD(agp_bind_page,	agp_i810_bind_page),
  	DEVMETHOD(agp_unbind_page,	agp_i810_unbind_page),
  	DEVMETHOD(agp_flush_tlb,	agp_i810_flush_tlb),
-@@ -1132,6 +2078,7 @@
+@@ -1132,6 +2078,7 @@ static device_method_t agp_i810_methods[] = {
  	DEVMETHOD(agp_free_memory,	agp_i810_free_memory),
  	DEVMETHOD(agp_bind_memory,	agp_i810_bind_memory),
  	DEVMETHOD(agp_unbind_memory,	agp_i810_unbind_memory),
@@ -3567,7 +2844,7 @@
  
  	{ 0, 0 }
  };
-@@ -1147,3 +2094,414 @@
+@@ -1147,3 +2094,414 @@ static devclass_t agp_devclass;
  DRIVER_MODULE(agp_i810, vgapci, agp_i810_driver, agp_devclass, 0, 0);
  MODULE_DEPEND(agp_i810, agp, 1, 1, 1);
  MODULE_DEPEND(agp_i810, pci, 1, 1, 1);
@@ -3982,18 +3259,158 @@
 +	sc = device_get_softc(intel_agp);
 +	return (sc->match->driver->read_gtt_pte(intel_agp, entry));
 +}
-Index: sys/dev/agp/agpreg.h
-===================================================================
-diff --git sys/dev/agp/agpreg.h sys/dev/agp/agpreg.h
---- sys/dev/agp/agpreg.h	(revision 230124)
-+++ sys/dev/agp/agpreg.h	(working copy)
+diff --git a/sys/dev/agp/agp_i810.h b/sys/dev/agp/agp_i810.h
+new file mode 100644
+index 0000000..2900709
+--- /dev/null
++++ sys/dev/agp/agp_i810.h
+@@ -0,0 +1,95 @@
++/*-
++ * Copyright (c) 2011 The FreeBSD Foundation
++ * All rights reserved.
++ *
++ * This software was developed by Konstantin Belousov under sponsorship from
++ * the FreeBSD Foundation.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * $FreeBSD$
++ */
++
++#ifndef AGP_AGP_I810_H
++#define	AGP_AGP_I810_H
++
++#include <sys/param.h>
++#include <sys/sglist.h>
++
++#include <vm/vm.h>
++#include <vm/vm_page.h>
++
++/* Special gtt memory types */
++#define AGP_DCACHE_MEMORY	1
++#define AGP_PHYS_MEMORY		2
++
++/* New caching attributes for gen6/sandybridge */
++#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
++#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
++
++/* flag for GFDT type */
++#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
++
++struct intel_gtt {
++	/* Size of memory reserved for graphics by the BIOS */
++	u_int stolen_size;
++	/* Total number of gtt entries. */
++	u_int gtt_total_entries;
++	/*
++	 * Part of the gtt that is mappable by the cpu, for those
++	 * chips where this is not the full gtt.
++	 */
++	u_int gtt_mappable_entries;
++
++	/*
++	 * Always false.
++	 */
++	u_int do_idle_maps;
++};
++
++struct intel_gtt agp_intel_gtt_get(device_t dev);
++int agp_intel_gtt_chipset_flush(device_t dev);
++void agp_intel_gtt_unmap_memory(device_t dev, struct sglist *sg_list);
++void agp_intel_gtt_clear_range(device_t dev, u_int first_entry,
++    u_int num_entries);
++int agp_intel_gtt_map_memory(device_t dev, vm_page_t *pages, u_int num_entries,
++    struct sglist **sg_list);
++void agp_intel_gtt_insert_sg_entries(device_t dev, struct sglist *sg_list,
++    u_int pg_start, u_int flags);
++void agp_intel_gtt_insert_pages(device_t dev, u_int first_entry,
++    u_int num_entries, vm_page_t *pages, u_int flags);
++
++struct intel_gtt intel_gtt_get(void);
++int intel_gtt_chipset_flush(void);
++void intel_gtt_unmap_memory(struct sglist *sg_list);
++void intel_gtt_clear_range(u_int first_entry, u_int num_entries);
++int intel_gtt_map_memory(vm_page_t *pages, u_int num_entries,
++    struct sglist **sg_list);
++void intel_gtt_insert_sg_entries(struct sglist *sg_list, u_int pg_start,
++    u_int flags);
++void intel_gtt_insert_pages(u_int first_entry, u_int num_entries,
++    vm_page_t *pages, u_int flags);
++vm_paddr_t intel_gtt_read_pte_paddr(u_int entry);
++u_int32_t intel_gtt_read_pte(u_int entry);
++device_t intel_gtt_get_bridge_device(void);
++
++#endif
+diff --git a/sys/dev/agp/agp_if.m b/sys/dev/agp/agp_if.m
+index da2e19a..4a97ca9 100644
+--- sys/dev/agp/agp_if.m
++++ sys/dev/agp/agp_if.m
+@@ -36,6 +36,14 @@
+ #
+ INTERFACE agp;
+ 
++CODE {
++	static int
++	null_agp_chipset_flush(device_t dev)
++	{
++		return (ENXIO);
++	}
++};
++
+ #
+ # Return the current aperture size.
+ #
+@@ -132,3 +140,7 @@ METHOD int unbind_memory {
+ 	device_t	dev;
+ 	struct agp_memory *handle;
+ };
++
++METHOD int chipset_flush {
++	device_t	dev;
++} DEFAULT null_agp_chipset_flush;
+diff --git a/sys/dev/agp/agppriv.h b/sys/dev/agp/agppriv.h
+index fd64056..00e7dc1 100644
+--- sys/dev/agp/agppriv.h
++++ sys/dev/agp/agppriv.h
+@@ -73,7 +73,7 @@ struct agp_softc {
+ 	struct agp_memory_list	as_memory;	/* list of allocated memory */
+ 	int			as_nextid;	/* next memory block id */
+ 	int			as_isopen;	/* user device is open */
+-	struct cdev *as_devnode;	/* from make_dev */
++	struct cdev		*as_devnode;	/* from make_dev */
+ 	struct mtx		as_lock;	/* lock for access to GATT */
+ };
+ 
+diff --git a/sys/dev/agp/agpreg.h b/sys/dev/agp/agpreg.h
+index b453cac..dfa93a5 100644
+--- sys/dev/agp/agpreg.h
++++ sys/dev/agp/agpreg.h
 @@ -176,10 +176,33 @@
  #define AGP_I810_GMADR		0x10
  #define AGP_I810_MMADR		0x14
  
 +#define	I810_PTE_VALID		0x00000001
 +
- /*
++/*
 + * Cache control
 + *
 + * Pre-Sandybridge bits
@@ -4013,7 +3430,7 @@
 +#define	GEN6_PTE_LLC_MLC	0x00000006	/* Cache in LLC and MLC */
 +#define	GEN6_PTE_GFDT		0x00000008	/* Graphics Data Type */
 +
-+/*
+ /*
   * Memory mapped register offsets for i810 chipset.
   */
  #define AGP_I810_PGTBL_CTL	0x2020
@@ -4125,212 +3542,4246 @@
   * NVIDIA nForce/nForce2 registers
   */
  #define	AGP_NVIDIA_0_APBASE		0x10
-Index: sys/dev/agp/agppriv.h
-===================================================================
-diff --git sys/dev/agp/agppriv.h sys/dev/agp/agppriv.h
---- sys/dev/agp/agppriv.h	(revision 230124)
-+++ sys/dev/agp/agppriv.h	(working copy)
-@@ -73,7 +73,7 @@
- 	struct agp_memory_list	as_memory;	/* list of allocated memory */
- 	int			as_nextid;	/* next memory block id */
- 	int			as_isopen;	/* user device is open */
--	struct cdev *as_devnode;	/* from make_dev */
-+	struct cdev		*as_devnode;	/* from make_dev */
- 	struct mtx		as_lock;	/* lock for access to GATT */
+diff --git a/sys/dev/agp/agpvar.h b/sys/dev/agp/agpvar.h
+index 52d40ef..5aeebc9 100644
+--- sys/dev/agp/agpvar.h
++++ sys/dev/agp/agpvar.h
+@@ -122,4 +122,10 @@ int agp_unbind_memory(device_t dev, void *handle);
+  */
+ void agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi);
+ 
++#define AGP_NORMAL_MEMORY 0
++
++#define AGP_USER_TYPES (1 << 16)
++#define AGP_USER_MEMORY (AGP_USER_TYPES)
++#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
++
+ #endif /* !_PCI_AGPVAR_H_ */
+diff --git a/sys/dev/drm/drm.h b/sys/dev/drm/drm.h
+index 592b5b3..c9d3a23 100644
+--- sys/dev/drm/drm.h
++++ sys/dev/drm/drm.h
+@@ -239,7 +239,7 @@ enum drm_map_type {
+ 	_DRM_AGP = 3,		  /**< AGP/GART */
+ 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
+ 	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
+-	_DRM_TTM = 6
++	_DRM_GEM = 6		  /**< GEM */
  };
  
-Index: sys/dev/agp/agp_i810.h
-===================================================================
-diff --git sys/dev/agp/agp_i810.h sys/dev/agp/agp_i810.h
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/agp/agp_i810.h	(working copy)
-@@ -0,0 +1,95 @@
-+/*-
-+ * Copyright (c) 2011 The FreeBSD Foundation
-+ * All rights reserved.
-+ *
-+ * This software was developed by Konstantin Belousov under sponsorship from
-+ * the FreeBSD Foundation.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ *    notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ *    notice, this list of conditions and the following disclaimer in the
-+ *    documentation and/or other materials provided with the distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-+ * SUCH DAMAGE.
-+ *
-+ * $FreeBSD$
+ /**
+@@ -525,15 +525,18 @@ struct drm_irq_busid {
+ enum drm_vblank_seq_type {
+ 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
+ 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
++	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
++	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
+ 	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
+ 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
+ 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
+ 	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
+ };
++#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
+ 
+ #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
+-				_DRM_VBLANK_NEXTONMISS)
++#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
++				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
+ 
+ struct drm_wait_vblank_request {
+ 	enum drm_vblank_seq_type type;
+@@ -644,7 +647,6 @@ struct drm_set_version {
+ 	int drm_dd_minor;
+ };
+ 
+-
+ #define DRM_FENCE_FLAG_EMIT                0x00000001
+ #define DRM_FENCE_FLAG_SHAREABLE           0x00000002
+ /**
+@@ -671,7 +673,7 @@ struct drm_fence_arg {
+ 	unsigned int error;
+ 	unsigned int sequence;
+ 	unsigned int pad64;
+-	uint64_t expand_pad[2]; /*Future expansion */
++	uint64_t expand_pad[2]; /* Future expansion */
+ };
+ 
+ /* Buffer permissions, referring to how the GPU uses the buffers.
+@@ -987,6 +989,28 @@ struct drm_gem_open {
+ 	uint64_t size;
+ };
+ 
++struct drm_event {
++	uint32_t type;
++	uint32_t length;
++};
++
++#define DRM_EVENT_VBLANK 0x01
++#define DRM_EVENT_FLIP_COMPLETE 0x02
++
++struct drm_event_vblank {
++	struct drm_event base;
++	uint64_t user_data;
++	uint32_t tv_sec;
++	uint32_t tv_usec;
++	uint32_t sequence;
++	uint32_t reserved;
++};
++
++#define DRM_CAP_DUMB_BUFFER 0x1
++#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
++
++#include "drm_mode.h"
++
+ /**
+  * \name Ioctls Definitions
+  */
+@@ -1029,6 +1053,9 @@ struct drm_gem_open {
+ #define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+ #define DRM_IOCTL_GET_SAREA_CTX		DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+ 
++#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
++#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)
++
+ #define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
+ #define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
+ #define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
+@@ -1043,6 +1070,8 @@ struct drm_gem_open {
+ #define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
+ #define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
+ 
++#define DRM_IOCTL_GEM_PRIME_OPEN        DRM_IOWR(0x2e, struct drm_gem_open)
++
+ #define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
+ #define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
+ #define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
+@@ -1059,6 +1088,34 @@ struct drm_gem_open {
+ 
+ #define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)
+ 
++#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
++#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
++#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
++#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
++#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
++#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
++#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
++#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
++#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
++#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
++
++#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
++#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
++#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
++#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
++#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
++#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
++#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
++#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
++
++#define DRM_IOCTL_MODE_CREATE_DUMB	DRM_IOWR(0xB2, struct drm_mode_create_dumb)
++#define DRM_IOCTL_MODE_MAP_DUMB		DRM_IOWR(0xB3, struct drm_mode_map_dumb)
++#define DRM_IOCTL_MODE_DESTROY_DUMB	DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
++#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
++#define DRM_IOCTL_MODE_GETPLANE		DRM_IOWR(0xB6, struct drm_mode_get_plane)
++#define DRM_IOCTL_MODE_SETPLANE		DRM_IOWR(0xB7, struct drm_mode_set_plane)
++#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
++
+ #define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
+ #define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
+ #define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
+diff --git a/sys/dev/drm/drmP.h b/sys/dev/drm/drmP.h
+index 3b121e7..ccd41b9 100644
+--- sys/dev/drm/drmP.h
++++ sys/dev/drm/drmP.h
+@@ -46,24 +46,25 @@ struct drm_file;
+ #include <sys/queue.h>
+ #include <sys/malloc.h>
+ #include <sys/kernel.h>
++#include <sys/ktr.h>
+ #include <sys/module.h>
+ #include <sys/systm.h>
+ #include <sys/conf.h>
+ #include <sys/sglist.h>
+ #include <sys/stat.h>
+-#if __FreeBSD_version >= 700000
+ #include <sys/priv.h>
+-#endif
+ #include <sys/proc.h>
+ #include <sys/lock.h>
+ #include <sys/fcntl.h>
+ #include <sys/uio.h>
+ #include <sys/filio.h>
++#include <sys/selinfo.h>
+ #include <sys/sysctl.h>
+ #include <sys/bus.h>
+ #include <sys/queue.h>
+ #include <sys/signalvar.h>
+ #include <sys/poll.h>
++#include <sys/sbuf.h>
+ #include <sys/taskqueue.h>
+ #include <sys/tree.h>
+ #include <vm/vm.h>
+@@ -88,11 +89,7 @@ struct drm_file;
+ #include <sys/mman.h>
+ #include <sys/rman.h>
+ #include <sys/memrange.h>
+-#if __FreeBSD_version >= 800004
+ #include <dev/agp/agpvar.h>
+-#else /* __FreeBSD_version >= 800004 */
+-#include <pci/agpvar.h>
+-#endif /* __FreeBSD_version >= 800004 */
+ #include <sys/agpio.h>
+ #include <sys/mutex.h>
+ #include <dev/pci/pcivar.h>
+@@ -104,6 +101,9 @@ struct drm_file;
+ #include "dev/drm/drm_atomic.h"
+ #include "dev/drm/drm_internal.h"
+ #include "dev/drm/drm_linux_list.h"
++#include "dev/drm/drm_gem_names.h"
++#include "dev/drm/drm_mm.h"
++#include "dev/drm/drm_hashtab.h"
+ 
+ #include <opt_drm.h>
+ #ifdef DRM_DEBUG
+@@ -111,18 +111,12 @@ struct drm_file;
+ #define DRM_DEBUG_DEFAULT_ON 1
+ #endif /* DRM_DEBUG */
+ 
+-#if defined(DRM_LINUX) && DRM_LINUX && !defined(__amd64__)
+-#include <sys/file.h>
+-#include <sys/proc.h>
+-#include <machine/../linux/linux.h>
+-#include <machine/../linux/linux_proto.h>
+-#else
+-/* Either it was defined when it shouldn't be (FreeBSD amd64) or it isn't
+- * supported on this OS yet.
+- */
++#define	DRM_DEBUGBITS_DEBUG		0x1
++#define	DRM_DEBUGBITS_KMS		0x2
++#define	DRM_DEBUGBITS_FAILED_IOCTL	0x4
++
+ #undef DRM_LINUX
+ #define DRM_LINUX 0
+-#endif
+ 
+ /* driver capabilities and requirements mask */
+ #define DRIVER_USE_AGP     0x1
+@@ -132,13 +126,29 @@ struct drm_file;
+ #define DRIVER_SG          0x10
+ #define DRIVER_HAVE_DMA    0x20
+ #define DRIVER_HAVE_IRQ    0x40
+-#define DRIVER_DMA_QUEUE   0x100
++#define DRIVER_IRQ_SHARED  0x80
++#define DRIVER_IRQ_VBL     0x100
++#define DRIVER_DMA_QUEUE   0x200
++#define DRIVER_FB_DMA      0x400
++#define DRIVER_IRQ_VBL2    0x800
++#define DRIVER_GEM         0x1000
++#define DRIVER_MODESET     0x2000
++#define DRIVER_USE_PLATFORM_DEVICE  0x4000
++#define	DRIVER_LOCKLESS_IRQ 0x8000
+ 
+ 
+ #define DRM_HASH_SIZE	      16 /* Size of key hash table		  */
+ #define DRM_KERNEL_CONTEXT    0	 /* Change drm_resctx if changed	  */
+ #define DRM_RESERVED_CONTEXTS 1	 /* Change drm_resctx if changed	  */
+ 
++#define	DRM_GEM_MAPPING_MASK	(3ULL << 62)
++#define	DRM_GEM_MAPPING_KEY	(2ULL << 62) /* Non-canonical address form */
++#define	DRM_GEM_MAX_IDX		0x3fffff
++#define	DRM_GEM_MAPPING_IDX(o)	(((o) >> 40) & DRM_GEM_MAX_IDX)
++#define	DRM_GEM_MAPPING_OFF(i)	(((uint64_t)(i)) << 40)
++#define	DRM_GEM_MAPPING_MAPOFF(o) \
++    ((o) & ~(DRM_GEM_MAPPING_OFF(DRM_GEM_MAX_IDX) | DRM_GEM_MAPPING_KEY))
++
+ MALLOC_DECLARE(DRM_MEM_DMA);
+ MALLOC_DECLARE(DRM_MEM_SAREA);
+ MALLOC_DECLARE(DRM_MEM_DRIVER);
+@@ -159,6 +169,7 @@ MALLOC_DECLARE(DRM_MEM_SGLISTS);
+ MALLOC_DECLARE(DRM_MEM_DRAWABLE);
+ MALLOC_DECLARE(DRM_MEM_MM);
+ MALLOC_DECLARE(DRM_MEM_HASHTAB);
++MALLOC_DECLARE(DRM_MEM_KMS);
+ 
+ SYSCTL_DECL(_hw_drm);
+ 
+@@ -196,8 +207,21 @@ SYSCTL_DECL(_hw_drm);
+ #define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u)
+ #define DRM_SPINLOCK_ASSERT(l)	mtx_assert(l, MA_OWNED)
+ #define DRM_CURRENTPID		curthread->td_proc->p_pid
+-#define DRM_LOCK()		mtx_lock(&dev->dev_lock)
+-#define DRM_UNLOCK() 		mtx_unlock(&dev->dev_lock)
++#define DRM_LOCK()		(dev)->driver->device_lock((dev))
++#define DRM_UNLOCK() 		(dev)->driver->device_unlock((dev))
++#define	DRM_LOCK_SLEEP(dev, chan, flags, msg, timeout)			\
++    dev->driver->device_lock_sleep((dev), (chan), (flags), (msg), (timeout))
++#if defined(INVARIANTS)
++#define	DRM_LOCK_ASSERT(d)	(d)->driver->device_lock_assert((d))
++#define	DRM_UNLOCK_ASSERT(d)	(d)->driver->device_unlock_assert((d))
++#define	DRM_NONSLEEPABLE_UNLOCK_ASSERT(d) \
++    (d)->driver->device_nonsleepable_unlock_assert((d))
++#else
++#define	DRM_LOCK_ASSERT(d)
++#define	DRM_UNLOCK_ASSERT(d)
++#define	DRM_NONSLEEPABLE_UNLOCK_ASSERT(d)
++#endif
++
+ #define DRM_SYSCTL_HANDLER_ARGS	(SYSCTL_HANDLER_ARGS)
+ 
+ #define DRM_IRQ_ARGS		void *arg
+@@ -221,20 +245,25 @@ enum {
+ 
+ #define PAGE_ALIGN(addr) round_page(addr)
+ /* DRM_SUSER returns true if the user is superuser */
+-#if __FreeBSD_version >= 700000
+ #define DRM_SUSER(p)		(priv_check(p, PRIV_DRIVER) == 0)
+-#else
+-#define DRM_SUSER(p)		(suser(p) == 0)
+-#endif
+ #define DRM_AGP_FIND_DEVICE()	agp_find_device()
+ #define DRM_MTRR_WC		MDF_WRITECOMBINE
+ #define jiffies			ticks
++#define	jiffies_to_msecs(x)	(((int64_t)(x)) * 1000 / hz)
++#define	msecs_to_jiffies(x)	(((int64_t)(x)) * hz / 1000)
++#define	time_after(a,b)		((long)(b) - (long)(a) < 0)
++#define	time_after_eq(a,b)	((long)(b) - (long)(a) <= 0)
++#define drm_msleep(x, msg)	pause((msg), ((int64_t)(x)) * 1000 / hz)
+ 
+ typedef vm_paddr_t dma_addr_t;
+-typedef u_int64_t u64;
+-typedef u_int32_t u32;
+-typedef u_int16_t u16;
+-typedef u_int8_t u8;
++typedef uint64_t u64;
++typedef uint32_t u32;
++typedef uint16_t u16;
++typedef uint8_t u8;
++typedef int64_t s64;
++typedef int32_t s32;
++typedef int16_t s16;
++typedef int8_t s8;
+ 
+ /* DRM_READMEMORYBARRIER() prevents reordering of reads.
+  * DRM_WRITEMEMORYBARRIER() prevents reordering of writes.
+@@ -253,6 +282,9 @@ typedef u_int8_t u8;
+ #define DRM_READ32(map, offset)						\
+ 	le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) +	\
+ 	    (vm_offset_t)(offset)))
++#define DRM_READ64(map, offset)						\
++	le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) +	\
++	    (vm_offset_t)(offset)))
+ #define DRM_WRITE8(map, offset, val)					\
+ 	*(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) +		\
+ 	    (vm_offset_t)(offset)) = val
+@@ -262,6 +294,9 @@ typedef u_int8_t u8;
+ #define DRM_WRITE32(map, offset, val)					\
+ 	*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) +		\
+ 	    (vm_offset_t)(offset)) = htole32(val)
++#define DRM_WRITE64(map, offset, val)					\
++	*(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) +		\
++	    (vm_offset_t)(offset)) = htole64(val)
+ 
+ #define DRM_VERIFYAREA_READ( uaddr, size )		\
+ 	(!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ))
+@@ -317,11 +352,23 @@ for ( ret = 0 ; !ret && !(condition) ; ) {			\
+ #define DRM_INFO(fmt, ...)  printf("info: [" DRM_NAME "] " fmt , ##__VA_ARGS__)
+ 
+ #define DRM_DEBUG(fmt, ...) do {					\
+-	if (drm_debug_flag)						\
++	if ((drm_debug_flag & DRM_DEBUGBITS_DEBUG) != 0)		\
+ 		printf("[" DRM_NAME ":pid%d:%s] " fmt, DRM_CURRENTPID,	\
+ 			__func__ , ##__VA_ARGS__);			\
+ } while (0)
+ 
++#define DRM_DEBUG_KMS(fmt, ...) do {					\
++	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)			\
++		printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\
++			__func__ , ##__VA_ARGS__);			\
++} while (0)
++
++#define DRM_DEBUG_DRIVER(fmt, ...) do {					\
++	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)			\
++		printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\
++			__func__ , ##__VA_ARGS__);			\
++} while (0)
++
+ typedef struct drm_pci_id_list
+ {
+ 	int vendor;
+@@ -339,6 +386,9 @@ struct drm_msi_blacklist_entry
+ #define DRM_AUTH	0x1
+ #define DRM_MASTER	0x2
+ #define DRM_ROOT_ONLY	0x4
++#define DRM_CONTROL_ALLOW 0x8
++#define DRM_UNLOCKED	0x10
++
+ typedef struct drm_ioctl_desc {
+ 	unsigned long cmd;
+ 	int (*func)(struct drm_device *dev, void *data,
+@@ -415,6 +465,16 @@ typedef struct drm_buf_entry {
+ 	drm_freelist_t	  freelist;
+ } drm_buf_entry_t;
+ 
++/* Event queued up for userspace to read */
++struct drm_pending_event {
++	struct drm_event *event;
++	struct list_head link;
++	struct drm_file *file_priv;
++	pid_t pid; /* pid of requester, no guarantee it's valid by the time
++		      we deliver the event, for tracing only */
++	void (*destroy)(struct drm_pending_event *event);
++};
++
+ typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+ struct drm_file {
+ 	TAILQ_ENTRY(drm_file) link;
+@@ -425,7 +485,18 @@ struct drm_file {
+ 	uid_t		  uid;
+ 	drm_magic_t	  magic;
+ 	unsigned long	  ioctl_count;
++
+ 	void		 *driver_priv;
++	struct drm_gem_names object_names;
++
++	int		  is_master;
++	struct drm_master *masterp;
++
++	struct list_head  fbs;
++
++	struct list_head  event_list;
++	int		  event_space;
++	struct selinfo	  event_poll;
+ };
+ 
+ typedef struct drm_lock_data {
+@@ -519,6 +590,21 @@ struct drm_vblank_info {
+ 	int inmodeset;			/* Display driver is setting mode */
+ };
+ 
++/* Size of ringbuffer for vblank timestamps. Just double-buffer
++ * in initial implementation.
 + */
++#define DRM_VBLANKTIME_RBSIZE 2
 +
-+#ifndef AGP_AGP_I810_H
-+#define	AGP_AGP_I810_H
++/* Flags and return codes for get_vblank_timestamp() driver function. */
++#define DRM_CALLED_FROM_VBLIRQ 1
++#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
++#define DRM_VBLANKTIME_INVBL             (1 << 1)
 +
-+#include <sys/param.h>
-+#include <sys/sglist.h>
++/* get_scanout_position() return flags */
++#define DRM_SCANOUTPOS_VALID        (1 << 0)
++#define DRM_SCANOUTPOS_INVBL        (1 << 1)
++#define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
 +
-+#include <vm/vm.h>
-+#include <vm/vm_page.h>
+ /* location of GART table */
+ #define DRM_ATI_GART_MAIN 1
+ #define DRM_ATI_GART_FB   2
+@@ -540,6 +626,67 @@ struct drm_ati_pcigart_info {
+ 	struct drm_dma_handle *dmah; /* handle for ATI PCIGART table */
+ };
+ 
++typedef vm_paddr_t resource_size_t;
 +
-+/* Special gtt memory types */
-+#define AGP_DCACHE_MEMORY	1
-+#define AGP_PHYS_MEMORY		2
++/**
++ * GEM specific mm private for tracking GEM objects
++ */
++struct drm_gem_mm {
++	struct drm_open_hash offset_hash; /**< User token hash table for maps */
++	struct unrhdr *idxunr;
++};
 +
-+/* New caching attributes for gen6/sandybridge */
-+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
++struct drm_gem_object {
++	/** Reference count of this object */
++	u_int refcount;
 +
-+/* flag for GFDT type */
-+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
++	/** Handle count of this object. Each handle also holds a reference */
++	u_int handle_count; /* number of handles on this object */
 +
-+struct intel_gtt {
-+	/* Size of memory reserved for graphics by the BIOS */
-+	u_int stolen_size;
-+	/* Total number of gtt entries. */
-+	u_int gtt_total_entries;
-+	/*
-+	 * Part of the gtt that is mappable by the cpu, for those
-+	 * chips where this is not the full gtt.
++	/** Related drm device */
++	struct drm_device *dev;
++
++	/** File representing the shmem storage: filp in Linux parlance */
++	vm_object_t vm_obj;
++
++	bool on_map;
++	struct drm_hash_item map_list;
++
++	/**
++	 * Size of the object, in bytes.  Immutable over the object's
++	 * lifetime.
 +	 */
-+	u_int gtt_mappable_entries;
++	size_t size;
 +
-+	/*
-+	 * Always false.
++	/**
++	 * Global name for this object, starts at 1. 0 means unnamed.
++	 * Access is covered by the object_name_lock in the related drm_device
 +	 */
-+	u_int do_idle_maps;
++	int name;
++
++	/**
++	 * Memory domains. These monitor which caches contain read/write data
++	 * related to the object. When transitioning from one set of domains
++	 * to another, the driver is called to ensure that caches are suitably
++	 * flushed and invalidated
++	 */
++	uint32_t read_domains;
++	uint32_t write_domain;
++
++	/**
++	 * While validating an exec operation, the
++	 * new read/write domain values are computed here.
++	 * They will be transferred to the above values
++	 * at the point that any cache flushing occurs
++	 */
++	uint32_t pending_read_domains;
++	uint32_t pending_write_domain;
++
++	void *driver_private;
 +};
 +
-+struct intel_gtt agp_intel_gtt_get(device_t dev);
-+int agp_intel_gtt_chipset_flush(device_t dev);
-+void agp_intel_gtt_unmap_memory(device_t dev, struct sglist *sg_list);
-+void agp_intel_gtt_clear_range(device_t dev, u_int first_entry,
-+    u_int num_entries);
-+int agp_intel_gtt_map_memory(device_t dev, vm_page_t *pages, u_int num_entries,
-+    struct sglist **sg_list);
-+void agp_intel_gtt_insert_sg_entries(device_t dev, struct sglist *sg_list,
-+    u_int pg_start, u_int flags);
-+void agp_intel_gtt_insert_pages(device_t dev, u_int first_entry,
-+    u_int num_entries, vm_page_t *pages, u_int flags);
++#include "drm_crtc.h"
 +
-+struct intel_gtt intel_gtt_get(void);
-+int intel_gtt_chipset_flush(void);
-+void intel_gtt_unmap_memory(struct sglist *sg_list);
-+void intel_gtt_clear_range(u_int first_entry, u_int num_entries);
-+int intel_gtt_map_memory(vm_page_t *pages, u_int num_entries,
-+    struct sglist **sg_list);
-+void intel_gtt_insert_sg_entries(struct sglist *sg_list, u_int pg_start,
-+    u_int flags);
-+void intel_gtt_insert_pages(u_int first_entry, u_int num_entries,
-+    vm_page_t *pages, u_int flags);
-+vm_paddr_t intel_gtt_read_pte_paddr(u_int entry);
-+u_int32_t intel_gtt_read_pte(u_int entry);
-+device_t intel_gtt_get_bridge_device(void);
+ #ifndef DMA_BIT_MASK
+ #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
+ #endif
+@@ -573,9 +720,40 @@ struct drm_driver_info {
+ 	int	(*irq_postinstall)(struct drm_device *dev);
+ 	void	(*irq_uninstall)(struct drm_device *dev);
+ 	void	(*irq_handler)(DRM_IRQ_ARGS);
 +
-+#endif
-
-Property changes on: stable/9/sys/dev/agp/agp_i810.h
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/agp/agpvar.h
-===================================================================
-diff --git sys/dev/agp/agpvar.h sys/dev/agp/agpvar.h
---- sys/dev/agp/agpvar.h	(revision 230124)
-+++ sys/dev/agp/agpvar.h	(working copy)
-@@ -122,4 +122,10 @@
-  */
- void agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi);
+ 	u32	(*get_vblank_counter)(struct drm_device *dev, int crtc);
+ 	int	(*enable_vblank)(struct drm_device *dev, int crtc);
+ 	void	(*disable_vblank)(struct drm_device *dev, int crtc);
++	int	(*get_scanout_position)(struct drm_device *dev, int crtc,
++		    int *vpos, int *hpos);
++
++	int	(*get_vblank_timestamp)(struct drm_device *dev, int crtc,
++		    int *max_error, struct timeval *vblank_time,
++		    unsigned flags);
++
++	int	(*gem_init_object)(struct drm_gem_object *obj);
++	void	(*gem_free_object)(struct drm_gem_object *obj);
++
++	struct cdev_pager_ops *gem_pager_ops;
++
++	int	(*dumb_create)(struct drm_file *file_priv,
++		    struct drm_device *dev, struct drm_mode_create_dumb *args);
++	int	(*dumb_map_offset)(struct drm_file *file_priv,
++		    struct drm_device *dev, uint32_t handle, uint64_t *offset);
++	int	(*dumb_destroy)(struct drm_file *file_priv,
++		    struct drm_device *dev, uint32_t handle);
++
++	int	(*sysctl_init)(struct drm_device *dev,
++		    struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
++	void	(*sysctl_cleanup)(struct drm_device *dev);
++
++	void	(*device_lock)(struct drm_device *dev);
++	void	(*device_unlock)(struct drm_device *dev);
++	int	(*device_lock_sleep)(struct drm_device *dev, void *chan,
++		    int flags, const char *msg, int timeout);
++	void	(*device_lock_assert)(struct drm_device *dev);
++	void	(*device_unlock_assert)(struct drm_device *dev);
++	void	(*device_nonsleepable_unlock_assert)(struct drm_device *dev);
  
-+#define AGP_NORMAL_MEMORY 0
+ 	drm_pci_id_list_t *id_entry;	/* PCI ID, name, and chipset private */
+ 
+@@ -607,6 +785,41 @@ struct drm_driver_info {
+ 	u32 driver_features;
+ };
+ 
++/**
++ * DRM minor structure. This structure represents a drm minor number.
++ */
++struct drm_minor {
++	int index;			/**< Minor device number */
++	int type;                       /**< Control or render */
++	device_t kdev;			/**< OS device */
++	struct drm_device *dev;
 +
-+#define AGP_USER_TYPES (1 << 16)
-+#define AGP_USER_MEMORY (AGP_USER_TYPES)
-+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
++	struct drm_master *master; /* currently active master for this node */
++	struct list_head master_list;
++	struct drm_mode_group mode_group;
++};
 +
- #endif /* !_PCI_AGPVAR_H_ */
-Index: sys/dev/agp/agp.c
-===================================================================
-diff --git sys/dev/agp/agp.c sys/dev/agp/agp.c
---- sys/dev/agp/agp.c	(revision 230124)
-+++ sys/dev/agp/agp.c	(working copy)
-@@ -239,7 +239,8 @@
- 		if (memsize <= agp_max[i][0])
- 			break;
- 	}
--	if (i == agp_max_size) i = agp_max_size - 1;
-+	if (i == agp_max_size)
-+		i = agp_max_size - 1;
- 	sc->as_maxmem = agp_max[i][1] << 20U;
++/* mode specified on the command line */
++struct drm_cmdline_mode {
++	bool specified;
++	bool refresh_specified;
++	bool bpp_specified;
++	int xres, yres;
++	int bpp;
++	int refresh;
++	bool rb;
++	bool interlace;
++	bool cvt;
++	bool margins;
++	enum drm_connector_force force;
++};
++
++struct drm_pending_vblank_event {
++	struct drm_pending_event base;
++	int pipe;
++	struct drm_event_vblank event;
++};
++
+ /* Length for the array of resource pointers for drm_get_resource_*. */
+ #define DRM_MAX_PCI_RESOURCE	6
  
- 	/*
-@@ -803,6 +804,13 @@
- }
+@@ -629,10 +842,10 @@ struct drm_device {
+ 	int		  flags;	/* Flags to open(2)		   */
  
- static int
-+agp_chipset_flush(device_t dev)
-+{
+ 				/* Locks */
+-	struct mtx	  vbl_lock;	/* protects vblank operations */
+ 	struct mtx	  dma_lock;	/* protects dev->dma */
+ 	struct mtx	  irq_lock;	/* protects irq condition checks */
+ 	struct mtx	  dev_lock;	/* protects everything else */
++	struct sx	  dev_struct_lock;
+ 	DRM_SPINTYPE	  drw_lock;
+ 
+ 				/* Usage Counters */
+@@ -680,16 +893,13 @@ struct drm_device {
+ 	atomic_t	  context_flag;	/* Context swapping flag	   */
+ 	int		  last_context;	/* Last current context		   */
+ 
+-	int		  vblank_disable_allowed;
+-	struct callout	  vblank_disable_timer;
+-	u32		  max_vblank_count;	/* size of vblank counter register */
+-	struct drm_vblank_info *vblank;		/* per crtc vblank info */
+ 	int		  num_crtcs;
+ 
+ 	struct sigio      *buf_sigio;	/* Processes waiting for SIGIO     */
+ 
+ 				/* Sysctl support */
+ 	struct drm_sysctl_info *sysctl;
++	int		  sysctl_node_idx;
+ 
+ 	drm_agp_head_t    *agp;
+ 	drm_sg_mem_t      *sg;  /* Scatter gather memory */
+@@ -698,9 +908,43 @@ struct drm_device {
+ 	unsigned int	  agp_buffer_token;
+ 	drm_local_map_t   *agp_buffer_map;
+ 
++	struct drm_minor *control;		/**< Control node for card */
++	struct drm_minor *primary;		/**< render type primary screen head */
 +
-+	return (AGP_CHIPSET_FLUSH(dev));
-+}
+ 	struct unrhdr	  *drw_unrhdr;
+ 	/* RB tree of drawable infos */
+ 	RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
 +
-+static int
- agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
++	int vblank_disable_allowed;
++
++	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
++	struct timeval *_vblank_time;   /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
++	struct mtx vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
++	struct mtx vbl_lock;
++	atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
++	u32 *last_vblank;               /* protected by dev->vbl_lock, used */
++					/* for wraparound handling */
++	int *vblank_enabled;            /* so we don't call enable more than
++					   once per disable */
++	int *vblank_inmodeset;          /* Display driver is setting mode */
++	u32 *last_vblank_wait;		/* Last vblank seqno waited per CRTC */
++	struct callout vblank_disable_callout;
++
++	u32 max_vblank_count;           /**< size of vblank counter register */
++
++	struct list_head vblank_event_list;
++	struct mtx	 event_lock;
++
++        struct drm_mode_config mode_config;	/**< Current mode config */
++
++	/* GEM part */
++	struct sx	  object_name_lock;
++	struct drm_gem_names object_names;
++	void		 *mm_private;
++
++	void *sysctl_private;
++	char busid_str[128];
++	int modesetting;
+ };
+ 
+ static __inline__ int drm_core_check_feature(struct drm_device *dev,
+@@ -719,6 +963,9 @@ static inline int drm_core_has_AGP(struct drm_device *dev)
+ #endif
+ 
+ extern int	drm_debug_flag;
++extern int	drm_notyet_flag;
++extern unsigned int drm_vblank_offdelay;
++extern unsigned int drm_timestamp_precision;
+ 
+ /* Device setup support (drm_drv.c) */
+ int	drm_probe(device_t kdev, drm_pci_id_list_t *idlist);
+@@ -732,6 +979,11 @@ d_poll_t drm_poll;
+ d_mmap_t drm_mmap;
+ extern drm_local_map_t	*drm_getsarea(struct drm_device *dev);
+ 
++void drm_event_wakeup(struct drm_pending_event *e);
++
++int drm_add_busid_modesetting(struct drm_device *dev,
++    struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
++
+ /* File operations helpers (drm_fops.c) */
+ extern int		drm_open_helper(struct cdev *kdev, int flags, int fmt,
+ 					 DRM_STRUCTPROC *p,
+@@ -791,16 +1043,37 @@ irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+ void	drm_driver_irq_preinstall(struct drm_device *dev);
+ void	drm_driver_irq_postinstall(struct drm_device *dev);
+ void	drm_driver_irq_uninstall(struct drm_device *dev);
+-void	drm_handle_vblank(struct drm_device *dev, int crtc);
+-u32	drm_vblank_count(struct drm_device *dev, int crtc);
+-int	drm_vblank_get(struct drm_device *dev, int crtc);
+-void	drm_vblank_put(struct drm_device *dev, int crtc);
+-void	drm_vblank_cleanup(struct drm_device *dev);
+-int	drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+-int	drm_vblank_init(struct drm_device *dev, int num_crtcs);
++
++void	drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
++void	drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+ int 	drm_modeset_ctl(struct drm_device *dev, void *data,
+ 			struct drm_file *file_priv);
+ 
++extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
++extern int drm_wait_vblank(struct drm_device *dev, void *data,
++			   struct drm_file *filp);
++extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
++extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
++extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
++				     struct timeval *vblanktime);
++extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
++void drm_handle_vblank_events(struct drm_device *dev, int crtc);
++extern int drm_vblank_get(struct drm_device *dev, int crtc);
++extern void drm_vblank_put(struct drm_device *dev, int crtc);
++extern void drm_vblank_off(struct drm_device *dev, int crtc);
++extern void drm_vblank_cleanup(struct drm_device *dev);
++extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
++				     struct timeval *tvblank, unsigned flags);
++extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
++						 int crtc, int *max_error,
++						 struct timeval *vblank_time,
++						 unsigned flags,
++						 struct drm_crtc *refcrtc);
++extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
++
++struct timeval ns_to_timeval(const int64_t nsec);
++int64_t timeval_to_ns(const struct timeval *tv);
++
+ /* AGP/PCI Express/GART support (drm_agpsupport.c) */
+ int	drm_device_is_agp(struct drm_device *dev);
+ int	drm_device_is_pcie(struct drm_device *dev);
+@@ -832,6 +1105,9 @@ int	drm_ati_pcigart_init(struct drm_device *dev,
+ int	drm_ati_pcigart_cleanup(struct drm_device *dev,
+ 				struct drm_ati_pcigart_info *gart_info);
+ 
++/* Cache management (drm_memory.c) */
++void	drm_clflush_pages(vm_page_t *pages, unsigned long num_pages);
++
+ /* Locking IOCTL support (drm_drv.c) */
+ int	drm_lock(struct drm_device *dev, void *data,
+ 		 struct drm_file *file_priv);
+@@ -919,8 +1195,6 @@ int	drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv);
+ /* IRQ support (drm_irq.c) */
+ int	drm_control(struct drm_device *dev, void *data,
+ 		    struct drm_file *file_priv);
+-int	drm_wait_vblank(struct drm_device *dev, void *data,
+-			struct drm_file *file_priv);
+ 
+ /* AGP/GART support (drm_agpsupport.c) */
+ int	drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+@@ -940,6 +1214,12 @@ int	drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ int	drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ 			   struct drm_file *file_priv);
+ 
++				/* Stub support (drm_stub.h) */
++extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
++			       struct drm_file *file_priv);
++extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv);
++
+ /* Scatter Gather Support (drm_scatter.c) */
+ int	drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+ 			   struct drm_file *file_priv);
+@@ -951,6 +1231,73 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
+ 				size_t align, dma_addr_t maxaddr);
+ void	drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
+ 
++/* Graphics Execution Manager library functions (drm_gem.c) */
++int drm_gem_init(struct drm_device *dev);
++void drm_gem_destroy(struct drm_device *dev);
++
++int drm_gem_close_ioctl(struct drm_device *dev, void *data,
++			struct drm_file *file_priv);
++int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++			struct drm_file *file_priv);
++int drm_gem_open_ioctl(struct drm_device *dev, void *data,
++		       struct drm_file *file_priv);
++int drm_gem_handle_create(struct drm_file *file_priv,
++			  struct drm_gem_object *obj,
++			  u32 *handlep);
++int drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle);
++void drm_gem_object_handle_reference(struct drm_gem_object *obj);
++void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
++void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj);
++void drm_gem_object_handle_free(struct drm_gem_object *obj);
++void drm_gem_object_reference(struct drm_gem_object *obj);
++void drm_gem_object_unreference(struct drm_gem_object *obj);
++void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
++void drm_gem_object_release(struct drm_gem_object *obj);
++void drm_gem_object_free(struct drm_gem_object *obj);
++int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
++    size_t size);
++int drm_gem_private_object_init(struct drm_device *dev,
++    struct drm_gem_object *obj, size_t size);
++struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
++    size_t size);
++struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
++    struct drm_file *file_priv, uint32_t handle);
++
++void drm_gem_open(struct drm_device *dev, struct drm_file *file_priv);
++void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv);
++
++int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
++void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
++int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
++    struct vm_object **obj_res, int nprot);
++void drm_gem_pager_dtr(void *obj);
++
++void drm_device_lock_mtx(struct drm_device *dev);
++void drm_device_unlock_mtx(struct drm_device *dev);
++int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
++    const char *msg, int timeout);
++void drm_device_assert_mtx_locked(struct drm_device *dev);
++void drm_device_assert_mtx_unlocked(struct drm_device *dev);
++
++void drm_device_lock_struct(struct drm_device *dev);
++void drm_device_unlock_struct(struct drm_device *dev);
++int drm_device_sleep_struct(struct drm_device *dev, void *chan, int flags,
++    const char *msg, int timeout);
++void drm_device_assert_struct_locked(struct drm_device *dev);
++void drm_device_assert_struct_unlocked(struct drm_device *dev);
++
++void drm_compat_locking_init(struct drm_device *dev);
++void drm_sleep_locking_init(struct drm_device *dev);
++
++/* drm_modes.c */
++bool drm_mode_parse_command_line_for_connector(const char *mode_option,
++    struct drm_connector *connector, struct drm_cmdline_mode *mode);
++struct drm_display_mode *drm_mode_create_from_cmdline_mode(
++    struct drm_device *dev, struct drm_cmdline_mode *cmd);
++
++/* drm_edid.c */
++u8 *drm_find_cea_extension(struct edid *edid);
++
+ /* Inline replacements for drm_alloc and friends */
+ static __inline__ void *
+ drm_alloc(size_t size, struct malloc_type *area)
+@@ -1000,7 +1347,7 @@ drm_core_findmap(struct drm_device *dev, unsigned long offset)
  {
- 	device_t dev = kdev->si_drv1;
-@@ -869,6 +877,8 @@
- 	case AGPIOC_UNBIND:
- 		return agp_unbind_user(dev, (agp_unbind *)data);
+ 	drm_local_map_t *map;
  
-+	case AGPIOC_CHIPSET_FLUSH:
-+		return agp_chipset_flush(dev);
- 	}
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 	TAILQ_FOREACH(map, &dev->maplist, link) {
+ 		if (offset == (unsigned long)map->handle)
+ 			return map;
+@@ -1012,5 +1359,13 @@ static __inline__ void drm_core_dropmap(struct drm_map *map)
+ {
+ }
  
- 	return EINVAL;
-Index: sys/dev/drm/sis_drv.c
-===================================================================
-diff --git sys/dev/drm/sis_drv.c sys/dev/drm/sis_drv.c
---- sys/dev/drm/sis_drv.c	(revision 230124)
-+++ sys/dev/drm/sis_drv.c	(working copy)
-@@ -57,6 +57,8 @@
- 	dev->driver->major		= DRIVER_MAJOR;
- 	dev->driver->minor		= DRIVER_MINOR;
- 	dev->driver->patchlevel		= DRIVER_PATCHLEVEL;
++#define KIB_NOTYET()							\
++do {									\
++	if (drm_debug_flag && drm_notyet_flag)				\
++		printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \
++} while (0)
 +
-+	drm_compat_locking_init(dev);
++#define	KTR_DRM	KTR_DEV
++
+ #endif /* __KERNEL__ */
+ #endif /* _DRM_P_H_ */
+diff --git a/sys/dev/drm/drm_atomic.h b/sys/dev/drm/drm_atomic.h
+index e8cd818..e7dbed9 100644
+--- sys/dev/drm/drm_atomic.h
++++ sys/dev/drm/drm_atomic.h
+@@ -89,3 +89,5 @@ find_first_zero_bit(volatile void *p, int max)
+ 	}
+ 	return max;
  }
++
++#define	BITS_TO_LONGS(x) (howmany((x), NBBY * sizeof(long)))
+diff --git a/sys/dev/drm/drm_auth.c b/sys/dev/drm/drm_auth.c
+index 555badc..0daf52c 100644
+--- sys/dev/drm/drm_auth.c
++++ sys/dev/drm/drm_auth.c
+@@ -51,7 +51,7 @@ static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
+ 	drm_magic_entry_t *pt;
+ 	int hash = drm_hash_magic(magic);
  
- static int
-Index: sys/dev/drm/intel_drv.h
-===================================================================
-diff --git sys/dev/drm/intel_drv.h sys/dev/drm/intel_drv.h
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/intel_drv.h	(working copy)
-@@ -0,0 +1,421 @@
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 
+ 	for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
+ 		if (pt->magic == magic) {
+@@ -74,7 +74,7 @@ static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
+ 
+ 	DRM_DEBUG("%d\n", magic);
+ 
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 
+ 	hash = drm_hash_magic(magic);
+ 	entry = malloc(sizeof(*entry), DRM_MEM_MAGIC, M_ZERO | M_NOWAIT);
+@@ -105,7 +105,7 @@ static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
+ 	drm_magic_entry_t *pt;
+ 	int		  hash;
+ 
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 
+ 	DRM_DEBUG("%d\n", magic);
+ 	hash = drm_hash_magic(magic);
+diff --git a/sys/dev/drm/drm_bufs.c b/sys/dev/drm/drm_bufs.c
+index 2d27cd4..e3260e1 100644
+--- sys/dev/drm/drm_bufs.c
++++ sys/dev/drm/drm_bufs.c
+@@ -48,7 +48,7 @@ static int drm_alloc_resource(struct drm_device *dev, int resource)
+ 	struct resource *res;
+ 	int rid;
+ 
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 
+ 	if (resource >= DRM_MAX_PCI_RESOURCE) {
+ 		DRM_ERROR("Resource %d too large\n", resource);
+@@ -301,7 +301,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
+ 
+ void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
+ {
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 
+ 	if (map == NULL)
+ 		return;
+diff --git a/sys/dev/drm/drm_crtc.c b/sys/dev/drm/drm_crtc.c
+new file mode 100644
+index 0000000..04295e5
+--- /dev/null
++++ sys/dev/drm/drm_crtc.c
+@@ -0,0 +1,3304 @@
 +/*
-+ * Copyright (c) 2006 Dave Airlie <airlied at linux.ie>
-+ * Copyright (c) 2007-2008 Intel Corporation
++ * Copyright (c) 2006-2008 Intel Corporation
++ * Copyright (c) 2007 Dave Airlie <airlied at linux.ie>
++ * Copyright (c) 2008 Red Hat Inc.
++ *
++ * DRM core CRTC related functions
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission.  The copyright holders make no representations
++ * about the suitability of this software for any purpose.  It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ *
++ * Authors:
++ *      Keith Packard
++ *	Eric Anholt <eric at anholt.net>
++ *      Dave Airlie <airlied at linux.ie>
++ *      Jesse Barnes <jesse.barnes at intel.com>
++ */
++#include "dev/drm/drm.h"
++#include "dev/drm/drmP.h"
++#include "dev/drm/drm_crtc.h"
++#include "dev/drm/drm_edid.h"
++#include "dev/drm/drm_fourcc.h"
++#include <sys/limits.h>
++
++struct drm_prop_enum_list {
++	int type;
++	char *name;
++};
++
++/* Avoid boilerplate.  I'm tired of typing. */
++#define DRM_ENUM_NAME_FN(fnname, list)				\
++	char *fnname(int val)					\
++	{							\
++		int i;						\
++		for (i = 0; i < DRM_ARRAY_SIZE(list); i++) {	\
++			if (list[i].type == val)		\
++				return list[i].name;		\
++		}						\
++		return "(unknown)";				\
++	}
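++/*
++ * Illustrative note: the macro above stamps out simple enum-to-string
++ * lookup helpers.  For example, the invocation
++ * DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) below expands,
++ * roughly, to:
++ *
++ *	char *drm_get_dpms_name(int val)
++ *	{
++ *		int i;
++ *		for (i = 0; i < DRM_ARRAY_SIZE(drm_dpms_enum_list); i++) {
++ *			if (drm_dpms_enum_list[i].type == val)
++ *				return drm_dpms_enum_list[i].name;
++ *		}
++ *		return "(unknown)";
++ *	}
++ */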
++
++/*
++ * Global properties
++ */
++static struct drm_prop_enum_list drm_dpms_enum_list[] =
++{	{ DRM_MODE_DPMS_ON, "On" },
++	{ DRM_MODE_DPMS_STANDBY, "Standby" },
++	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
++	{ DRM_MODE_DPMS_OFF, "Off" }
++};
++
++DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
++
++/*
++ * Optional properties
++ */
++static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
++{
++	{ DRM_MODE_SCALE_NONE, "None" },
++	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
++	{ DRM_MODE_SCALE_CENTER, "Center" },
++	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
++};
++
++static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
++{
++	{ DRM_MODE_DITHERING_OFF, "Off" },
++	{ DRM_MODE_DITHERING_ON, "On" },
++	{ DRM_MODE_DITHERING_AUTO, "Automatic" },
++};
++
++/*
++ * Non-global properties, but "required" for certain connectors.
++ */
++static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
++{
++	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
++	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
++	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
++};
++
++DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
++
++static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
++{
++	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
++	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
++	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
++};
++
++DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
++		 drm_dvi_i_subconnector_enum_list)
++
++static struct drm_prop_enum_list drm_tv_select_enum_list[] =
++{
++	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
++	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
++	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
++	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
++	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
++};
++
++DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
++
++static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
++{
++	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
++	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
++	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
++	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
++	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
++};
++
++DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
++		 drm_tv_subconnector_enum_list)
++
++static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
++	{ DRM_MODE_DIRTY_OFF,      "Off"      },
++	{ DRM_MODE_DIRTY_ON,       "On"       },
++	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
++};
++
++DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
++		 drm_dirty_info_enum_list)
++
++struct drm_conn_prop_enum_list {
++	int type;
++	char *name;
++	int count;
++};
++
++/*
++ * Connector and encoder types.
++ */
++static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
++{	{ DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
++	{ DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
++	{ DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
++	{ DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
++	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
++	{ DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
++	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
++	{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
++	{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
++	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
++	{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
++	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
++	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
++	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
++	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
++};
++
++static struct drm_prop_enum_list drm_encoder_enum_list[] =
++{	{ DRM_MODE_ENCODER_NONE, "None" },
++	{ DRM_MODE_ENCODER_DAC, "DAC" },
++	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
++	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
++	{ DRM_MODE_ENCODER_TVDAC, "TV" },
++};
++
++char *drm_get_encoder_name(struct drm_encoder *encoder)
++{
++	static char buf[32];
++
++	snprintf(buf, 32, "%s-%d",
++		 drm_encoder_enum_list[encoder->encoder_type].name,
++		 encoder->base.id);
++	return buf;
++}
++
++char *drm_get_connector_name(struct drm_connector *connector)
++{
++	static char buf[32];
++
++	snprintf(buf, 32, "%s-%d",
++		 drm_connector_enum_list[connector->connector_type].name,
++		 connector->connector_type_id);
++	return buf;
++}
++
++char *drm_get_connector_status_name(enum drm_connector_status status)
++{
++	if (status == connector_status_connected)
++		return "connected";
++	else if (status == connector_status_disconnected)
++		return "disconnected";
++	else
++		return "unknown";
++}
++
++/**
++ * drm_mode_object_get - allocate a new identifier
++ * @dev: DRM device
++ * @obj: object to assign an identifier to
++ * @type: object type
++ *
++ * LOCKING:
++ *
++ * Create a unique identifier for @obj in @dev's identifier space.  Used
++ * for tracking modes, CRTCs and connectors.
++ *
++ * RETURNS:
++ * New unique (relative to other objects in @dev) integer identifier for the
++ * object.
++ */
++static int drm_mode_object_get(struct drm_device *dev,
++			       struct drm_mode_object *obj, uint32_t obj_type)
++{
++	int new_id;
++	int ret;
++
++	new_id = 0;
++	ret = drm_gem_name_create(&dev->mode_config.crtc_names, obj, &new_id);
++	if (ret != 0)
++		return (-ret);
++
++	obj->id = new_id;
++	obj->type = obj_type;
++	return 0;
++}
++
++/**
++ * drm_mode_object_put - free an identifier
++ * @dev: DRM device
++ * @id: ID to free
++ *
++ * LOCKING:
++ * Caller must hold DRM mode_config lock.
++ *
++ * Free @id from @dev's unique identifier pool.
++ */
++static void drm_mode_object_put(struct drm_device *dev,
++				struct drm_mode_object *object)
++{
++
++	drm_gem_names_remove(&dev->mode_config.crtc_names, object->id);
++}
++
++struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
++		uint32_t id, uint32_t type)
++{
++	struct drm_mode_object *obj;
++
++	obj = drm_gem_name_ref(&dev->mode_config.crtc_names, id, NULL);
++	if (!obj || (obj->type != type) || (obj->id != id))
++		obj = NULL;
++
++	return obj;
++}
++
++/**
++ * drm_framebuffer_init - initialize a framebuffer
++ * @dev: DRM device
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Allocates an ID for the framebuffer's parent mode object, sets its mode
++ * functions & device file and adds it to the master fd list.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure.
++ */
++int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
++			 const struct drm_framebuffer_funcs *funcs)
++{
++	int ret;
++
++	DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
++
++	ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
++	if (ret) {
++		return ret;
++	}
++
++	fb->dev = dev;
++	fb->funcs = funcs;
++	dev->mode_config.num_fb++;
++	list_add(&fb->head, &dev->mode_config.fb_list);
++
++	return 0;
++}
++
++/**
++ * drm_framebuffer_cleanup - remove a framebuffer object
++ * @fb: framebuffer to remove
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Scans all the CRTCs in @dev's mode_config.  If they're using @fb, removes
++ * it, setting it to NULL.
++ */
++void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
++{
++	struct drm_device *dev = fb->dev;
++	struct drm_crtc *crtc;
++	struct drm_plane *plane;
++	struct drm_mode_set set;
++	int ret;
++
++	DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
++
++	/* remove from any CRTC */
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		if (crtc->fb == fb) {
++			/* should turn off the crtc */
++			memset(&set, 0, sizeof(struct drm_mode_set));
++			set.crtc = crtc;
++			set.fb = NULL;
++			ret = crtc->funcs->set_config(&set);
++			if (ret)
++				DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
++		}
++	}
++
++	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
++		if (plane->fb == fb) {
++			/* should turn off the crtc */
++			ret = plane->funcs->disable_plane(plane);
++			if (ret)
++				DRM_ERROR("failed to disable plane with busy fb\n");
++			/* disconnect the plane from the fb and crtc: */
++			plane->fb = NULL;
++			plane->crtc = NULL;
++		}
++	}
++
++	drm_mode_object_put(dev, &fb->base);
++	list_del(&fb->head);
++	dev->mode_config.num_fb--;
++}
++
++/**
++ * drm_crtc_init - Initialise a new CRTC object
++ * @dev: DRM device
++ * @crtc: CRTC object to init
++ * @funcs: callbacks for the new CRTC
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Inits a new object created as base part of a driver crtc object.
++ */
++void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
++		   const struct drm_crtc_funcs *funcs)
++{
++
++
++	crtc->dev = dev;
++	crtc->funcs = funcs;
++
++	sx_xlock(&dev->mode_config.mutex);
++	drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
++
++	list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
++	dev->mode_config.num_crtc++;
++	sx_xunlock(&dev->mode_config.mutex);
++}
++
++/**
++ * drm_crtc_cleanup - Cleans up the core crtc usage.
++ * @crtc: CRTC to cleanup
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Cleanup @crtc. Removes from drm modesetting space
++ * does NOT free object, caller does that.
++ */
++void drm_crtc_cleanup(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++
++	DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
++
++	if (crtc->gamma_store) {
++		free(crtc->gamma_store, DRM_MEM_KMS);
++		crtc->gamma_store = NULL;
++	}
++
++	drm_mode_object_put(dev, &crtc->base);
++	list_del(&crtc->head);
++	dev->mode_config.num_crtc--;
++}
++
++/**
++ * drm_mode_probed_add - add a mode to a connector's probed mode list
++ * @connector: connector the new mode is added to
++ * @mode: mode data
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Add @mode to @connector's mode list for later use.
++ */
++void drm_mode_probed_add(struct drm_connector *connector,
++			 struct drm_display_mode *mode)
++{
++
++	DRM_MODE_CONFIG_ASSERT_LOCKED(connector->dev);
++
++	list_add(&mode->head, &connector->probed_modes);
++}
++
++/**
++ * drm_mode_remove - remove and free a mode
++ * @connector: connector list to modify
++ * @mode: mode to remove
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Remove @mode from @connector's mode list, then free it.
++ */
++void drm_mode_remove(struct drm_connector *connector,
++		     struct drm_display_mode *mode)
++{
++
++	DRM_MODE_CONFIG_ASSERT_LOCKED(connector->dev);
++
++	list_del(&mode->head);
++	free(mode, DRM_MEM_KMS);
++}
++
++/**
++ * drm_connector_init - Init a preallocated connector
++ * @dev: DRM device
++ * @connector: the connector to init
++ * @funcs: callbacks for this connector
++ * @connector_type: user visible type of the connector
++ *
++ * LOCKING:
++ * Caller must hold @dev's mode_config lock.
++ *
++ * Initialises a preallocated connector. Connectors should be
++ * subclassed as part of driver connector objects.
++ */
++void drm_connector_init(struct drm_device *dev,
++		     struct drm_connector *connector,
++		     const struct drm_connector_funcs *funcs,
++		     int connector_type)
++{
++	sx_xlock(&dev->mode_config.mutex);
++
++	connector->dev = dev;
++	connector->funcs = funcs;
++	drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
++	connector->connector_type = connector_type;
++	connector->connector_type_id =
++		++drm_connector_enum_list[connector_type].count; /* TODO */
++	INIT_LIST_HEAD(&connector->user_modes);
++	INIT_LIST_HEAD(&connector->probed_modes);
++	INIT_LIST_HEAD(&connector->modes);
++	connector->edid_blob_ptr = NULL;
++
++	list_add_tail(&connector->head, &dev->mode_config.connector_list);
++	dev->mode_config.num_connector++;
++
++	drm_connector_attach_property(connector,
++				      dev->mode_config.edid_property, 0);
++
++	drm_connector_attach_property(connector,
++				      dev->mode_config.dpms_property, 0);
++
++	sx_xunlock(&dev->mode_config.mutex);
++}
++
++/**
++ * drm_connector_cleanup - cleans up an initialised connector
++ * @connector: connector to cleanup
++ *
++ * LOCKING:
++ * Caller must hold @dev's mode_config lock. XXXKIB really ?
++ *
++ * Cleans up the connector but doesn't free the object.
++ */
++void drm_connector_cleanup(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct drm_display_mode *mode, *t;
++
++	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
++		drm_mode_remove(connector, mode);
++
++	list_for_each_entry_safe(mode, t, &connector->modes, head)
++		drm_mode_remove(connector, mode);
++
++	list_for_each_entry_safe(mode, t, &connector->user_modes, head)
++		drm_mode_remove(connector, mode);
++
++	sx_xlock(&dev->mode_config.mutex);
++	drm_mode_object_put(dev, &connector->base);
++	list_del(&connector->head);
++	dev->mode_config.num_connector--;
++	sx_xunlock(&dev->mode_config.mutex);
++}
++
++void drm_encoder_init(struct drm_device *dev,
++		      struct drm_encoder *encoder,
++		      const struct drm_encoder_funcs *funcs,
++		      int encoder_type)
++{
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	encoder->dev = dev;
++
++	drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
++	encoder->encoder_type = encoder_type;
++	encoder->funcs = funcs;
++
++	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
++	dev->mode_config.num_encoder++;
++
++	sx_xunlock(&dev->mode_config.mutex);
++}
++
++void drm_encoder_cleanup(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++
++	sx_xlock(&dev->mode_config.mutex);
++	drm_mode_object_put(dev, &encoder->base);
++	list_del(&encoder->head);
++	dev->mode_config.num_encoder--;
++	sx_xunlock(&dev->mode_config.mutex);
++}
++
++int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
++		   unsigned long possible_crtcs,
++		   const struct drm_plane_funcs *funcs,
++		   const uint32_t *formats, uint32_t format_count,
++		   bool priv)
++{
++	sx_xlock(&dev->mode_config.mutex);
++
++	plane->dev = dev;
++	drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
++	plane->funcs = funcs;
++	plane->format_types = malloc(sizeof(uint32_t) * format_count,
++	    DRM_MEM_KMS, M_WAITOK);
++
++	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
++	plane->format_count = format_count;
++	plane->possible_crtcs = possible_crtcs;
++
++	/* private planes are not exposed to userspace, but depending on
++	 * display hardware, might be convenient to allow sharing programming
++	 * for the scanout engine with the crtc implementation.
++	 */
++	if (!priv) {
++		list_add_tail(&plane->head, &dev->mode_config.plane_list);
++		dev->mode_config.num_plane++;
++	} else {
++		INIT_LIST_HEAD(&plane->head);
++	}
++
++	sx_xunlock(&dev->mode_config.mutex);
++
++	return 0;
++}
++
++void drm_plane_cleanup(struct drm_plane *plane)
++{
++	struct drm_device *dev = plane->dev;
++
++	sx_xlock(&dev->mode_config.mutex);
++	free(plane->format_types, DRM_MEM_KMS);
++	drm_mode_object_put(dev, &plane->base);
++	/* if not added to a list, it must be a private plane */
++	if (!list_empty(&plane->head)) {
++		list_del(&plane->head);
++		dev->mode_config.num_plane--;
++	}
++	sx_xunlock(&dev->mode_config.mutex);
++}
++
++/**
++ * drm_mode_create - create a new display mode
++ * @dev: DRM device
++ *
++ * LOCKING:
++ * Caller must hold DRM mode_config lock.
++ *
++ * Create a new drm_display_mode, give it an ID, and return it.
++ *
++ * RETURNS:
++ * Pointer to new mode on success, NULL on error.
++ */
++struct drm_display_mode *drm_mode_create(struct drm_device *dev)
++{
++	struct drm_display_mode *nmode;
++
++	nmode = malloc(sizeof(struct drm_display_mode), DRM_MEM_KMS,
++	    M_WAITOK | M_ZERO);
++
++	drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
++	return nmode;
++}
++
++/**
++ * drm_mode_destroy - remove a mode
++ * @dev: DRM device
++ * @mode: mode to remove
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Free @mode's unique identifier, then free it.
++ */
++void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
++{
++	drm_mode_object_put(dev, &mode->base);
++
++	free(mode, DRM_MEM_KMS);
++}
++
++static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
++{
++	struct drm_property *edid;
++	struct drm_property *dpms;
++	int i;
++
++	/*
++	 * Standard properties (apply to all connectors)
++	 */
++	edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
++				   DRM_MODE_PROP_IMMUTABLE,
++				   "EDID", 0);
++	dev->mode_config.edid_property = edid;
++
++	dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
++	    "DPMS", DRM_ARRAY_SIZE(drm_dpms_enum_list));
++	for (i = 0; i < DRM_ARRAY_SIZE(drm_dpms_enum_list); i++)
++		drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
++				      drm_dpms_enum_list[i].name);
++	dev->mode_config.dpms_property = dpms;
++
++	return 0;
++}
++
++/**
++ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
++ * @dev: DRM device
++ *
++ * Called by a driver the first time a DVI-I connector is made.
++ */
++int drm_mode_create_dvi_i_properties(struct drm_device *dev)
++{
++	struct drm_property *dvi_i_selector;
++	struct drm_property *dvi_i_subconnector;
++	int i;
++
++	if (dev->mode_config.dvi_i_select_subconnector_property)
++		return 0;
++
++	dvi_i_selector =
++		drm_property_create(dev, DRM_MODE_PROP_ENUM,
++				    "select subconnector",
++				    DRM_ARRAY_SIZE(drm_dvi_i_select_enum_list));
++	for (i = 0; i < DRM_ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
++		drm_property_add_enum(dvi_i_selector, i,
++				      drm_dvi_i_select_enum_list[i].type,
++				      drm_dvi_i_select_enum_list[i].name);
++	dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
++
++	dvi_i_subconnector =
++		drm_property_create(dev, DRM_MODE_PROP_ENUM |
++				    DRM_MODE_PROP_IMMUTABLE,
++				    "subconnector",
++				    DRM_ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
++	for (i = 0; i < DRM_ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
++		drm_property_add_enum(dvi_i_subconnector, i,
++				      drm_dvi_i_subconnector_enum_list[i].type,
++				      drm_dvi_i_subconnector_enum_list[i].name);
++	dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
++
++	return 0;
++}
++
++/**
++ * drm_create_tv_properties - create TV specific connector properties
++ * @dev: DRM device
++ * @num_modes: number of different TV formats (modes) supported
++ * @modes: array of pointers to strings containing name of each format
++ *
++ * Called by a driver's TV initialization routine, this function creates
++ * the TV specific connector properties for a given device.  Caller is
++ * responsible for allocating a list of format names and passing them to
++ * this routine.
++ */
++int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
++				  char *modes[])
++{
++	struct drm_property *tv_selector;
++	struct drm_property *tv_subconnector;
++	int i;
++
++	if (dev->mode_config.tv_select_subconnector_property)
++		return 0;
++
++	/*
++	 * Basic connector properties
++	 */
++	tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
++					  "select subconnector",
++					  DRM_ARRAY_SIZE(drm_tv_select_enum_list));
++	for (i = 0; i < DRM_ARRAY_SIZE(drm_tv_select_enum_list); i++)
++		drm_property_add_enum(tv_selector, i,
++				      drm_tv_select_enum_list[i].type,
++				      drm_tv_select_enum_list[i].name);
++	dev->mode_config.tv_select_subconnector_property = tv_selector;
++
++	tv_subconnector =
++		drm_property_create(dev, DRM_MODE_PROP_ENUM |
++				    DRM_MODE_PROP_IMMUTABLE, "subconnector",
++				    DRM_ARRAY_SIZE(drm_tv_subconnector_enum_list));
++	for (i = 0; i < DRM_ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
++		drm_property_add_enum(tv_subconnector, i,
++				      drm_tv_subconnector_enum_list[i].type,
++				      drm_tv_subconnector_enum_list[i].name);
++	dev->mode_config.tv_subconnector_property = tv_subconnector;
++
++	/*
++	 * Other, TV specific properties: margins & TV modes.
++	 */
++	dev->mode_config.tv_left_margin_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "left margin", 2);
++	dev->mode_config.tv_left_margin_property->values[0] = 0;
++	dev->mode_config.tv_left_margin_property->values[1] = 100;
++
++	dev->mode_config.tv_right_margin_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "right margin", 2);
++	dev->mode_config.tv_right_margin_property->values[0] = 0;
++	dev->mode_config.tv_right_margin_property->values[1] = 100;
++
++	dev->mode_config.tv_top_margin_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "top margin", 2);
++	dev->mode_config.tv_top_margin_property->values[0] = 0;
++	dev->mode_config.tv_top_margin_property->values[1] = 100;
++
++	dev->mode_config.tv_bottom_margin_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "bottom margin", 2);
++	dev->mode_config.tv_bottom_margin_property->values[0] = 0;
++	dev->mode_config.tv_bottom_margin_property->values[1] = 100;
++
++	dev->mode_config.tv_mode_property =
++		drm_property_create(dev, DRM_MODE_PROP_ENUM,
++				    "mode", num_modes);
++	for (i = 0; i < num_modes; i++)
++		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
++				      i, modes[i]);
++
++	dev->mode_config.tv_brightness_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "brightness", 2);
++	dev->mode_config.tv_brightness_property->values[0] = 0;
++	dev->mode_config.tv_brightness_property->values[1] = 100;
++
++	dev->mode_config.tv_contrast_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "contrast", 2);
++	dev->mode_config.tv_contrast_property->values[0] = 0;
++	dev->mode_config.tv_contrast_property->values[1] = 100;
++
++	dev->mode_config.tv_flicker_reduction_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "flicker reduction", 2);
++	dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
++	dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
++
++	dev->mode_config.tv_overscan_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "overscan", 2);
++	dev->mode_config.tv_overscan_property->values[0] = 0;
++	dev->mode_config.tv_overscan_property->values[1] = 100;
++
++	dev->mode_config.tv_saturation_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "saturation", 2);
++	dev->mode_config.tv_saturation_property->values[0] = 0;
++	dev->mode_config.tv_saturation_property->values[1] = 100;
++
++	dev->mode_config.tv_hue_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE,
++				    "hue", 2);
++	dev->mode_config.tv_hue_property->values[0] = 0;
++	dev->mode_config.tv_hue_property->values[1] = 100;
++
++	return 0;
++}
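++/*
++ * Hypothetical driver-side sketch (the format names below are illustrative
++ * only): a TV encoder driver allocates its format-name list and passes it
++ * in once, e.g.:
++ *
++ *	static char *tv_format_names[] = { "NTSC-M", "PAL", "SECAM" };
++ *
++ *	drm_mode_create_tv_properties(dev,
++ *	    DRM_ARRAY_SIZE(tv_format_names), tv_format_names);
++ *
++ * after which the per-connector "mode", margin and picture-control
++ * properties created above can be attached to individual connectors with
++ * drm_connector_attach_property().
++ */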
++
++/**
++ * drm_mode_create_scaling_mode_property - create scaling mode property
++ * @dev: DRM device
++ *
++ * Called by a driver the first time it's needed, must be attached to desired
++ * connectors.
++ */
++int drm_mode_create_scaling_mode_property(struct drm_device *dev)
++{
++	struct drm_property *scaling_mode;
++	int i;
++
++	if (dev->mode_config.scaling_mode_property)
++		return 0;
++
++	scaling_mode =
++		drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
++				    DRM_ARRAY_SIZE(drm_scaling_mode_enum_list));
++	for (i = 0; i < DRM_ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
++		drm_property_add_enum(scaling_mode, i,
++				      drm_scaling_mode_enum_list[i].type,
++				      drm_scaling_mode_enum_list[i].name);
++
++	dev->mode_config.scaling_mode_property = scaling_mode;
++
++	return 0;
++}
++
++/**
++ * drm_mode_create_dithering_property - create dithering property
++ * @dev: DRM device
++ *
++ * Called by a driver the first time it's needed, must be attached to desired
++ * connectors.
++ */
++int drm_mode_create_dithering_property(struct drm_device *dev)
++{
++	struct drm_property *dithering_mode;
++	int i;
++
++	if (dev->mode_config.dithering_mode_property)
++		return 0;
++
++	dithering_mode =
++		drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
++				    DRM_ARRAY_SIZE(drm_dithering_mode_enum_list));
++	for (i = 0; i < DRM_ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
++		drm_property_add_enum(dithering_mode, i,
++				      drm_dithering_mode_enum_list[i].type,
++				      drm_dithering_mode_enum_list[i].name);
++	dev->mode_config.dithering_mode_property = dithering_mode;
++
++	return 0;
++}
++
++/**
++ * drm_mode_create_dirty_info_property - create dirty info property
++ * @dev: DRM device
++ *
++ * Called by a driver the first time it's needed, must be attached to desired
++ * connectors.
++ */
++int drm_mode_create_dirty_info_property(struct drm_device *dev)
++{
++	struct drm_property *dirty_info;
++	int i;
++
++	if (dev->mode_config.dirty_info_property)
++		return 0;
++
++	dirty_info =
++		drm_property_create(dev, DRM_MODE_PROP_ENUM |
++				    DRM_MODE_PROP_IMMUTABLE,
++				    "dirty",
++				    DRM_ARRAY_SIZE(drm_dirty_info_enum_list));
++	for (i = 0; i < DRM_ARRAY_SIZE(drm_dirty_info_enum_list); i++)
++		drm_property_add_enum(dirty_info, i,
++				      drm_dirty_info_enum_list[i].type,
++				      drm_dirty_info_enum_list[i].name);
++	dev->mode_config.dirty_info_property = dirty_info;
++
++	return 0;
++}
++
++/**
++ * drm_mode_config_init - initialize DRM mode_configuration structure
++ * @dev: DRM device
++ *
++ * LOCKING:
++ * None, should happen single threaded at init time.
++ *
++ * Initialize @dev's mode_config structure, used for tracking the graphics
++ * configuration of @dev.
++ */
++void drm_mode_config_init(struct drm_device *dev)
++{
++	sx_init(&dev->mode_config.mutex, "kmslk");
++	INIT_LIST_HEAD(&dev->mode_config.fb_list);
++	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
++	INIT_LIST_HEAD(&dev->mode_config.connector_list);
++	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
++	INIT_LIST_HEAD(&dev->mode_config.property_list);
++	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
++	INIT_LIST_HEAD(&dev->mode_config.plane_list);
++	drm_gem_names_init(&dev->mode_config.crtc_names);
++
++	sx_xlock(&dev->mode_config.mutex);
++	drm_mode_create_standard_connector_properties(dev);
++	sx_xunlock(&dev->mode_config.mutex);
++
++	/* Just to be sure */
++	dev->mode_config.num_fb = 0;
++	dev->mode_config.num_connector = 0;
++	dev->mode_config.num_crtc = 0;
++	dev->mode_config.num_encoder = 0;
++}
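++/*
++ * Hypothetical modesetting-init sketch (illustrative values only): a KMS
++ * driver is expected to call drm_mode_config_init() before registering any
++ * mode objects, then fill in the size limits consulted by the ioctls below:
++ *
++ *	drm_mode_config_init(dev);
++ *	dev->mode_config.min_width = 0;
++ *	dev->mode_config.min_height = 0;
++ *	dev->mode_config.max_width = 8192;
++ *	dev->mode_config.max_height = 8192;
++ *
++ * followed by drm_crtc_init(), drm_encoder_init() and drm_connector_init()
++ * for each hardware pipe and output.
++ */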
++
++static int
++drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
++{
++	uint32_t total_objects = 0;
++
++	total_objects += dev->mode_config.num_crtc;
++	total_objects += dev->mode_config.num_connector;
++	total_objects += dev->mode_config.num_encoder;
++
++	group->id_list = malloc(total_objects * sizeof(uint32_t),
++	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
++
++	group->num_crtcs = 0;
++	group->num_connectors = 0;
++	group->num_encoders = 0;
++	return 0;
++}
++
++int drm_mode_group_init_legacy_group(struct drm_device *dev,
++				     struct drm_mode_group *group)
++{
++	struct drm_crtc *crtc;
++	struct drm_encoder *encoder;
++	struct drm_connector *connector;
++	int ret;
++
++	if ((ret = drm_mode_group_init(dev, group)))
++		return ret;
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++		group->id_list[group->num_crtcs++] = crtc->base.id;
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
++		group->id_list[group->num_crtcs + group->num_encoders++] =
++		encoder->base.id;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		group->id_list[group->num_crtcs + group->num_encoders +
++			       group->num_connectors++] = connector->base.id;
++
++	return 0;
++}
++
++/**
++ * drm_mode_config_cleanup - free up DRM mode_config info
++ * @dev: DRM device
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Free up all the connectors and CRTCs associated with this DRM device, then
++ * free up the framebuffers and associated buffer objects.
++ *
++ * FIXME: cleanup any dangling user buffer objects too
++ */
++void drm_mode_config_cleanup(struct drm_device *dev)
++{
++	struct drm_connector *connector, *ot;
++	struct drm_crtc *crtc, *ct;
++	struct drm_encoder *encoder, *enct;
++	struct drm_framebuffer *fb, *fbt;
++	struct drm_property *property, *pt;
++	struct drm_plane *plane, *plt;
++
++	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
++				 head) {
++		encoder->funcs->destroy(encoder);
++	}
++
++	list_for_each_entry_safe(connector, ot,
++				 &dev->mode_config.connector_list, head) {
++		connector->funcs->destroy(connector);
++	}
++
++	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
++				 head) {
++		drm_property_destroy(dev, property);
++	}
++
++	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
++		fb->funcs->destroy(fb);
++	}
++
++	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
++		crtc->funcs->destroy(crtc);
++	}
++
++	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
++				 head) {
++		plane->funcs->destroy(plane);
++	}
++}
++
++/**
++ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
++ * @out: drm_mode_modeinfo struct to return to the user
++ * @in: drm_display_mode to use
++ *
++ * LOCKING:
++ * None.
++ *
++ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
++ * the user.
++ */
++static void
++drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
++			       struct drm_display_mode *in)
++{
++	out->clock = in->clock;
++	out->hdisplay = in->hdisplay;
++	out->hsync_start = in->hsync_start;
++	out->hsync_end = in->hsync_end;
++	out->htotal = in->htotal;
++	out->hskew = in->hskew;
++	out->vdisplay = in->vdisplay;
++	out->vsync_start = in->vsync_start;
++	out->vsync_end = in->vsync_end;
++	out->vtotal = in->vtotal;
++	out->vscan = in->vscan;
++	out->vrefresh = in->vrefresh;
++	out->flags = in->flags;
++	out->type = in->type;
++	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
++	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
++}
++
++/**
++ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
++ * @out: drm_display_mode to return to the user
++ * @in: drm_mode_modeinfo to use
++ *
++ * LOCKING:
++ * None.
++ *
++ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
++ * the caller.
++ */
++static void drm_crtc_convert_umode(struct drm_display_mode *out,
++			    struct drm_mode_modeinfo *in)
++{
++	out->clock = in->clock;
++	out->hdisplay = in->hdisplay;
++	out->hsync_start = in->hsync_start;
++	out->hsync_end = in->hsync_end;
++	out->htotal = in->htotal;
++	out->hskew = in->hskew;
++	out->vdisplay = in->vdisplay;
++	out->vsync_start = in->vsync_start;
++	out->vsync_end = in->vsync_end;
++	out->vtotal = in->vtotal;
++	out->vscan = in->vscan;
++	out->vrefresh = in->vrefresh;
++	out->flags = in->flags;
++	out->type = in->type;
++	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
++	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
++}
++
++/**
++ * drm_mode_getresources - get graphics configuration
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Construct a set of configuration description structures and return
++ * them to the user, including CRTC, connector and framebuffer configuration.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_getresources(struct drm_device *dev, void *data,
++			  struct drm_file *file_priv)
++{
++	struct drm_mode_card_res *card_res = data;
++	struct list_head *lh;
++	struct drm_framebuffer *fb;
++	struct drm_connector *connector;
++	struct drm_crtc *crtc;
++	struct drm_encoder *encoder;
++	int ret = 0;
++	int connector_count = 0;
++	int crtc_count = 0;
++	int fb_count = 0;
++	int encoder_count = 0;
++	int copied = 0, i;
++	uint32_t __user *fb_id;
++	uint32_t __user *crtc_id;
++	uint32_t __user *connector_id;
++	uint32_t __user *encoder_id;
++	struct drm_mode_group *mode_group;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	/*
++	 * For the non-control nodes we need to limit the list of resources
++	 * by IDs in the group list for this node
++	 */
++	list_for_each(lh, &file_priv->fbs)
++		fb_count++;
++
++#if 1
++	mode_group = NULL; /* XXXKIB */
++	if (1 || file_priv->master) {
++#else
++	mode_group = &file_priv->masterp->minor->mode_group;
++	if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
++#endif
++
++		list_for_each(lh, &dev->mode_config.crtc_list)
++			crtc_count++;
++
++		list_for_each(lh, &dev->mode_config.connector_list)
++			connector_count++;
++
++		list_for_each(lh, &dev->mode_config.encoder_list)
++			encoder_count++;
++	} else {
++
++		crtc_count = mode_group->num_crtcs;
++		connector_count = mode_group->num_connectors;
++		encoder_count = mode_group->num_encoders;
++	}
++
++	card_res->max_height = dev->mode_config.max_height;
++	card_res->min_height = dev->mode_config.min_height;
++	card_res->max_width = dev->mode_config.max_width;
++	card_res->min_width = dev->mode_config.min_width;
++
++	/* handle this in 4 parts */
++	/* FBs */
++	if (card_res->count_fbs >= fb_count) {
++		copied = 0;
++		fb_id = (uint32_t *)(uintptr_t)card_res->fb_id_ptr;
++		list_for_each_entry(fb, &file_priv->fbs, filp_head) {
++			if (copyout(&fb->base.id, fb_id + copied,
++			    sizeof(uint32_t))) {
++				ret = EFAULT;
++				goto out;
++			}
++			copied++;
++		}
++	}
++	card_res->count_fbs = fb_count;
++
++	/* CRTCs */
++	if (card_res->count_crtcs >= crtc_count) {
++		copied = 0;
++		crtc_id = (uint32_t *)(uintptr_t)card_res->crtc_id_ptr;
++#if 1
++		if (1 || file_priv->master) {
++#else
++		if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
++#endif
++			list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++					    head) {
++				DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
++				if (copyout(&crtc->base.id, crtc_id +
++				    copied, sizeof(uint32_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++				copied++;
++			}
++		} else {
++			for (i = 0; i < mode_group->num_crtcs; i++) {
++				if (copyout(&mode_group->id_list[i],
++				    crtc_id + copied, sizeof(uint32_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++				copied++;
++			}
++		}
++	}
++	card_res->count_crtcs = crtc_count;
++
++	/* Encoders */
++	if (card_res->count_encoders >= encoder_count) {
++		copied = 0;
++		encoder_id = (uint32_t *)(uintptr_t)card_res->encoder_id_ptr;
++#if 1
++		if (1 || file_priv->master) {
++#else
++		if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
++#endif
++			list_for_each_entry(encoder,
++					    &dev->mode_config.encoder_list,
++					    head) {
++				DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
++						drm_get_encoder_name(encoder));
++				if (copyout(&encoder->base.id, encoder_id +
++				    copied, sizeof(uint32_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++				copied++;
++			}
++		} else {
++			for (i = mode_group->num_crtcs;
++			    i < mode_group->num_crtcs + mode_group->num_encoders;
++			     i++) {
++				if (copyout(&mode_group->id_list[i],
++				    encoder_id + copied, sizeof(uint32_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++				copied++;
++			}
++
++		}
++	}
++	card_res->count_encoders = encoder_count;
++
++	/* Connectors */
++	if (card_res->count_connectors >= connector_count) {
++		copied = 0;
++		connector_id = (uint32_t *)(uintptr_t)card_res->connector_id_ptr;
++#if 1
++		if (1 || file_priv->master) {
++#else
++		if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
++#endif
++			list_for_each_entry(connector,
++					    &dev->mode_config.connector_list,
++					    head) {
++				DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
++					connector->base.id,
++					drm_get_connector_name(connector));
++				if (copyout(&connector->base.id,
++				    connector_id + copied, sizeof(uint32_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++				copied++;
++			}
++		} else {
++			int start = mode_group->num_crtcs +
++				mode_group->num_encoders;
++			for (i = start; i < start + mode_group->num_connectors; i++) {
++				if (copyout(&mode_group->id_list[i],
++				    connector_id + copied, sizeof(uint32_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++				copied++;
++			}
++		}
++	}
++	card_res->count_connectors = connector_count;
++
++	DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
++		  card_res->count_connectors, card_res->count_encoders);
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
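++/*
++ * Hypothetical userspace sketch (illustrative only): like the other KMS
++ * ioctls in this file, DRM_IOCTL_MODE_GETRESOURCES is issued twice -- once
++ * with the counts zeroed to learn how many IDs exist, then again with user
++ * buffers sized from those counts:
++ *
++ *	struct drm_mode_card_res res;
++ *	memset(&res, 0, sizeof(res));
++ *	ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);	// fills count_* only
++ *	res.crtc_id_ptr = (uintptr_t)calloc(res.count_crtcs, sizeof(uint32_t));
++ *	// ... likewise for fbs, encoders and connectors ...
++ *	ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);	// fills the ID arrays
++ */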
++
++/**
++ * drm_mode_getcrtc - get CRTC configuration
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Caller? (FIXME)
++ *
++ * Construct a CRTC configuration structure to return to the user.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_getcrtc(struct drm_device *dev,
++		     void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_crtc *crtc_resp = data;
++	struct drm_crtc *crtc;
++	struct drm_mode_object *obj;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
++				   DRM_MODE_OBJECT_CRTC);
++	if (!obj) {
++		ret = (EINVAL);
++		goto out;
++	}
++	crtc = obj_to_crtc(obj);
++
++	crtc_resp->x = crtc->x;
++	crtc_resp->y = crtc->y;
++	crtc_resp->gamma_size = crtc->gamma_size;
++	if (crtc->fb)
++		crtc_resp->fb_id = crtc->fb->base.id;
++	else
++		crtc_resp->fb_id = 0;
++
++	if (crtc->enabled) {
++
++		drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
++		crtc_resp->mode_valid = 1;
++
++	} else {
++		crtc_resp->mode_valid = 0;
++	}
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++/**
++ * drm_mode_getconnector - get connector configuration
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Caller? (FIXME)
++ *
++ * Construct a connector configuration structure to return to the user.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_getconnector(struct drm_device *dev, void *data,
++			  struct drm_file *file_priv)
++{
++	struct drm_mode_get_connector *out_resp = data;
++	struct drm_mode_object *obj;
++	struct drm_connector *connector;
++	struct drm_display_mode *mode;
++	int mode_count = 0;
++	int props_count = 0;
++	int encoders_count = 0;
++	int ret = 0;
++	int copied = 0;
++	int i;
++	struct drm_mode_modeinfo u_mode;
++	struct drm_mode_modeinfo __user *mode_ptr;
++	uint32_t *prop_ptr;
++	uint64_t *prop_values;
++	uint32_t *encoder_ptr;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
++
++	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	obj = drm_mode_object_find(dev, out_resp->connector_id,
++				   DRM_MODE_OBJECT_CONNECTOR);
++	if (!obj) {
++		ret = EINVAL;
++		goto out;
++	}
++	connector = obj_to_connector(obj);
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
++		if (connector->property_ids[i] != 0) {
++			props_count++;
++		}
++	}
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++		if (connector->encoder_ids[i] != 0) {
++			encoders_count++;
++		}
++	}
++
++	if (out_resp->count_modes == 0) {
++		connector->funcs->fill_modes(connector,
++					     dev->mode_config.max_width,
++					     dev->mode_config.max_height);
++	}
++
++	/* delayed so we get modes regardless of pre-fill_modes state */
++	list_for_each_entry(mode, &connector->modes, head)
++		mode_count++;
++
++	out_resp->connector_id = connector->base.id;
++	out_resp->connector_type = connector->connector_type;
++	out_resp->connector_type_id = connector->connector_type_id;
++	out_resp->mm_width = connector->display_info.width_mm;
++	out_resp->mm_height = connector->display_info.height_mm;
++	out_resp->subpixel = connector->display_info.subpixel_order;
++	out_resp->connection = connector->status;
++	if (connector->encoder)
++		out_resp->encoder_id = connector->encoder->base.id;
++	else
++		out_resp->encoder_id = 0;
++
++	/*
++	 * This ioctl is called twice, once to determine how much space is
++	 * needed, and the 2nd time to fill it.
++	 */
++	if ((out_resp->count_modes >= mode_count) && mode_count) {
++		copied = 0;
++		mode_ptr = (struct drm_mode_modeinfo *)(uintptr_t)out_resp->modes_ptr;
++		list_for_each_entry(mode, &connector->modes, head) {
++			drm_crtc_convert_to_umode(&u_mode, mode);
++			if (copyout(&u_mode, mode_ptr + copied,
++			    sizeof(u_mode))) {
++				ret = EFAULT;
++				goto out;
++			}
++			copied++;
++		}
++	}
++	out_resp->count_modes = mode_count;
++
++	if ((out_resp->count_props >= props_count) && props_count) {
++		copied = 0;
++		prop_ptr = (uint32_t *)(uintptr_t)(out_resp->props_ptr);
++		prop_values = (uint64_t *)(uintptr_t)(out_resp->prop_values_ptr);
++		for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
++			if (connector->property_ids[i] != 0) {
++				if (copyout(&connector->property_ids[i],
++				    prop_ptr + copied, sizeof(uint32_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++
++				if (copyout(&connector->property_values[i],
++				    prop_values + copied, sizeof(uint64_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++				copied++;
++			}
++		}
++	}
++	out_resp->count_props = props_count;
++
++	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
++		copied = 0;
++		encoder_ptr = (uint32_t *)(uintptr_t)(out_resp->encoders_ptr);
++		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++			if (connector->encoder_ids[i] != 0) {
++				if (copyout(&connector->encoder_ids[i],
++				    encoder_ptr + copied, sizeof(uint32_t))) {
++					ret = EFAULT;
++					goto out;
++				}
++				copied++;
++			}
++		}
++	}
++	out_resp->count_encoders = encoders_count;
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++int drm_mode_getencoder(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	struct drm_mode_get_encoder *enc_resp = data;
++	struct drm_mode_object *obj;
++	struct drm_encoder *encoder;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, enc_resp->encoder_id,
++				   DRM_MODE_OBJECT_ENCODER);
++	if (!obj) {
++		ret = EINVAL;
++		goto out;
++	}
++	encoder = obj_to_encoder(obj);
++
++	if (encoder->crtc)
++		enc_resp->crtc_id = encoder->crtc->base.id;
++	else
++		enc_resp->crtc_id = 0;
++	enc_resp->encoder_type = encoder->encoder_type;
++	enc_resp->encoder_id = encoder->base.id;
++	enc_resp->possible_crtcs = encoder->possible_crtcs;
++	enc_resp->possible_clones = encoder->possible_clones;
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++/**
++ * drm_mode_getplane_res - get plane info
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * Return a plane count and set of IDs.
++ */
++int drm_mode_getplane_res(struct drm_device *dev, void *data,
++			    struct drm_file *file_priv)
++{
++	struct drm_mode_get_plane_res *plane_resp = data;
++	struct drm_mode_config *config;
++	struct drm_plane *plane;
++	uint32_t *plane_ptr;
++	int copied = 0, ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	config = &dev->mode_config;
++
++	/*
++	 * This ioctl is called twice, once to determine how much space is
++	 * needed, and the 2nd time to fill it.
++	 */
++	if (config->num_plane &&
++	    (plane_resp->count_planes >= config->num_plane)) {
++		plane_ptr = (uint32_t *)(unsigned long)plane_resp->plane_id_ptr;
++
++		list_for_each_entry(plane, &config->plane_list, head) {
++			if (copyout(&plane->base.id, plane_ptr + copied,
++			    sizeof(uint32_t))) {
++				ret = EFAULT;
++				goto out;
++			}
++			copied++;
++		}
++	}
++	plane_resp->count_planes = config->num_plane;
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++/**
++ * drm_mode_getplane - get plane info
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * Return plane info, including formats supported, gamma size, any
++ * current fb, etc.
++ */
++int drm_mode_getplane(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	struct drm_mode_get_plane *plane_resp = data;
++	struct drm_mode_object *obj;
++	struct drm_plane *plane;
++	uint32_t *format_ptr;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, plane_resp->plane_id,
++				   DRM_MODE_OBJECT_PLANE);
++	if (!obj) {
++		ret = ENOENT;
++		goto out;
++	}
++	plane = obj_to_plane(obj);
++
++	if (plane->crtc)
++		plane_resp->crtc_id = plane->crtc->base.id;
++	else
++		plane_resp->crtc_id = 0;
++
++	if (plane->fb)
++		plane_resp->fb_id = plane->fb->base.id;
++	else
++		plane_resp->fb_id = 0;
++
++	plane_resp->plane_id = plane->base.id;
++	plane_resp->possible_crtcs = plane->possible_crtcs;
++	plane_resp->gamma_size = plane->gamma_size;
++
++	/*
++	 * This ioctl is called twice, once to determine how much space is
++	 * needed, and the 2nd time to fill it.
++	 */
++	if (plane->format_count &&
++	    (plane_resp->count_format_types >= plane->format_count)) {
++		format_ptr = (uint32_t *)(unsigned long)plane_resp->format_type_ptr;
++		if (copyout(plane->format_types,
++				 format_ptr,
++				 sizeof(uint32_t) * plane->format_count)) {
++			ret = EFAULT;
++			goto out;
++		}
++	}
++	plane_resp->count_format_types = plane->format_count;
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++/**
++ * drm_mode_setplane - set up or tear down a plane
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * Set plane info, including placement, fb, scaling, and other factors.
++ * Or pass a NULL fb to disable.
++ */
++int drm_mode_setplane(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	struct drm_mode_set_plane *plane_req = data;
++	struct drm_mode_object *obj;
++	struct drm_plane *plane;
++	struct drm_crtc *crtc;
++	struct drm_framebuffer *fb;
++	int ret = 0;
++	unsigned int fb_width, fb_height;
++	int i;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	/*
++	 * First, find the plane, crtc, and fb objects.  If not available,
++	 * we don't bother to call the driver.
++	 */
++	obj = drm_mode_object_find(dev, plane_req->plane_id,
++				   DRM_MODE_OBJECT_PLANE);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown plane ID %d\n",
++			      plane_req->plane_id);
++		ret = ENOENT;
++		goto out;
++	}
++	plane = obj_to_plane(obj);
++
++	/* No fb means shut it down */
++	if (!plane_req->fb_id) {
++		plane->funcs->disable_plane(plane);
++		plane->crtc = NULL;
++		plane->fb = NULL;
++		goto out;
++	}
++
++	obj = drm_mode_object_find(dev, plane_req->crtc_id,
++				   DRM_MODE_OBJECT_CRTC);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown crtc ID %d\n",
++			      plane_req->crtc_id);
++		ret = ENOENT;
++		goto out;
++	}
++	crtc = obj_to_crtc(obj);
++
++	obj = drm_mode_object_find(dev, plane_req->fb_id,
++				   DRM_MODE_OBJECT_FB);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
++			      plane_req->fb_id);
++		ret = ENOENT;
++		goto out;
++	}
++	fb = obj_to_fb(obj);
++
++	/* Check whether this plane supports the fb pixel format. */
++	for (i = 0; i < plane->format_count; i++)
++		if (fb->pixel_format == plane->format_types[i])
++			break;
++	if (i == plane->format_count) {
++		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
++		ret = EINVAL;
++		goto out;
++	}
++
++	fb_width = fb->width << 16;
++	fb_height = fb->height << 16;
++
++	/* Make sure source coordinates are inside the fb. */
++	if (plane_req->src_w > fb_width ||
++	    plane_req->src_x > fb_width - plane_req->src_w ||
++	    plane_req->src_h > fb_height ||
++	    plane_req->src_y > fb_height - plane_req->src_h) {
++		DRM_DEBUG_KMS("Invalid source coordinates "
++			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
++			      plane_req->src_w >> 16,
++			      ((plane_req->src_w & 0xffff) * 15625) >> 10,
++			      plane_req->src_h >> 16,
++			      ((plane_req->src_h & 0xffff) * 15625) >> 10,
++			      plane_req->src_x >> 16,
++			      ((plane_req->src_x & 0xffff) * 15625) >> 10,
++			      plane_req->src_y >> 16,
++			      ((plane_req->src_y & 0xffff) * 15625) >> 10);
++		ret = ENOSPC;
++		goto out;
++	}
++
++	/* Give drivers some help against integer overflows */
++	if (plane_req->crtc_w > INT_MAX ||
++	    plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
++	    plane_req->crtc_h > INT_MAX ||
++	    plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
++		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
++			      plane_req->crtc_w, plane_req->crtc_h,
++			      plane_req->crtc_x, plane_req->crtc_y);
++		ret = ERANGE;
++		goto out;
++	}
++
++	ret = -plane->funcs->update_plane(plane, crtc, fb,
++					 plane_req->crtc_x, plane_req->crtc_y,
++					 plane_req->crtc_w, plane_req->crtc_h,
++					 plane_req->src_x, plane_req->src_y,
++					 plane_req->src_w, plane_req->src_h);
++	if (!ret) {
++		plane->crtc = crtc;
++		plane->fb = fb;
++	}
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++
++	return ret;
++}
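++
++/*
++ * The src_* fields above are in 16.16 fixed point while the crtc_* fields
++ * are whole pixels.  A hypothetical request that scans a full 640x480
++ * framebuffer out to a same-sized CRTC region would therefore look like:
++ *
++ *	struct drm_mode_set_plane req;
++ *
++ *	memset(&req, 0, sizeof(req));
++ *	req.plane_id = plane_id;
++ *	req.crtc_id = crtc_id;
++ *	req.fb_id = fb_id;
++ *	req.crtc_w = 640;
++ *	req.crtc_h = 480;
++ *	req.src_w = 640 << 16;
++ *	req.src_h = 480 << 16;
++ */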
++
++/**
++ * drm_mode_setcrtc - set CRTC configuration
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Caller? (FIXME)
++ *
++ * Build a new CRTC configuration based on user request.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_setcrtc(struct drm_device *dev, void *data,
++		     struct drm_file *file_priv)
++{
++	struct drm_mode_config *config = &dev->mode_config;
++	struct drm_mode_crtc *crtc_req = data;
++	struct drm_mode_object *obj;
++	struct drm_crtc *crtc, *crtcfb;
++	struct drm_connector **connector_set = NULL, *connector;
++	struct drm_framebuffer *fb = NULL;
++	struct drm_display_mode *mode = NULL;
++	struct drm_mode_set set;
++	uint32_t *set_connectors_ptr;
++	int ret = 0;
++	int i;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, crtc_req->crtc_id,
++				   DRM_MODE_OBJECT_CRTC);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
++		ret = EINVAL;
++		goto out;
++	}
++	crtc = obj_to_crtc(obj);
++	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
++
++	if (crtc_req->mode_valid) {
++		/* If we have a mode we need a framebuffer. */
++		/* If we pass -1, set the mode with the currently bound fb */
++		if (crtc_req->fb_id == -1) {
++			list_for_each_entry(crtcfb,
++					    &dev->mode_config.crtc_list, head) {
++				if (crtcfb == crtc) {
++					DRM_DEBUG_KMS("Using current fb for "
++							"setmode\n");
++					fb = crtc->fb;
++				}
++			}
++		} else {
++			obj = drm_mode_object_find(dev, crtc_req->fb_id,
++						   DRM_MODE_OBJECT_FB);
++			if (!obj) {
++				DRM_DEBUG_KMS("Unknown FB ID %d\n",
++						crtc_req->fb_id);
++				ret = EINVAL;
++				goto out;
++			}
++			fb = obj_to_fb(obj);
++		}
++
++		mode = drm_mode_create(dev);
++		drm_crtc_convert_umode(mode, &crtc_req->mode);
++		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++	}
++
++	if (crtc_req->count_connectors == 0 && mode) {
++		DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
++		ret = EINVAL;
++		goto out;
++	}
++
++	if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
++		DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
++			  crtc_req->count_connectors);
++		ret = EINVAL;
++		goto out;
++	}
++
++	if (crtc_req->count_connectors > 0) {
++		u32 out_id;
++
++		/* Avoid unbounded kernel memory allocation */
++		if (crtc_req->count_connectors > config->num_connector) {
++			ret = EINVAL;
++			goto out;
++		}
++
++		connector_set = malloc(crtc_req->count_connectors *
++		    sizeof(struct drm_connector *), DRM_MEM_KMS, M_WAITOK);
++
++		for (i = 0; i < crtc_req->count_connectors; i++) {
++			set_connectors_ptr = (uint32_t *)(uintptr_t)crtc_req->set_connectors_ptr;
++			if (copyin(&set_connectors_ptr[i], &out_id, sizeof(uint32_t))) {
++				ret = EFAULT;
++				goto out;
++			}
++
++			obj = drm_mode_object_find(dev, out_id,
++						   DRM_MODE_OBJECT_CONNECTOR);
++			if (!obj) {
++				DRM_DEBUG_KMS("Connector id %d unknown\n",
++						out_id);
++				ret = EINVAL;
++				goto out;
++			}
++			connector = obj_to_connector(obj);
++			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
++					connector->base.id,
++					drm_get_connector_name(connector));
++
++			connector_set[i] = connector;
++		}
++	}
++
++	set.crtc = crtc;
++	set.x = crtc_req->x;
++	set.y = crtc_req->y;
++	set.mode = mode;
++	set.connectors = connector_set;
++	set.num_connectors = crtc_req->count_connectors;
++	set.fb = fb;
++	ret = crtc->funcs->set_config(&set);
++
++out:
++	free(connector_set, DRM_MEM_KMS);
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
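++
++/*
++ * A minimal, hypothetical modeset request using only the fields handled
++ * above; crtc_id and connector_id are caller-supplied object ids and umode
++ * is a previously filled struct drm_mode_modeinfo.  Passing fb_id == -1
++ * with mode_valid set re-uses the framebuffer already bound to the CRTC:
++ *
++ *	uint32_t conn = connector_id;
++ *	struct drm_mode_crtc req;
++ *
++ *	memset(&req, 0, sizeof(req));
++ *	req.crtc_id = crtc_id;
++ *	req.fb_id = -1;
++ *	req.mode_valid = 1;
++ *	req.mode = umode;
++ *	req.count_connectors = 1;
++ *	req.set_connectors_ptr = (uintptr_t)&conn;
++ */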
++
++int drm_mode_cursor_ioctl(struct drm_device *dev,
++			void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_cursor *req = data;
++	struct drm_mode_object *obj;
++	struct drm_crtc *crtc;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	if (!req->flags)
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
++		ret = EINVAL;
++		goto out;
++	}
++	crtc = obj_to_crtc(obj);
++
++	if (req->flags & DRM_MODE_CURSOR_BO) {
++		if (!crtc->funcs->cursor_set) {
++			ret = ENXIO;
++			goto out;
++		}
++		/* Turns off the cursor if handle is 0 */
++		ret = -crtc->funcs->cursor_set(crtc, file_priv, req->handle,
++					      req->width, req->height);
++	}
++
++	if (req->flags & DRM_MODE_CURSOR_MOVE) {
++		if (crtc->funcs->cursor_move) {
++			ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
++		} else {
++			ret = EFAULT;
++			goto out;
++		}
++	}
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++/* Original addfb only supported RGB formats, so figure out which one */
++uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
++{
++	uint32_t fmt;
++
++	switch (bpp) {
++	case 8:
++		fmt = DRM_FORMAT_RGB332;
++		break;
++	case 16:
++		if (depth == 15)
++			fmt = DRM_FORMAT_XRGB1555;
++		else
++			fmt = DRM_FORMAT_RGB565;
++		break;
++	case 24:
++		fmt = DRM_FORMAT_RGB888;
++		break;
++	case 32:
++		if (depth == 24)
++			fmt = DRM_FORMAT_XRGB8888;
++		else if (depth == 30)
++			fmt = DRM_FORMAT_XRGB2101010;
++		else
++			fmt = DRM_FORMAT_ARGB8888;
++		break;
++	default:
++		DRM_ERROR("bad bpp, assuming RGB24 pixel format\n");
++		fmt = DRM_FORMAT_XRGB8888;
++		break;
++	}
++
++	return fmt;
++}
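++
++/*
++ * For example, the mapping above gives:
++ *
++ *	drm_mode_legacy_fb_format(32, 24) == DRM_FORMAT_XRGB8888
++ *	drm_mode_legacy_fb_format(16, 16) == DRM_FORMAT_RGB565
++ *	drm_mode_legacy_fb_format(16, 15) == DRM_FORMAT_XRGB1555
++ */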
++
++/**
++ * drm_mode_addfb - add an FB to the graphics configuration
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Add a new FB to the specified CRTC, given a user request.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_addfb(struct drm_device *dev,
++		   void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_fb_cmd *or = data;
++	struct drm_mode_fb_cmd2 r = {};
++	struct drm_mode_config *config = &dev->mode_config;
++	struct drm_framebuffer *fb;
++	int ret = 0;
++
++	/* Use new struct with format internally */
++	r.fb_id = or->fb_id;
++	r.width = or->width;
++	r.height = or->height;
++	r.pitches[0] = or->pitch;
++	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
++	r.handles[0] = or->handle;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	if ((config->min_width > r.width) || (r.width > config->max_width))
++		return (EINVAL);
++	if ((config->min_height > r.height) || (r.height > config->max_height))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	ret = -dev->mode_config.funcs->fb_create(dev, file_priv, &r, &fb);
++	if (ret != 0) {
++		DRM_ERROR("could not create framebuffer, error %d\n", ret);
++		goto out;
++	}
++
++	or->fb_id = fb->base.id;
++	list_add(&fb->filp_head, &file_priv->fbs);
++	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++static int format_check(struct drm_mode_fb_cmd2 *r)
++{
++	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
++
++	switch (format) {
++	case DRM_FORMAT_C8:
++	case DRM_FORMAT_RGB332:
++	case DRM_FORMAT_BGR233:
++	case DRM_FORMAT_XRGB4444:
++	case DRM_FORMAT_XBGR4444:
++	case DRM_FORMAT_RGBX4444:
++	case DRM_FORMAT_BGRX4444:
++	case DRM_FORMAT_ARGB4444:
++	case DRM_FORMAT_ABGR4444:
++	case DRM_FORMAT_RGBA4444:
++	case DRM_FORMAT_BGRA4444:
++	case DRM_FORMAT_XRGB1555:
++	case DRM_FORMAT_XBGR1555:
++	case DRM_FORMAT_RGBX5551:
++	case DRM_FORMAT_BGRX5551:
++	case DRM_FORMAT_ARGB1555:
++	case DRM_FORMAT_ABGR1555:
++	case DRM_FORMAT_RGBA5551:
++	case DRM_FORMAT_BGRA5551:
++	case DRM_FORMAT_RGB565:
++	case DRM_FORMAT_BGR565:
++	case DRM_FORMAT_RGB888:
++	case DRM_FORMAT_BGR888:
++	case DRM_FORMAT_XRGB8888:
++	case DRM_FORMAT_XBGR8888:
++	case DRM_FORMAT_RGBX8888:
++	case DRM_FORMAT_BGRX8888:
++	case DRM_FORMAT_ARGB8888:
++	case DRM_FORMAT_ABGR8888:
++	case DRM_FORMAT_RGBA8888:
++	case DRM_FORMAT_BGRA8888:
++	case DRM_FORMAT_XRGB2101010:
++	case DRM_FORMAT_XBGR2101010:
++	case DRM_FORMAT_RGBX1010102:
++	case DRM_FORMAT_BGRX1010102:
++	case DRM_FORMAT_ARGB2101010:
++	case DRM_FORMAT_ABGR2101010:
++	case DRM_FORMAT_RGBA1010102:
++	case DRM_FORMAT_BGRA1010102:
++	case DRM_FORMAT_YUYV:
++	case DRM_FORMAT_YVYU:
++	case DRM_FORMAT_UYVY:
++	case DRM_FORMAT_VYUY:
++	case DRM_FORMAT_AYUV:
++	case DRM_FORMAT_NV12:
++	case DRM_FORMAT_NV21:
++	case DRM_FORMAT_NV16:
++	case DRM_FORMAT_NV61:
++	case DRM_FORMAT_YUV410:
++	case DRM_FORMAT_YVU410:
++	case DRM_FORMAT_YUV411:
++	case DRM_FORMAT_YVU411:
++	case DRM_FORMAT_YUV420:
++	case DRM_FORMAT_YVU420:
++	case DRM_FORMAT_YUV422:
++	case DRM_FORMAT_YVU422:
++	case DRM_FORMAT_YUV444:
++	case DRM_FORMAT_YVU444:
++		return 0;
++	default:
++		return (EINVAL);
++	}
++}
++
++/**
++ * drm_mode_addfb2 - add an FB to the graphics configuration
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Add a new FB to the specified CRTC, given a user request with format.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_addfb2(struct drm_device *dev,
++		    void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_fb_cmd2 *r = data;
++	struct drm_mode_config *config = &dev->mode_config;
++	struct drm_framebuffer *fb;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	if ((config->min_width > r->width) || (r->width > config->max_width)) {
++		DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
++			  r->width, config->min_width, config->max_width);
++		return (EINVAL);
++	}
++	if ((config->min_height > r->height) || (r->height > config->max_height)) {
++		DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
++			  r->height, config->min_height, config->max_height);
++		return (EINVAL);
++	}
++
++	ret = format_check(r);
++	if (ret) {
++		DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
++		return ret;
++	}
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	/* TODO check buffer is sufficiently large */
++	/* TODO setup destructor callback */
++
++	ret = -dev->mode_config.funcs->fb_create(dev, file_priv, r, &fb);
++	if (ret != 0) {
++		DRM_ERROR("could not create framebuffer, error %d\n", ret);
++		goto out;
++	}
++
++	r->fb_id = fb->base.id;
++	list_add(&fb->filp_head, &file_priv->fbs);
++	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return (ret);
++}
++
++/**
++ * drm_mode_rmfb - remove an FB from the configuration
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Remove the FB specified by the user.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_rmfb(struct drm_device *dev,
++		   void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_object *obj;
++	struct drm_framebuffer *fb = NULL;
++	struct drm_framebuffer *fbl = NULL;
++	uint32_t *id = data;
++	int ret = 0;
++	int found = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
++	/* TODO check that we really get a framebuffer back. */
++	if (!obj) {
++		ret = EINVAL;
++		goto out;
++	}
++	fb = obj_to_fb(obj);
++
++	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
++		if (fb == fbl)
++			found = 1;
++
++	if (!found) {
++		ret = EINVAL;
++		goto out;
++	}
++
++	/* TODO release all crtc connected to the framebuffer */
++	/* TODO unhook the destructor from the buffer object */
++
++	list_del(&fb->filp_head);
++	fb->funcs->destroy(fb);
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++/**
++ * drm_mode_getfb - get FB info
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Caller? (FIXME)
++ *
++ * Lookup the FB given its ID and return info about it.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_getfb(struct drm_device *dev,
++		   void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_fb_cmd *r = data;
++	struct drm_mode_object *obj;
++	struct drm_framebuffer *fb;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
++	if (!obj) {
++		ret = EINVAL;
++		goto out;
++	}
++	fb = obj_to_fb(obj);
++
++	r->height = fb->height;
++	r->width = fb->width;
++	r->depth = fb->depth;
++	r->bpp = fb->bits_per_pixel;
++	r->pitch = fb->pitches[0];
++	fb->funcs->create_handle(fb, file_priv, &r->handle);
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
++			   void *data, struct drm_file *file_priv)
++{
++	struct drm_clip_rect __user *clips_ptr;
++	struct drm_clip_rect *clips = NULL;
++	struct drm_mode_fb_dirty_cmd *r = data;
++	struct drm_mode_object *obj;
++	struct drm_framebuffer *fb;
++	unsigned flags;
++	int num_clips;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
++	if (!obj) {
++		ret = EINVAL;
++		goto out_err1;
++	}
++	fb = obj_to_fb(obj);
++
++	num_clips = r->num_clips;
++	clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
++
++	if (!num_clips != !clips_ptr) {
++		ret = EINVAL;
++		goto out_err1;
++	}
++
++	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
++
++	/* If userspace annotates copy, clips must come in pairs */
++	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
++		ret = EINVAL;
++		goto out_err1;
++	}
++
++	if (num_clips && clips_ptr) {
++		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
++			ret = EINVAL;
++			goto out_err1;
++		}
++		clips = malloc(num_clips * sizeof(*clips), DRM_MEM_KMS,
++		    M_WAITOK | M_ZERO);
++
++		ret = copyin(clips_ptr, clips, num_clips * sizeof(*clips));
++		if (ret)
++			goto out_err2;
++	}
++
++	if (fb->funcs->dirty) {
++		ret = -fb->funcs->dirty(fb, file_priv, flags, r->color,
++				       clips, num_clips);
++	} else {
++		ret = ENOSYS;
++		goto out_err2;
++	}
++
++out_err2:
++	free(clips, DRM_MEM_KMS);
++out_err1:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
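++
++/*
++ * A hypothetical caller marking a single 64x64 region dirty would pass one
++ * clip rectangle (struct drm_clip_rect carries the x1/y1 and x2/y2 corner
++ * coordinates):
++ *
++ *	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
++ *	struct drm_mode_fb_dirty_cmd cmd;
++ *
++ *	memset(&cmd, 0, sizeof(cmd));
++ *	cmd.fb_id = fb_id;
++ *	cmd.num_clips = 1;
++ *	cmd.clips_ptr = (uintptr_t)&clip;
++ */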
++
++
++/**
++ * drm_fb_release - remove and free the FBs on this file
++ * @filp: file * from the ioctl
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Destroy all the FBs associated with @filp.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++void drm_fb_release(struct drm_file *priv)
++{
++#if 1
++	struct drm_device *dev = priv->dev;
++#else
++	struct drm_device *dev = priv->minor->dev;
++#endif
++	struct drm_framebuffer *fb, *tfb;
++
++	sx_xlock(&dev->mode_config.mutex);
++	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
++		list_del(&fb->filp_head);
++		fb->funcs->destroy(fb);
++	}
++	sx_xunlock(&dev->mode_config.mutex);
++}
++
++/**
++ * drm_mode_attachmode - add a mode to the user mode list
++ * @dev: DRM device
++ * @connector: connector to add the mode to
++ * @mode: mode to add
++ *
++ * Add @mode to @connector's user mode list.
++ */
++static int drm_mode_attachmode(struct drm_device *dev,
++			       struct drm_connector *connector,
++			       struct drm_display_mode *mode)
++{
++	int ret = 0;
++
++	list_add_tail(&mode->head, &connector->user_modes);
++	return ret;
++}
++
++int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
++			     struct drm_display_mode *mode)
++{
++	struct drm_connector *connector;
++	int ret = 0;
++	struct drm_display_mode *dup_mode;
++	int need_dup = 0;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		if (!connector->encoder)
++			break;
++		if (connector->encoder->crtc == crtc) {
++			if (need_dup)
++				dup_mode = drm_mode_duplicate(dev, mode);
++			else
++				dup_mode = mode;
++			ret = drm_mode_attachmode(dev, connector, dup_mode);
++			if (ret)
++				return ret;
++			need_dup = 1;
++		}
++	}
++	return 0;
++}
++
++static int drm_mode_detachmode(struct drm_device *dev,
++			       struct drm_connector *connector,
++			       struct drm_display_mode *mode)
++{
++	int found = 0;
++	int ret = 0;
++	struct drm_display_mode *match_mode, *t;
++
++	list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
++		if (drm_mode_equal(match_mode, mode)) {
++			list_del(&match_mode->head);
++			drm_mode_destroy(dev, match_mode);
++			found = 1;
++			break;
++		}
++	}
++
++	if (!found)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
++{
++	struct drm_connector *connector;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		drm_mode_detachmode(dev, connector, mode);
++	}
++	return 0;
++}
++
++/**
++ * drm_mode_attachmode_ioctl - attach a user mode to a connector
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * This attaches a user-specified mode to a connector.
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_attachmode_ioctl(struct drm_device *dev,
++			      void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_mode_cmd *mode_cmd = data;
++	struct drm_connector *connector;
++	struct drm_display_mode *mode;
++	struct drm_mode_object *obj;
++	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
++	if (!obj) {
++		ret = -EINVAL;
++		goto out;
++	}
++	connector = obj_to_connector(obj);
++
++	mode = drm_mode_create(dev);
++	if (!mode) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	drm_crtc_convert_umode(mode, umode);
++
++	ret = drm_mode_attachmode(dev, connector, mode);
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++
++/**
++ * drm_mode_detachmode_ioctl - detach a user-specified mode from a connector
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_detachmode_ioctl(struct drm_device *dev,
++			      void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_object *obj;
++	struct drm_mode_mode_cmd *mode_cmd = data;
++	struct drm_connector *connector;
++	struct drm_display_mode mode;
++	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
++	if (!obj) {
++		ret = -EINVAL;
++		goto out;
++	}
++	connector = obj_to_connector(obj);
++
++	drm_crtc_convert_umode(&mode, umode);
++	ret = drm_mode_detachmode(dev, connector, &mode);
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++struct drm_property *drm_property_create(struct drm_device *dev, int flags,
++					 const char *name, int num_values)
++{
++	struct drm_property *property = NULL;
++
++	property = malloc(sizeof(struct drm_property), DRM_MEM_KMS,
++	    M_WAITOK | M_ZERO);
++
++	if (num_values) {
++		property->values = malloc(sizeof(uint64_t)*num_values, DRM_MEM_KMS,
++		    M_WAITOK | M_ZERO);
++	}
++
++	drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
++	property->flags = flags;
++	property->num_values = num_values;
++	INIT_LIST_HEAD(&property->enum_blob_list);
++
++	if (name) {
++		strncpy(property->name, name, DRM_PROP_NAME_LEN);
++		property->name[DRM_PROP_NAME_LEN-1] = '\0';
++	}
++
++	list_add_tail(&property->head, &dev->mode_config.property_list);
++	return property;
++}
++
++int drm_property_add_enum(struct drm_property *property, int index,
++			  uint64_t value, const char *name)
++{
++	struct drm_property_enum *prop_enum;
++
++	if (!(property->flags & DRM_MODE_PROP_ENUM))
++		return -EINVAL;
++
++	if (!list_empty(&property->enum_blob_list)) {
++		list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
++			if (prop_enum->value == value) {
++				strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
++				prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
++				return 0;
++			}
++		}
++	}
++
++	prop_enum = malloc(sizeof(struct drm_property_enum), DRM_MEM_KMS,
++	    M_WAITOK | M_ZERO);
++
++	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
++	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
++	prop_enum->value = value;
++
++	property->values[index] = value;
++	list_add_tail(&prop_enum->head, &property->enum_blob_list);
++	return 0;
++}
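++
++/*
++ * Typical driver-side usage, sketched with hypothetical property and enum
++ * names: create an enum property, populate its values, then attach it to a
++ * connector with drm_connector_attach_property() (defined further below).
++ *
++ *	struct drm_property *prop;
++ *
++ *	prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling", 2);
++ *	drm_property_add_enum(prop, 0, 0, "full");
++ *	drm_property_add_enum(prop, 1, 1, "center");
++ *	drm_connector_attach_property(connector, prop, 0);
++ */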
++
++void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
++{
++	struct drm_property_enum *prop_enum, *pt;
++
++	list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
++		list_del(&prop_enum->head);
++		free(prop_enum, DRM_MEM_KMS);
++	}
++
++	if (property->num_values)
++		free(property->values, DRM_MEM_KMS);
++	drm_mode_object_put(dev, &property->base);
++	list_del(&property->head);
++	free(property, DRM_MEM_KMS);
++}
++
++int drm_connector_attach_property(struct drm_connector *connector,
++			       struct drm_property *property, uint64_t init_val)
++{
++	int i;
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
++		if (connector->property_ids[i] == 0) {
++			connector->property_ids[i] = property->base.id;
++			connector->property_values[i] = init_val;
++			break;
++		}
++	}
++
++	if (i == DRM_CONNECTOR_MAX_PROPERTY)
++		return -EINVAL;
++	return 0;
++}
++
++int drm_connector_property_set_value(struct drm_connector *connector,
++				  struct drm_property *property, uint64_t value)
++{
++	int i;
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
++		if (connector->property_ids[i] == property->base.id) {
++			connector->property_values[i] = value;
++			break;
++		}
++	}
++
++	if (i == DRM_CONNECTOR_MAX_PROPERTY)
++		return -EINVAL;
++	return 0;
++}
++
++int drm_connector_property_get_value(struct drm_connector *connector,
++				  struct drm_property *property, uint64_t *val)
++{
++	int i;
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
++		if (connector->property_ids[i] == property->base.id) {
++			*val = connector->property_values[i];
++			break;
++		}
++	}
++
++	if (i == DRM_CONNECTOR_MAX_PROPERTY)
++		return -EINVAL;
++	return 0;
++}
++
++int drm_mode_getproperty_ioctl(struct drm_device *dev,
++			       void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_object *obj;
++	struct drm_mode_get_property *out_resp = data;
++	struct drm_property *property;
++	int enum_count = 0;
++	int blob_count = 0;
++	int value_count = 0;
++	int ret = 0, i;
++	int copied;
++	struct drm_property_enum *prop_enum;
++	struct drm_mode_property_enum __user *enum_ptr;
++	struct drm_property_blob *prop_blob;
++	uint32_t *blob_id_ptr;
++	uint64_t *values_ptr;
++	uint32_t *blob_length_ptr;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
++	if (!obj) {
++		ret = -EINVAL;
++		goto done;
++	}
++	property = obj_to_property(obj);
++
++	if (property->flags & DRM_MODE_PROP_ENUM) {
++		list_for_each_entry(prop_enum, &property->enum_blob_list, head)
++			enum_count++;
++	} else if (property->flags & DRM_MODE_PROP_BLOB) {
++		list_for_each_entry(prop_blob, &property->enum_blob_list, head)
++			blob_count++;
++	}
++
++	value_count = property->num_values;
++
++	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
++	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
++	out_resp->flags = property->flags;
++
++	if ((out_resp->count_values >= value_count) && value_count) {
++		values_ptr = (uint64_t *)(uintptr_t)out_resp->values_ptr;
++		for (i = 0; i < value_count; i++) {
++			if (copyout(&property->values[i], values_ptr + i, sizeof(uint64_t))) {
++				ret = -EFAULT;
++				goto done;
++			}
++		}
++	}
++	out_resp->count_values = value_count;
++
++	if (property->flags & DRM_MODE_PROP_ENUM) {
++		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
++			copied = 0;
++			enum_ptr = (struct drm_mode_property_enum *)(uintptr_t)out_resp->enum_blob_ptr;
++			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
++
++				if (copyout(&prop_enum->value, &enum_ptr[copied].value, sizeof(uint64_t))) {
++					ret = -EFAULT;
++					goto done;
++				}
++
++				if (copyout(&prop_enum->name,
++				    &enum_ptr[copied].name, DRM_PROP_NAME_LEN)) {
++					ret = -EFAULT;
++					goto done;
++				}
++				copied++;
++			}
++		}
++		out_resp->count_enum_blobs = enum_count;
++	}
++
++	if (property->flags & DRM_MODE_PROP_BLOB) {
++		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
++			copied = 0;
++			blob_id_ptr = (uint32_t *)(uintptr_t)out_resp->enum_blob_ptr;
++			blob_length_ptr = (uint32_t *)(uintptr_t)out_resp->values_ptr;
++
++			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
++				if (copyout(&prop_blob->base.id,
++				    blob_id_ptr + copied, sizeof(uint32_t))) {
++					ret = -EFAULT;
++					goto done;
++				}
++
++				if (copyout(&prop_blob->length,
++				    blob_length_ptr + copied, sizeof(uint32_t))) {
++					ret = -EFAULT;
++					goto done;
++				}
++
++				copied++;
++			}
++		}
++		out_resp->count_enum_blobs = blob_count;
++	}
++done:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
++							  void *data)
++{
++	struct drm_property_blob *blob;
++
++	if (!length || !data)
++		return NULL;
++
++	blob = malloc(sizeof(struct drm_property_blob)+length, DRM_MEM_KMS,
++	    M_WAITOK | M_ZERO);
++
++	blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
++	blob->length = length;
++
++	memcpy(blob->data, data, length);
++
++	drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
++
++	list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
++	return blob;
++}
++
++static void drm_property_destroy_blob(struct drm_device *dev,
++			       struct drm_property_blob *blob)
++{
++	drm_mode_object_put(dev, &blob->base);
++	list_del(&blob->head);
++	free(blob, DRM_MEM_KMS);
++}
++
++int drm_mode_getblob_ioctl(struct drm_device *dev,
++			   void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_object *obj;
++	struct drm_mode_get_blob *out_resp = data;
++	struct drm_property_blob *blob;
++	int ret = 0;
++	void *blob_ptr;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
++	if (!obj) {
++		ret = -EINVAL;
++		goto done;
++	}
++	blob = obj_to_blob(obj);
++
++	if (out_resp->length == blob->length) {
++		blob_ptr = (void *)(unsigned long)out_resp->data;
++		if (copyout(blob->data, blob_ptr, blob->length)) {
++			ret = -EFAULT;
++			goto done;
++		}
++	}
++	out_resp->length = blob->length;
++
++done:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++int drm_mode_connector_update_edid_property(struct drm_connector *connector,
++					    struct edid *edid)
++{
++	struct drm_device *dev = connector->dev;
++	int ret = 0, size;
++
++	if (connector->edid_blob_ptr)
++		drm_property_destroy_blob(dev, connector->edid_blob_ptr);
++
++	/* Clear the EDID property when there is no EDID. */
++	if (!edid) {
++		connector->edid_blob_ptr = NULL;
++		ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
++		return ret;
++	}
++
++	size = EDID_LENGTH * (1 + edid->extensions);
++	connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
++							    size, edid);
++
++	ret = drm_connector_property_set_value(connector,
++					       dev->mode_config.edid_property,
++					       connector->edid_blob_ptr->base.id);
++
++	return ret;
++}
++
++int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
++				       void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_connector_set_property *out_resp = data;
++	struct drm_mode_object *obj;
++	struct drm_property *property;
++	struct drm_connector *connector;
++	int ret = -EINVAL;
++	int i;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	sx_xlock(&dev->mode_config.mutex);
++
++	obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
++	if (!obj) {
++		goto out;
++	}
++	connector = obj_to_connector(obj);
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
++		if (connector->property_ids[i] == out_resp->prop_id)
++			break;
++	}
++
++	if (i == DRM_CONNECTOR_MAX_PROPERTY) {
++		goto out;
++	}
++
++	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
++	if (!obj) {
++		goto out;
++	}
++	property = obj_to_property(obj);
++
++	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
++		goto out;
++
++	if (property->flags & DRM_MODE_PROP_RANGE) {
++		if (out_resp->value < property->values[0])
++			goto out;
++
++		if (out_resp->value > property->values[1])
++			goto out;
++	} else {
++		int found = 0;
++		for (i = 0; i < property->num_values; i++) {
++			if (property->values[i] == out_resp->value) {
++				found = 1;
++				break;
++			}
++		}
++		if (!found) {
++			goto out;
++		}
++	}
++
++	/* Do DPMS ourselves */
++	if (property == connector->dev->mode_config.dpms_property) {
++		if (connector->funcs->dpms)
++			(*connector->funcs->dpms)(connector, (int) out_resp->value);
++		ret = 0;
++	} else if (connector->funcs->set_property)
++		ret = connector->funcs->set_property(connector, property, out_resp->value);
++
++	/* store the property value if successful */
++	if (!ret)
++		drm_connector_property_set_value(connector, property, out_resp->value);
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++int drm_mode_connector_attach_encoder(struct drm_connector *connector,
++				      struct drm_encoder *encoder)
++{
++	int i;
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++		if (connector->encoder_ids[i] == 0) {
++			connector->encoder_ids[i] = encoder->base.id;
++			return 0;
++		}
++	}
++	return -ENOMEM;
++}
++
++void drm_mode_connector_detach_encoder(struct drm_connector *connector,
++				    struct drm_encoder *encoder)
++{
++	int i;
++	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++		if (connector->encoder_ids[i] == encoder->base.id) {
++			connector->encoder_ids[i] = 0;
++			if (connector->encoder == encoder)
++				connector->encoder = NULL;
++			break;
++		}
++	}
++}
++
++bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
++				  int gamma_size)
++{
++	crtc->gamma_size = gamma_size;
++
++	crtc->gamma_store = malloc(gamma_size * sizeof(uint16_t) * 3,
++	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
++	if (!crtc->gamma_store) {
++		crtc->gamma_size = 0;
++		return false;
++	}
++
++	return true;
++}
++
++int drm_mode_gamma_set_ioctl(struct drm_device *dev,
++			     void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_crtc_lut *crtc_lut = data;
++	struct drm_mode_object *obj;
++	struct drm_crtc *crtc;
++	void *r_base, *g_base, *b_base;
++	int size;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
++	if (!obj) {
++		ret = -EINVAL;
++		goto out;
++	}
++	crtc = obj_to_crtc(obj);
++
++	/* memcpy into gamma store */
++	if (crtc_lut->gamma_size != crtc->gamma_size) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	size = crtc_lut->gamma_size * (sizeof(uint16_t));
++	r_base = crtc->gamma_store;
++	if (copyin((void *)(uintptr_t)crtc_lut->red, r_base, size)) {
++		ret = -EFAULT;
++		goto out;
++	}
++
++	g_base = (char *)r_base + size;
++	if (copyin((void *)(uintptr_t)crtc_lut->green, g_base, size)) {
++		ret = -EFAULT;
++		goto out;
++	}
++
++	b_base = (char *)g_base + size;
++	if (copyin((void *)(uintptr_t)crtc_lut->blue, b_base, size)) {
++		ret = -EFAULT;
++		goto out;
++	}
++
++	crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++
++}
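++
++/*
++ * The gamma_store allocated by drm_mode_crtc_set_gamma_size() above keeps
++ * the three channels back to back, so for a hypothetical 256-entry LUT:
++ *
++ *	uint16_t *r = crtc->gamma_store;
++ *	uint16_t *g = r + 256;
++ *	uint16_t *b = g + 256;
++ *
++ * which is the same r_base/g_base/b_base arithmetic used above and in
++ * drm_mode_gamma_get_ioctl() below.
++ */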
++
++int drm_mode_gamma_get_ioctl(struct drm_device *dev,
++			     void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_crtc_lut *crtc_lut = data;
++	struct drm_mode_object *obj;
++	struct drm_crtc *crtc;
++	void *r_base, *g_base, *b_base;
++	int size;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
++	if (!obj) {
++		ret = -EINVAL;
++		goto out;
++	}
++	crtc = obj_to_crtc(obj);
++
++	/* memcpy into gamma store */
++	if (crtc_lut->gamma_size != crtc->gamma_size) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	size = crtc_lut->gamma_size * (sizeof(uint16_t));
++	r_base = crtc->gamma_store;
++	if (copyout(r_base, (void *)(uintptr_t)crtc_lut->red, size)) {
++		ret = -EFAULT;
++		goto out;
++	}
++
++	g_base = (char *)r_base + size;
++	if (copyout(g_base, (void *)(uintptr_t)crtc_lut->green, size)) {
++		ret = -EFAULT;
++		goto out;
++	}
++
++	b_base = (char *)g_base + size;
++	if (copyout(b_base, (void *)(uintptr_t)crtc_lut->blue, size)) {
++		ret = -EFAULT;
++		goto out;
++	}
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++static void
++drm_kms_free(void *arg)
++{
++
++	free(arg, DRM_MEM_KMS);
++}
++
++int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data,
++    struct drm_file *file_priv)
++{
++	struct drm_mode_crtc_page_flip *page_flip = data;
++	struct drm_mode_object *obj;
++	struct drm_crtc *crtc;
++	struct drm_framebuffer *fb;
++	struct drm_pending_vblank_event *e = NULL;
++	int ret = EINVAL;
++
++	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
++	    page_flip->reserved != 0)
++		return (EINVAL);
++
++	sx_xlock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
++	if (!obj)
++		goto out;
++	crtc = obj_to_crtc(obj);
++
++	if (crtc->fb == NULL) {
++		/* The framebuffer is currently unbound, presumably
++		 * due to a hotplug event that userspace has not
++		 * yet discovered.
++		 */
++		ret = EBUSY;
++		goto out;
++	}
++
++	if (crtc->funcs->page_flip == NULL)
++		goto out;
++
++	obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
++	if (!obj)
++		goto out;
++	fb = obj_to_fb(obj);
++
++	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
++		ret = ENOMEM;
++		mtx_lock(&dev->event_lock);
++		if (file_priv->event_space < sizeof e->event) {
++			mtx_unlock(&dev->event_lock);
++			goto out;
++		}
++		file_priv->event_space -= sizeof e->event;
++		mtx_unlock(&dev->event_lock);
++
++		e = malloc(sizeof *e, DRM_MEM_KMS, M_WAITOK | M_ZERO);
++
++		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
++		e->event.base.length = sizeof e->event;
++		e->event.user_data = page_flip->user_data;
++		e->base.event = &e->event.base;
++		e->base.file_priv = file_priv;
++		e->base.destroy =
++			(void (*) (struct drm_pending_event *))drm_kms_free;
++	}
++
++	ret = -crtc->funcs->page_flip(crtc, fb, e);
++	if (ret != 0 && (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)) {
++		mtx_lock(&dev->event_lock);
++		file_priv->event_space += sizeof e->event;
++		mtx_unlock(&dev->event_lock);
++		free(e, DRM_MEM_KMS);
++	}
++
++out:
++	sx_xunlock(&dev->mode_config.mutex);
++	CTR3(KTR_DRM, "page_flip_ioctl %d %d %d", curproc->p_pid,
++	    page_flip->crtc_id, ret);
++	return (ret);
++}
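++
++/*
++ * Sketch of the expected userspace sequence when DRM_MODE_PAGE_FLIP_EVENT
++ * is requested (struct and flag names as used above; the ioctl request
++ * number and event layout are assumed to come from the matching headers).
++ * A later read() on the DRM fd returns a DRM_EVENT_FLIP_COMPLETE event
++ * carrying user_data back once the flip completes:
++ *
++ *	struct drm_mode_crtc_page_flip flip;
++ *	char buf[128];
++ *
++ *	memset(&flip, 0, sizeof(flip));
++ *	flip.crtc_id = crtc_id;
++ *	flip.fb_id = new_fb_id;
++ *	flip.flags = DRM_MODE_PAGE_FLIP_EVENT;
++ *	flip.user_data = (uintptr_t)cookie;
++ *	ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
++ *	read(fd, buf, sizeof(buf));
++ */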
++
++void drm_mode_config_reset(struct drm_device *dev)
++{
++	struct drm_crtc *crtc;
++	struct drm_encoder *encoder;
++	struct drm_connector *connector;
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++		if (crtc->funcs->reset)
++			crtc->funcs->reset(crtc);
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
++		if (encoder->funcs->reset)
++			encoder->funcs->reset(encoder);
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		if (connector->funcs->reset)
++			connector->funcs->reset(connector);
++}
++
++int drm_mode_create_dumb_ioctl(struct drm_device *dev,
++			       void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_create_dumb *args = data;
++
++	if (!dev->driver->dumb_create)
++		return -ENOTSUP;
++	return dev->driver->dumb_create(file_priv, dev, args);
++}
++
++int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
++			     void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_map_dumb *args = data;
++
++	/* call driver ioctl to get mmap offset */
++	if (!dev->driver->dumb_map_offset)
++		return -ENOTSUP;
++
++	return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
++}
++
++int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
++				void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_destroy_dumb *args = data;
++
++	if (!dev->driver->dumb_destroy)
++		return -ENOTSUP;
++
++	return dev->driver->dumb_destroy(file_priv, dev, args->handle);
++}
++
++/*
++ * Just need to support RGB formats here for compat with code that doesn't
++ * use pixel formats directly yet.
++ */
++void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
++			  int *bpp)
++{
++	switch (format) {
++	case DRM_FORMAT_RGB332:
++	case DRM_FORMAT_BGR233:
++		*depth = 8;
++		*bpp = 8;
++		break;
++	case DRM_FORMAT_XRGB1555:
++	case DRM_FORMAT_XBGR1555:
++	case DRM_FORMAT_RGBX5551:
++	case DRM_FORMAT_BGRX5551:
++	case DRM_FORMAT_ARGB1555:
++	case DRM_FORMAT_ABGR1555:
++	case DRM_FORMAT_RGBA5551:
++	case DRM_FORMAT_BGRA5551:
++		*depth = 15;
++		*bpp = 16;
++		break;
++	case DRM_FORMAT_RGB565:
++	case DRM_FORMAT_BGR565:
++		*depth = 16;
++		*bpp = 16;
++		break;
++	case DRM_FORMAT_RGB888:
++	case DRM_FORMAT_BGR888:
++		*depth = 24;
++		*bpp = 24;
++		break;
++	case DRM_FORMAT_XRGB8888:
++	case DRM_FORMAT_XBGR8888:
++	case DRM_FORMAT_RGBX8888:
++	case DRM_FORMAT_BGRX8888:
++		*depth = 24;
++		*bpp = 32;
++		break;
++	case DRM_FORMAT_XRGB2101010:
++	case DRM_FORMAT_XBGR2101010:
++	case DRM_FORMAT_RGBX1010102:
++	case DRM_FORMAT_BGRX1010102:
++	case DRM_FORMAT_ARGB2101010:
++	case DRM_FORMAT_ABGR2101010:
++	case DRM_FORMAT_RGBA1010102:
++	case DRM_FORMAT_BGRA1010102:
++		*depth = 30;
++		*bpp = 32;
++		break;
++	case DRM_FORMAT_ARGB8888:
++	case DRM_FORMAT_ABGR8888:
++	case DRM_FORMAT_RGBA8888:
++	case DRM_FORMAT_BGRA8888:
++		*depth = 32;
++		*bpp = 32;
++		break;
++	default:
++		DRM_DEBUG_KMS("unsupported pixel format\n");
++		*depth = 0;
++		*bpp = 0;
++		break;
++	}
++}
+diff --git a/sys/dev/drm/drm_crtc.h b/sys/dev/drm/drm_crtc.h
+new file mode 100644
+index 0000000..b54cf97
+--- /dev/null
++++ sys/dev/drm/drm_crtc.h
+@@ -0,0 +1,918 @@
++/*
++ * Copyright © 2006 Keith Packard
++ * Copyright © 2007-2008 Dave Airlie
++ * Copyright © 2007-2008 Intel Corporation
 + *   Jesse Barnes <jesse.barnes at intel.com>
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
@@ -4340,751 +7791,3150 @@
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
 + */
++#ifndef __DRM_CRTC_H__
++#define __DRM_CRTC_H__
 +
-+#ifndef DRM_INTEL_DRV_H
-+#define	DRM_INTEL_DRV_H
++#include "dev/drm/drm_gem_names.h"
++#include "dev/drm/drm_fourcc.h"
 +
-+#include "dev/drm/i915_drm.h"
-+#include "dev/drm/i915_drv.h"
-+#include "dev/drm/drm_crtc.h"
-+#include "dev/drm/drm_crtc_helper.h"
-+#include "dev/drm/drm_fb_helper.h"
++struct drm_device;
++struct drm_mode_set;
++struct drm_framebuffer;
++struct i2c_adapter;
 +
-+#define _intel_wait_for(DEV, COND, MS, W, WMSG)				\
-+({									\
-+	int end, ret;							\
-+									\
-+	end = ticks + (MS) * hz / 1000;					\
-+	ret = 0;							\
-+									\
-+	while (!(COND)) {						\
-+		if (time_after(ticks, end)) {				\
-+			ret = -ETIMEDOUT;				\
-+			break;						\
-+		}							\
-+		if (W)							\
-+			pause((WMSG), 1);				\
-+		else							\
-+			DELAY(1000);					\
-+	}								\
-+									\
-+	ret;								\
-+})
++#define DRM_MODE_OBJECT_CRTC 0xcccccccc
++#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
++#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
++#define DRM_MODE_OBJECT_MODE 0xdededede
++#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
++#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
++#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
++#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
 +
-+#define KHz(x) (1000*x)
-+#define MHz(x) KHz(1000*x)
++struct drm_mode_object {
++	uint32_t id;
++	uint32_t type;
++};
 +
-+/* store information about an Ixxx DVO */
-+/* The i830->i865 use multiple DVOs with multiple i2cs */
-+/* the i915, i945 have a single sDVO i2c bus - which is different */
-+#define MAX_OUTPUTS 6
-+/* maximum connectors per crtcs in the mode set */
-+#define INTELFB_CONN_LIMIT 4
++/*
++ * Note on terminology:  here, for brevity and convenience, we refer to connector
++ * control chips as 'CRTCs'.  They can control any type of connector, VGA, LVDS,
++ * DVI, etc.  And 'screen' refers to the whole of the visible display, which
++ * may span multiple monitors (and therefore multiple CRTC and connector
++ * structures).
++ */
 +
-+#define INTEL_I2C_BUS_DVO 1
-+#define INTEL_I2C_BUS_SDVO 2
++enum drm_mode_status {
++    MODE_OK	= 0,	/* Mode OK */
++    MODE_HSYNC,		/* hsync out of range */
++    MODE_VSYNC,		/* vsync out of range */
++    MODE_H_ILLEGAL,	/* mode has illegal horizontal timings */
++    MODE_V_ILLEGAL,	/* mode has illegal vertical timings */
++    MODE_BAD_WIDTH,	/* requires an unsupported linepitch */
++    MODE_NOMODE,	/* no mode with a matching name */
++    MODE_NO_INTERLACE,	/* interlaced mode not supported */
++    MODE_NO_DBLESCAN,	/* doublescan mode not supported */
++    MODE_NO_VSCAN,	/* multiscan mode not supported */
++    MODE_MEM,		/* insufficient video memory */
++    MODE_VIRTUAL_X,	/* mode width too large for specified virtual size */
++    MODE_VIRTUAL_Y,	/* mode height too large for specified virtual size */
++    MODE_MEM_VIRT,	/* insufficient video memory given virtual size */
++    MODE_NOCLOCK,	/* no fixed clock available */
++    MODE_CLOCK_HIGH,	/* clock required is too high */
++    MODE_CLOCK_LOW,	/* clock required is too low */
++    MODE_CLOCK_RANGE,	/* clock/mode isn't in a ClockRange */
++    MODE_BAD_HVALUE,	/* horizontal timing was out of range */
++    MODE_BAD_VVALUE,	/* vertical timing was out of range */
++    MODE_BAD_VSCAN,	/* VScan value out of range */
++    MODE_HSYNC_NARROW,	/* horizontal sync too narrow */
++    MODE_HSYNC_WIDE,	/* horizontal sync too wide */
++    MODE_HBLANK_NARROW,	/* horizontal blanking too narrow */
++    MODE_HBLANK_WIDE,	/* horizontal blanking too wide */
++    MODE_VSYNC_NARROW,	/* vertical sync too narrow */
++    MODE_VSYNC_WIDE,	/* vertical sync too wide */
++    MODE_VBLANK_NARROW,	/* vertical blanking too narrow */
++    MODE_VBLANK_WIDE,	/* vertical blanking too wide */
++    MODE_PANEL,         /* exceeds panel dimensions */
++    MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
++    MODE_ONE_WIDTH,     /* only one width is supported */
++    MODE_ONE_HEIGHT,    /* only one height is supported */
++    MODE_ONE_SIZE,      /* only one resolution is supported */
++    MODE_NO_REDUCED,    /* monitor doesn't accept reduced blanking */
++    MODE_UNVERIFIED = -3, /* mode needs to be re-verified */
++    MODE_BAD = -2,	/* unspecified reason */
++    MODE_ERROR	= -1	/* error condition */
++};
 +
-+/* these are outputs from the chip - integrated only
-+   external chips are via DVO or SDVO output */
-+#define INTEL_OUTPUT_UNUSED 0
-+#define INTEL_OUTPUT_ANALOG 1
-+#define INTEL_OUTPUT_DVO 2
-+#define INTEL_OUTPUT_SDVO 3
-+#define INTEL_OUTPUT_LVDS 4
-+#define INTEL_OUTPUT_TVOUT 5
-+#define INTEL_OUTPUT_HDMI 6
-+#define INTEL_OUTPUT_DISPLAYPORT 7
-+#define INTEL_OUTPUT_EDP 8
++#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
++				    DRM_MODE_TYPE_CRTC_C)
 +
-+/* Intel Pipe Clone Bit */
-+#define INTEL_HDMIB_CLONE_BIT 1
-+#define INTEL_HDMIC_CLONE_BIT 2
-+#define INTEL_HDMID_CLONE_BIT 3
-+#define INTEL_HDMIE_CLONE_BIT 4
-+#define INTEL_HDMIF_CLONE_BIT 5
-+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-+#define INTEL_SDVO_TV_CLONE_BIT 7
-+#define INTEL_SDVO_LVDS_CLONE_BIT 8
-+#define INTEL_ANALOG_CLONE_BIT 9
-+#define INTEL_TV_CLONE_BIT 10
-+#define INTEL_DP_B_CLONE_BIT 11
-+#define INTEL_DP_C_CLONE_BIT 12
-+#define INTEL_DP_D_CLONE_BIT 13
-+#define INTEL_LVDS_CLONE_BIT 14
-+#define INTEL_DVO_TMDS_CLONE_BIT 15
-+#define INTEL_DVO_LVDS_CLONE_BIT 16
-+#define INTEL_EDP_CLONE_BIT 17
++#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
++	.name = nm, .status = 0, .type = (t), .clock = (c), \
++	.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
++	.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
++	.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
++	.vscan = (vs), .flags = (f), .vrefresh = 0
 +
-+#define INTEL_DVO_CHIP_NONE 0
-+#define INTEL_DVO_CHIP_LVDS 1
-+#define INTEL_DVO_CHIP_TMDS 2
-+#define INTEL_DVO_CHIP_TVOUT 4
++#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
 +
-+/* drm_display_mode->private_flags */
-+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
-+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
-+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
++struct drm_display_mode {
++	/* Header */
++	struct list_head head;
++	struct drm_mode_object base;
 +
-+static inline void
-+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
-+				int multiplier)
-+{
-+	mode->clock *= multiplier;
-+	mode->private_flags |= multiplier;
-+}
++	char name[DRM_DISPLAY_MODE_LEN];
 +
-+static inline int
-+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
-+{
-+	return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
-+}
++	int connector_count;
++	enum drm_mode_status status;
++	int type;
 +
-+struct intel_framebuffer {
-+	struct drm_framebuffer base;
-+	struct drm_i915_gem_object *obj;
++	/* Proposed mode values */
++	int clock;		/* in kHz */
++	int hdisplay;
++	int hsync_start;
++	int hsync_end;
++	int htotal;
++	int hskew;
++	int vdisplay;
++	int vsync_start;
++	int vsync_end;
++	int vtotal;
++	int vscan;
++	unsigned int flags;
++
++	/* Addressable image size (may be 0 for projectors, etc.) */
++	int width_mm;
++	int height_mm;
++
++	/* Actual mode we give to hw */
++	int clock_index;
++	int synth_clock;
++	int crtc_hdisplay;
++	int crtc_hblank_start;
++	int crtc_hblank_end;
++	int crtc_hsync_start;
++	int crtc_hsync_end;
++	int crtc_htotal;
++	int crtc_hskew;
++	int crtc_vdisplay;
++	int crtc_vblank_start;
++	int crtc_vblank_end;
++	int crtc_vsync_start;
++	int crtc_vsync_end;
++	int crtc_vtotal;
++	int crtc_hadjusted;
++	int crtc_vadjusted;
++
++	/* Driver private mode info */
++	int private_size;
++	int *private;
++	int private_flags;
++
++	int vrefresh;		/* in Hz */
++	int hsync;		/* in kHz */
 +};
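++
++/*
++ * As an illustration of the DRM_MODE() initializer above, the standard
++ * 640x480@60 VESA timing would be written as (clock in kHz, sync polarity
++ * flags taken from drm_mode.h):
++ *
++ *	static struct drm_display_mode vga_mode = {
++ *		DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175,
++ *			 640, 656, 752, 800, 0, 480, 490, 492, 525, 0,
++ *			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
++ *	};
++ */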
 +
-+struct intel_fbdev {
-+	struct drm_fb_helper helper;
-+	struct intel_framebuffer ifb;
-+	struct list_head fbdev_list;
-+	struct drm_display_mode *our_mode;
++enum drm_connector_status {
++	connector_status_connected = 1,
++	connector_status_disconnected = 2,
++	connector_status_unknown = 3,
 +};
 +
-+struct intel_encoder {
-+	struct drm_encoder base;
-+	int type;
-+	bool needs_tv_clock;
-+	void (*hot_plug)(struct intel_encoder *);
-+	int crtc_mask;
-+	int clone_mask;
++enum subpixel_order {
++	SubPixelUnknown = 0,
++	SubPixelHorizontalRGB,
++	SubPixelHorizontalBGR,
++	SubPixelVerticalRGB,
++	SubPixelVerticalBGR,
++	SubPixelNone,
 +};
 +
-+struct intel_connector {
-+	struct drm_connector base;
-+	struct intel_encoder *encoder;
++#define DRM_COLOR_FORMAT_RGB444		(1<<0)
++#define DRM_COLOR_FORMAT_YCRCB444	(1<<1)
++#define DRM_COLOR_FORMAT_YCRCB422	(1<<2)
++/*
++ * Describes a given display (e.g. CRT or flat panel) and its limitations.
++ */
++struct drm_display_info {
++	char name[DRM_DISPLAY_INFO_LEN];
++
++	/* Physical size */
++	unsigned int width_mm;
++	unsigned int height_mm;
++
++	/* Clock limits FIXME: storage format */
++	unsigned int min_vfreq, max_vfreq;
++	unsigned int min_hfreq, max_hfreq;
++	unsigned int pixel_clock;
++	unsigned int bpc;
++
++	enum subpixel_order subpixel_order;
++	u32 color_formats;
++
++	u8 cea_rev;
++
++	char *raw_edid; /* if any */
 +};
 +
-+struct intel_crtc {
-+	struct drm_crtc base;
-+	enum pipe pipe;
-+	enum plane plane;
-+	u8 lut_r[256], lut_g[256], lut_b[256];
-+	int dpms_mode;
-+	bool active; /* is the crtc on? independent of the dpms mode */
-+	bool busy; /* is scanout buffer being updated frequently? */
-+	struct callout idle_callout;
-+	bool lowfreq_avail;
-+	struct intel_overlay *overlay;
-+	struct intel_unpin_work *unpin_work;
-+	int fdi_lanes;
++struct drm_framebuffer_funcs {
++	void (*destroy)(struct drm_framebuffer *framebuffer);
++	int (*create_handle)(struct drm_framebuffer *fb,
++			     struct drm_file *file_priv,
++			     unsigned int *handle);
++	/**
++	 * Optional callback for the dirty fb ioctl.
++	 *
++	 * Userspace can notify the driver via this callback
++	 * that an area of the framebuffer has changed and should
++	 * be flushed to the display hardware.
++	 *
++	 * See documentation in drm_mode.h for the struct
++	 * drm_mode_fb_dirty_cmd for more information, as the
++	 * semantics and arguments have a one-to-one mapping
++	 * onto this callback.
++	 */
++	int (*dirty)(struct drm_framebuffer *framebuffer,
++		     struct drm_file *file_priv, unsigned flags,
++		     unsigned color, struct drm_clip_rect *clips,
++		     unsigned num_clips);
++};
 +
-+	struct drm_i915_gem_object *cursor_bo;
-+	uint32_t cursor_addr;
-+	int16_t cursor_x, cursor_y;
-+	int16_t cursor_width, cursor_height;
-+	bool cursor_visible;
-+	unsigned int bpp;
++struct drm_framebuffer {
++	struct drm_device *dev;
++	struct list_head head;
++	struct drm_mode_object base;
++	const struct drm_framebuffer_funcs *funcs;
++	unsigned int pitches[4];
++	unsigned int offsets[4];
++	unsigned int width;
++	unsigned int height;
++	/* depth can be 15 or 16 */
++	unsigned int depth;
++	int bits_per_pixel;
++	int flags;
++	uint32_t pixel_format; /* fourcc format */
++	struct list_head filp_head;
++	/* if you are using the helper */
++	void *helper_private;
++};
 +
-+	bool no_pll; /* tertiary pipe for IVB */
-+	bool use_pll_a;
++struct drm_property_blob {
++	struct drm_mode_object base;
++	struct list_head head;
++	unsigned int length;
++	void *data;
 +};
 +
-+struct intel_plane {
-+	struct drm_plane base;
-+	enum pipe pipe;
-+	struct drm_i915_gem_object *obj;
-+	bool primary_disabled;
-+	int max_downscale;
-+	u32 lut_r[1024], lut_g[1024], lut_b[1024];
-+	void (*update_plane)(struct drm_plane *plane,
-+			     struct drm_framebuffer *fb,
-+			     struct drm_i915_gem_object *obj,
-+			     int crtc_x, int crtc_y,
-+			     unsigned int crtc_w, unsigned int crtc_h,
-+			     uint32_t x, uint32_t y,
-+			     uint32_t src_w, uint32_t src_h);
-+	void (*disable_plane)(struct drm_plane *plane);
-+	int (*update_colorkey)(struct drm_plane *plane,
-+			       struct drm_intel_sprite_colorkey *key);
-+	void (*get_colorkey)(struct drm_plane *plane,
-+			     struct drm_intel_sprite_colorkey *key);
++struct drm_property_enum {
++	uint64_t value;
++	struct list_head head;
++	char name[DRM_PROP_NAME_LEN];
 +};
 +
-+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
-+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
-+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
-+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
++struct drm_property {
++	struct list_head head;
++	struct drm_mode_object base;
++	uint32_t flags;
++	char name[DRM_PROP_NAME_LEN];
++	uint32_t num_values;
++	uint64_t *values;
 +
-+#define DIP_HEADER_SIZE	5
++	struct list_head enum_blob_list;
++};
 +
-+#define DIP_TYPE_AVI    0x82
-+#define DIP_VERSION_AVI 0x2
-+#define DIP_LEN_AVI     13
++struct drm_crtc;
++struct drm_connector;
++struct drm_encoder;
++struct drm_pending_vblank_event;
++struct drm_plane;
 +
-+#define DIP_TYPE_SPD	0x83
-+#define DIP_VERSION_SPD	0x1
-+#define DIP_LEN_SPD	25
-+#define DIP_SPD_UNKNOWN	0
-+#define DIP_SPD_DSTB	0x1
-+#define DIP_SPD_DVDP	0x2
-+#define DIP_SPD_DVHS	0x3
-+#define DIP_SPD_HDDVR	0x4
-+#define DIP_SPD_DVC	0x5
-+#define DIP_SPD_DSC	0x6
-+#define DIP_SPD_VCD	0x7
-+#define DIP_SPD_GAME	0x8
-+#define DIP_SPD_PC	0x9
-+#define DIP_SPD_BD	0xa
-+#define DIP_SPD_SCD	0xb
++/**
++ * drm_crtc_funcs - control CRTCs for a given device
++ * @reset: reset CRTC after state has been invalidated (e.g. resume)
++ * @dpms: control display power levels
++ * @save: save CRTC state
++ * @restore: restore CRTC state
++ * @lock: lock the CRTC
++ * @unlock: unlock the CRTC
++ * @shadow_allocate: allocate shadow pixmap
++ * @shadow_create: create shadow pixmap for rotation support
++ * @shadow_destroy: free shadow pixmap
++ * @mode_fixup: fixup proposed mode
++ * @mode_set: set the desired mode on the CRTC
++ * @gamma_set: specify color ramp for CRTC
++ * @destroy: deinit and free object.
++ *
++ * The drm_crtc_funcs structure is the central CRTC management structure
++ * in the DRM.  Each CRTC controls one or more connectors (note that the name
++ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
++ * connectors, not just CRTs).
++ *
++ * Each driver is responsible for filling out this structure at startup time,
++ * in addition to providing other modesetting features, like i2c and DDC
++ * bus accessors.
++ */
++struct drm_crtc_funcs {
++	/* Save CRTC state */
++	void (*save)(struct drm_crtc *crtc); /* suspend? */
++	/* Restore CRTC state */
++	void (*restore)(struct drm_crtc *crtc); /* resume? */
++	/* Reset CRTC state */
++	void (*reset)(struct drm_crtc *crtc);
 +
-+struct dip_infoframe {
-+	uint8_t type;		/* HB0 */
-+	uint8_t ver;		/* HB1 */
-+	uint8_t len;		/* HB2 - body len, not including checksum */
-+	uint8_t ecc;		/* Header ECC */
-+	uint8_t checksum;	/* PB0 */
-+	union {
-+		struct {
-+			/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
-+			uint8_t Y_A_B_S;
-+			/* PB2 - C 7:6, M 5:4, R 3:0 */
-+			uint8_t C_M_R;
-+			/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
-+			uint8_t ITC_EC_Q_SC;
-+			/* PB4 - VIC 6:0 */
-+			uint8_t VIC;
-+			/* PB5 - PR 3:0 */
-+			uint8_t PR;
-+			/* PB6 to PB13 */
-+			uint16_t top_bar_end;
-+			uint16_t bottom_bar_start;
-+			uint16_t left_bar_end;
-+			uint16_t right_bar_start;
-+		} avi;
-+		struct {
-+			uint8_t vn[8];
-+			uint8_t pd[16];
-+			uint8_t sdi;
-+		} spd;
-+		uint8_t payload[27];
-+	} __attribute__ ((packed)) body;
-+} __attribute__((packed));
++	/* cursor controls */
++	int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
++			  uint32_t handle, uint32_t width, uint32_t height);
++	int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
 +
-+static inline struct drm_crtc *
-+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	return dev_priv->pipe_to_crtc_mapping[pipe];
-+}
++	/* Set gamma on the CRTC */
++	void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
++			  uint32_t start, uint32_t size);
++	/* Object destroy routine */
++	void (*destroy)(struct drm_crtc *crtc);
 +
-+static inline struct drm_crtc *
-+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	return dev_priv->plane_to_crtc_mapping[plane];
-+}
++	int (*set_config)(struct drm_mode_set *set);
 +
-+struct intel_unpin_work {
-+	struct task task;
++	/*
++	 * Flip to the given framebuffer.  This implements the page
++	 * flip ioctl described in drm_mode.h; specifically, the
++	 * implementation must return immediately and block all
++	 * rendering to the current fb until the flip has completed.
++	 * If userspace set the event flag in the ioctl, the event
++	 * argument will point to an event to send back when the flip
++	 * completes, otherwise it will be NULL.
++	 */
++	int (*page_flip)(struct drm_crtc *crtc,
++			 struct drm_framebuffer *fb,
++			 struct drm_pending_vblank_event *event);
++};
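
As a sketch of how a driver might fill this table in (the mydrv_* callbacks are hypothetical; drm_crtc_init() and drm_mode_crtc_set_gamma_size() are declared later in this header, and drm_crtc_helper_set_config() is the helper added in drm_crtc_helper.c below):

static const struct drm_crtc_funcs mydrv_crtc_funcs = {
	.cursor_set = mydrv_crtc_cursor_set,
	.cursor_move = mydrv_crtc_cursor_move,
	.gamma_set = mydrv_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.page_flip = mydrv_crtc_page_flip,
	.destroy = mydrv_crtc_destroy,
};

static void
mydrv_crtc_register(struct drm_device *dev, struct drm_crtc *crtc)
{

	drm_crtc_init(dev, crtc, &mydrv_crtc_funcs);
	drm_mode_crtc_set_gamma_size(crtc, 256);
}
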
++
++/**
++ * drm_crtc - central CRTC control structure
++ * @enabled: is this CRTC enabled?
++ * @x: x position on screen
++ * @y: y position on screen
++ * @funcs: CRTC control functions
++ *
++ * Each CRTC may have one or more connectors associated with it.  This structure
++ * allows the CRTC to be controlled.
++ */
++struct drm_crtc {
 +	struct drm_device *dev;
-+	struct drm_i915_gem_object *old_fb_obj;
-+	struct drm_i915_gem_object *pending_flip_obj;
-+	struct drm_pending_vblank_event *event;
-+	int pending;
-+	bool enable_stall_check;
++	struct list_head head;
++
++	struct drm_mode_object base;
++
++	/* framebuffer the connector is currently bound to */
++	struct drm_framebuffer *fb;
++
++	bool enabled;
++
++	/* Requested mode from modesetting. */
++	struct drm_display_mode mode;
++
++	/* Programmed mode in hw, after adjustments for encoders,
++	 * crtc, panel scaling etc. Needed for timestamping etc.
++	 */
++	struct drm_display_mode hwmode;
++
++	int x, y;
++	const struct drm_crtc_funcs *funcs;
++
++	/* CRTC gamma size for reporting to userspace */
++	uint32_t gamma_size;
++	uint16_t *gamma_store;
++
++	/* Constants needed for precise vblank and swap timestamping. */
++	int64_t framedur_ns, linedur_ns, pixeldur_ns;
++
++	/* if you are using the helper */
++	void *helper_private;
 +};
 +
-+struct intel_fbc_work {
-+	struct timeout_task task;
++
++/**
++ * drm_connector_funcs - control connectors on a given device
++ * @dpms: set power state (see drm_crtc_funcs above)
++ * @save: save connector state
++ * @restore: restore connector state
++ * @reset: reset connector after state has been invalidated (e.g. resume)
++ * @mode_valid: is this mode valid on the given connector?
++ * @mode_fixup: try to fixup proposed mode for this connector
++ * @mode_set: set this mode
++ * @detect: is this connector active?
++ * @get_modes: get mode list for this connector
++ * @set_property: property for this connector may need update
++ * @destroy: make object go away
++ * @force: notify the driver the connector is forced on
++ *
++ * Each CRTC may have one or more connectors attached to it.  The functions
++ * below allow the core DRM code to control connectors, enumerate available modes,
++ * etc.
++ */
++struct drm_connector_funcs {
++	void (*dpms)(struct drm_connector *connector, int mode);
++	void (*save)(struct drm_connector *connector);
++	void (*restore)(struct drm_connector *connector);
++	void (*reset)(struct drm_connector *connector);
++
++	/* Check to see if anything is attached to the connector.
++	 * @force is set to false whilst polling, true when checking the
++	 * connector due to user request. @force can be used by the driver
++	 * to avoid expensive, destructive operations during automated
++	 * probing.
++	 */
++	enum drm_connector_status (*detect)(struct drm_connector *connector,
++					    bool force);
++	int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
++	int (*set_property)(struct drm_connector *connector, struct drm_property *property,
++			     uint64_t val);
++	void (*destroy)(struct drm_connector *connector);
++	void (*force)(struct drm_connector *connector);
++};
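
A hypothetical detect() implementation respecting the @force convention documented above might look like this; mydrv_hpd_asserted(), mydrv_load_detect() and mydrv_connector_destroy() stand in for driver-specific code and are not part of this patch, while drm_helper_probe_single_connector_modes() is added in drm_crtc_helper.c below:

static enum drm_connector_status
mydrv_vga_detect(struct drm_connector *connector, bool force)
{

	/* Cheap, non-destructive check (e.g. a hotplug status bit). */
	if (mydrv_hpd_asserted(connector))
		return (connector_status_connected);

	/*
	 * Expensive/destructive load detection on a DAC is only run
	 * when the user explicitly requested the probe.
	 */
	if (!force)
		return (connector_status_unknown);

	return (mydrv_load_detect(connector) ?
	    connector_status_connected : connector_status_disconnected);
}

static const struct drm_connector_funcs mydrv_vga_funcs = {
	.detect = mydrv_vga_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = mydrv_connector_destroy,
};
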
++
++struct drm_encoder_funcs {
++	void (*reset)(struct drm_encoder *encoder);
++	void (*destroy)(struct drm_encoder *encoder);
++};
++
++#define DRM_CONNECTOR_MAX_UMODES 16
++#define DRM_CONNECTOR_MAX_PROPERTY 16
++#define DRM_CONNECTOR_LEN 32
++#define DRM_CONNECTOR_MAX_ENCODER 2
++
++/**
++ * drm_encoder - central DRM encoder structure
++ */
++struct drm_encoder {
++	struct drm_device *dev;
++	struct list_head head;
++
++	struct drm_mode_object base;
++	int encoder_type;
++	uint32_t possible_crtcs;
++	uint32_t possible_clones;
++
 +	struct drm_crtc *crtc;
++	const struct drm_encoder_funcs *funcs;
++	void *helper_private;
++};
++
++enum drm_connector_force {
++	DRM_FORCE_UNSPECIFIED,
++	DRM_FORCE_OFF,
++	DRM_FORCE_ON,         /* force on analog part normally */
++	DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
++};
++
++/* should we poll this connector for connects and disconnects */
++/* hot plug detectable */
++#define DRM_CONNECTOR_POLL_HPD (1 << 0)
++/* poll for connections */
++#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
++/* can cleanly poll for disconnections without flickering the screen */
++/* DACs should rarely do this without a lot of testing */
++#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
++
++#define MAX_ELD_BYTES	128
++
++/**
++ * drm_connector - central DRM connector control structure
++ * @crtc: CRTC this connector is currently connected to, NULL if none
++ * @interlace_allowed: can this connector handle interlaced modes?
++ * @doublescan_allowed: can this connector handle doublescan?
++ * @available_modes: modes available on this connector (from get_modes() + user)
++ * @initial_x: initial x position for this connector
++ * @initial_y: initial y position for this connector
++ * @status: connector connected?
++ * @funcs: connector control functions
++ *
++ * Each connector may be connected to one or more CRTCs, or may be clonable by
++ * another connector if they can share a CRTC.  Each connector also has a specific
++ * position in the broader display (referred to as a 'screen' though it could
++ * span multiple monitors).
++ */
++struct drm_connector {
++	struct drm_device *dev;
++	/* struct device kdev; XXXKIB */
++	struct device_attribute *attr;
++	struct list_head head;
++
++	struct drm_mode_object base;
++
++	int connector_type;
++	int connector_type_id;
++	bool interlace_allowed;
++	bool doublescan_allowed;
++	struct list_head modes; /* list of modes on this connector */
++
++	int initial_x, initial_y;
++	enum drm_connector_status status;
++
++	/* these are modes added by probing with DDC or the BIOS */
++	struct list_head probed_modes;
++
++	struct drm_display_info display_info;
++	const struct drm_connector_funcs *funcs;
++
++	struct list_head user_modes;
++	struct drm_property_blob *edid_blob_ptr;
++	u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
++	uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
++
++	uint8_t polled; /* DRM_CONNECTOR_POLL_* */
++
++	/* requested DPMS state */
++	int dpms;
++
++	void *helper_private;
++
++	/* forced on connector */
++	enum drm_connector_force force;
++	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
++	uint32_t force_encoder_id;
++	struct drm_encoder *encoder; /* currently active encoder */
++
++	/* EDID bits */
++	uint8_t eld[MAX_ELD_BYTES];
++	bool dvi_dual;
++	int max_tmds_clock;	/* in MHz */
++	bool latency_present[2];
++	int video_latency[2];	/* [0]: progressive, [1]: interlaced */
++	int audio_latency[2];
++
++	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
++};
++
++/**
++ * drm_plane_funcs - driver plane control functions
++ * @update_plane: update the plane configuration
++ * @disable_plane: shut down the plane
++ * @destroy: clean up plane resources
++ */
++struct drm_plane_funcs {
++	int (*update_plane)(struct drm_plane *plane,
++			    struct drm_crtc *crtc, struct drm_framebuffer *fb,
++			    int crtc_x, int crtc_y,
++			    unsigned int crtc_w, unsigned int crtc_h,
++			    uint32_t src_x, uint32_t src_y,
++			    uint32_t src_w, uint32_t src_h);
++	int (*disable_plane)(struct drm_plane *plane);
++	void (*destroy)(struct drm_plane *plane);
++};
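
A minimal registration sketch for a plane, using drm_plane_init() declared later in this header (the mydrv_* callbacks are hypothetical; the FOURCC codes come from drm_fourcc.h):

static const struct drm_plane_funcs mydrv_plane_funcs = {
	.update_plane = mydrv_update_plane,
	.disable_plane = mydrv_disable_plane,
	.destroy = mydrv_destroy_plane,
};

static const uint32_t mydrv_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
};

static int
mydrv_plane_register(struct drm_device *dev, struct drm_plane *plane)
{

	/* Bit 0 set in possible_crtcs: usable on the first CRTC only. */
	return (drm_plane_init(dev, plane, 1 << 0, &mydrv_plane_funcs,
	    mydrv_plane_formats,
	    sizeof(mydrv_plane_formats) / sizeof(mydrv_plane_formats[0]),
	    false));
}
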
++
++/**
++ * drm_plane - central DRM plane control structure
++ * @dev: DRM device this plane belongs to
++ * @head: for list management
++ * @base: base mode object
++ * @possible_crtcs: pipes this plane can be bound to
++ * @format_types: array of formats supported by this plane
++ * @format_count: number of formats supported
++ * @crtc: currently bound CRTC
++ * @fb: currently bound fb
++ * @gamma_size: size of gamma table
++ * @gamma_store: gamma correction table
++ * @enabled: enabled flag
++ * @funcs: helper functions
++ * @helper_private: storage for driver layer
++ */
++struct drm_plane {
++	struct drm_device *dev;
++	struct list_head head;
++
++	struct drm_mode_object base;
++
++	uint32_t possible_crtcs;
++	uint32_t *format_types;
++	uint32_t format_count;
++
++	struct drm_crtc *crtc;
 +	struct drm_framebuffer *fb;
-+	int interval;
++
++	/* CRTC gamma size for reporting to userspace */
++	uint32_t gamma_size;
++	uint16_t *gamma_store;
++
++	bool enabled;
++
++	const struct drm_plane_funcs *funcs;
++	void *helper_private;
 +};
 +
-+int intel_ddc_get_modes(struct drm_connector *c, device_t adapter);
-+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
++/**
++ * struct drm_mode_set
++ *
++ * Represents a single CRTC together with the connectors it drives, the
++ * mode to use, and the framebuffer to scan out from.
++ *
++ * This is used to set modes.
++ */
++struct drm_mode_set {
++	struct list_head head;
 +
-+extern void intel_attach_force_audio_property(struct drm_connector *connector);
-+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
++	struct drm_framebuffer *fb;
++	struct drm_crtc *crtc;
++	struct drm_display_mode *mode;
 +
-+extern void intel_crt_init(struct drm_device *dev);
-+extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
-+extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
-+extern void intel_dvo_init(struct drm_device *dev);
-+extern void intel_tv_init(struct drm_device *dev);
-+extern void intel_mark_busy(struct drm_device *dev,
-+			    struct drm_i915_gem_object *obj);
-+extern bool intel_lvds_init(struct drm_device *dev);
-+extern void intel_dp_init(struct drm_device *dev, int dp_reg);
-+void
-+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
-+		 struct drm_display_mode *adjusted_mode);
-+extern bool intel_dpd_is_edp(struct drm_device *dev);
-+extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
-+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
-+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
++	uint32_t x;
++	uint32_t y;
 +
-+/* intel_panel.c */
-+extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
-+				   struct drm_display_mode *adjusted_mode);
-+extern void intel_pch_panel_fitting(struct drm_device *dev,
-+				    int fitting_mode,
-+				    struct drm_display_mode *mode,
-+				    struct drm_display_mode *adjusted_mode);
-+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
-+extern u32 intel_panel_get_backlight(struct drm_device *dev);
-+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-+extern int intel_panel_setup_backlight(struct drm_device *dev);
-+extern void intel_panel_enable_backlight(struct drm_device *dev);
-+extern void intel_panel_disable_backlight(struct drm_device *dev);
-+extern void intel_panel_destroy_backlight(struct drm_device *dev);
-+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
++	struct drm_connector **connectors;
++	size_t num_connectors;
++};
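
For example, a caller that wants one connector scanned out from a given framebuffer would fill the structure roughly as follows and hand it to the CRTC's set_config hook (mydrv_apply_mode is a hypothetical wrapper; crtc, fb, mode and connector are assumed to have been looked up already):

static int
mydrv_apply_mode(struct drm_crtc *crtc, struct drm_framebuffer *fb,
    struct drm_display_mode *mode, struct drm_connector *connector)
{
	struct drm_mode_set set;

	memset(&set, 0, sizeof(set));
	set.crtc = crtc;
	set.fb = fb;
	set.mode = mode;
	set.x = 0;		/* scan out from the top-left of fb */
	set.y = 0;
	set.connectors = &connector;
	set.num_connectors = 1;

	/*
	 * Hand the request to the driver's set_config hook, typically
	 * drm_crtc_helper_set_config() from drm_crtc_helper.c below.
	 */
	return (crtc->funcs->set_config(&set));
}
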
 +
-+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-+extern void intel_encoder_prepare(struct drm_encoder *encoder);
-+extern void intel_encoder_commit(struct drm_encoder *encoder);
-+extern void intel_encoder_destroy(struct drm_encoder *encoder);
++/**
++ * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
++ */
++struct drm_mode_config_funcs {
++	int (*fb_create)(struct drm_device *dev,
++	    struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd,
++	    struct drm_framebuffer **res);
++	void (*output_poll_changed)(struct drm_device *dev);
++};
 +
-+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
-+{
-+	return to_intel_connector(connector)->encoder;
-+}
++struct drm_mode_group {
++	uint32_t num_crtcs;
++	uint32_t num_encoders;
++	uint32_t num_connectors;
 +
-+extern void intel_connector_attach_encoder(struct intel_connector *connector,
-+					   struct intel_encoder *encoder);
-+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
++	/* list of object IDs for this group */
++	uint32_t *id_list;
++};
 +
-+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-+						    struct drm_crtc *crtc);
-+int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
-+				struct drm_file *file_priv);
-+extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
++/**
++ * drm_mode_config - Mode configuration control structure
++ *
++ */
++struct drm_mode_config {
++	struct sx mutex; /* protects configuration (mode lists etc.) */
++	struct drm_gem_names crtc_names; /* use this idr for all IDs, fb, crtc, connector, modes */
++	/* this is limited to one for now */
++	int num_fb;
++	struct list_head fb_list;
++	int num_connector;
++	struct list_head connector_list;
++	int num_encoder;
++	struct list_head encoder_list;
++	int num_plane;
++	struct list_head plane_list;
 +
-+struct intel_load_detect_pipe {
-+	struct drm_framebuffer *release_fb;
-+	bool load_detect_temp;
-+	int dpms_mode;
++	int num_crtc;
++	struct list_head crtc_list;
++
++	struct list_head property_list;
++
++	int min_width, min_height;
++	int max_width, max_height;
++	struct drm_mode_config_funcs *funcs;
++	resource_size_t fb_base;
++
++	/* output poll support */
++	bool poll_enabled;
++	struct timeout_task output_poll_task;
++
++	/* pointers to standard properties */
++	struct list_head property_blob_list;
++	struct drm_property *edid_property;
++	struct drm_property *dpms_property;
++
++	/* DVI-I properties */
++	struct drm_property *dvi_i_subconnector_property;
++	struct drm_property *dvi_i_select_subconnector_property;
++
++	/* TV properties */
++	struct drm_property *tv_subconnector_property;
++	struct drm_property *tv_select_subconnector_property;
++	struct drm_property *tv_mode_property;
++	struct drm_property *tv_left_margin_property;
++	struct drm_property *tv_right_margin_property;
++	struct drm_property *tv_top_margin_property;
++	struct drm_property *tv_bottom_margin_property;
++	struct drm_property *tv_brightness_property;
++	struct drm_property *tv_contrast_property;
++	struct drm_property *tv_flicker_reduction_property;
++	struct drm_property *tv_overscan_property;
++	struct drm_property *tv_saturation_property;
++	struct drm_property *tv_hue_property;
++
++	/* Optional properties */
++	struct drm_property *scaling_mode_property;
++	struct drm_property *dithering_mode_property;
++	struct drm_property *dirty_info_property;
 +};
-+extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
-+				       struct drm_connector *connector,
-+				       struct drm_display_mode *mode,
-+				       struct intel_load_detect_pipe *old);
-+extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
-+					   struct drm_connector *connector,
-+					   struct intel_load_detect_pipe *old);
 +
-+extern void intelfb_restore(void);
-+extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-+				    u16 blue, int regno);
-+extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-+				    u16 *blue, int regno);
-+extern void intel_enable_clock_gating(struct drm_device *dev);
-+extern void ironlake_enable_drps(struct drm_device *dev);
-+extern void ironlake_disable_drps(struct drm_device *dev);
-+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-+extern void gen6_disable_rps(struct drm_device *dev);
-+extern void intel_init_emon(struct drm_device *dev);
++#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
++#define obj_to_connector(x) container_of(x, struct drm_connector, base)
++#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
++#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
++#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
++#define obj_to_property(x) container_of(x, struct drm_property, base)
++#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
++#define obj_to_plane(x) container_of(x, struct drm_plane, base)
 +
-+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-+				      struct drm_i915_gem_object *obj,
-+				      struct intel_ring_buffer *pipelined);
++#if defined(MODE_SETTING_LOCKING_IS_NOT_BROKEN)
++#define	DRM_MODE_CONFIG_ASSERT_LOCKED(dev) \
++	sx_assert(&dev->mode_config.mutex, SA_XLOCKED)
++#else
++#define	DRM_MODE_CONFIG_ASSERT_LOCKED(dev)
++#endif
 +
-+extern int intel_framebuffer_init(struct drm_device *dev,
-+				  struct intel_framebuffer *ifb,
-+				  struct drm_mode_fb_cmd2 *mode_cmd,
-+				  struct drm_i915_gem_object *obj);
-+extern int intel_fbdev_init(struct drm_device *dev);
-+extern void intel_fbdev_fini(struct drm_device *dev);
++extern char *drm_get_dirty_info_name(int val);
++extern char *drm_get_connector_status_name(enum drm_connector_status status);
 +
-+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
-+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
-+extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
++extern void drm_crtc_init(struct drm_device *dev,
++			  struct drm_crtc *crtc,
++			  const struct drm_crtc_funcs *funcs);
++extern void drm_crtc_cleanup(struct drm_crtc *crtc);
 +
-+extern void intel_setup_overlay(struct drm_device *dev);
-+extern void intel_cleanup_overlay(struct drm_device *dev);
-+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
++extern void drm_connector_init(struct drm_device *dev,
++			    struct drm_connector *connector,
++			    const struct drm_connector_funcs *funcs,
++			    int connector_type);
++
++extern void drm_connector_cleanup(struct drm_connector *connector);
++
++extern void drm_encoder_init(struct drm_device *dev,
++			     struct drm_encoder *encoder,
++			     const struct drm_encoder_funcs *funcs,
++			     int encoder_type);
++
++extern int drm_plane_init(struct drm_device *dev,
++			  struct drm_plane *plane,
++			  unsigned long possible_crtcs,
++			  const struct drm_plane_funcs *funcs,
++			  const uint32_t *formats, uint32_t format_count,
++			  bool priv);
++extern void drm_plane_cleanup(struct drm_plane *plane);
++
++extern void drm_encoder_cleanup(struct drm_encoder *encoder);
++
++extern char *drm_get_connector_name(struct drm_connector *connector);
++extern char *drm_get_dpms_name(int val);
++extern char *drm_get_dvi_i_subconnector_name(int val);
++extern char *drm_get_dvi_i_select_name(int val);
++extern char *drm_get_tv_subconnector_name(int val);
++extern char *drm_get_tv_select_name(int val);
++extern void drm_fb_release(struct drm_file *file_priv);
++extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
++extern struct edid *drm_get_edid(struct drm_connector *connector,
++				 device_t adapter);
++extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
++extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
++extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
++extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
++    const struct drm_display_mode *mode);
++extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
++extern void drm_mode_config_init(struct drm_device *dev);
++extern void drm_mode_config_reset(struct drm_device *dev);
++extern void drm_mode_config_cleanup(struct drm_device *dev);
++extern void drm_mode_set_name(struct drm_display_mode *mode);
++extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
++extern int drm_mode_width(struct drm_display_mode *mode);
++extern int drm_mode_height(struct drm_display_mode *mode);
++
++/* for use by the fb module */
++extern int drm_mode_attachmode_crtc(struct drm_device *dev,
++				    struct drm_crtc *crtc,
++				    struct drm_display_mode *mode);
++extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
++
++extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
++extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
++extern void drm_mode_list_concat(struct list_head *head,
++				 struct list_head *new);
++extern void drm_mode_validate_size(struct drm_device *dev,
++				   struct list_head *mode_list,
++				   int maxX, int maxY, int maxPitch);
++extern void drm_mode_validate_clocks(struct drm_device *dev,
++			      struct list_head *mode_list,
++			      int *min, int *max, int n_ranges);
++extern void drm_mode_prune_invalid(struct drm_device *dev,
++				   struct list_head *mode_list, bool verbose);
++extern void drm_mode_sort(struct list_head *mode_list);
++extern int drm_mode_hsync(const struct drm_display_mode *mode);
++extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
++extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
++				  int adjust_flags);
++extern void drm_mode_connector_list_update(struct drm_connector *connector);
++extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
++						struct edid *edid);
++extern int drm_connector_property_set_value(struct drm_connector *connector,
++					 struct drm_property *property,
++					 uint64_t value);
++extern int drm_connector_property_get_value(struct drm_connector *connector,
++					 struct drm_property *property,
++					 uint64_t *value);
++extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
++extern void drm_framebuffer_set_object(struct drm_device *dev,
++				       unsigned long handle);
++extern int drm_framebuffer_init(struct drm_device *dev,
++				struct drm_framebuffer *fb,
++				const struct drm_framebuffer_funcs *funcs);
++extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
++extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
++extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
++extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
++extern bool drm_crtc_in_use(struct drm_crtc *crtc);
++
++extern int drm_connector_attach_property(struct drm_connector *connector,
++				      struct drm_property *property, uint64_t init_val);
++extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
++						const char *name, int num_values);
++extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
++extern int drm_property_add_enum(struct drm_property *property, int index,
++				 uint64_t value, const char *name);
++extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
++extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
++				     char *formats[]);
++extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
++extern int drm_mode_create_dithering_property(struct drm_device *dev);
++extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
++extern char *drm_get_encoder_name(struct drm_encoder *encoder);
++
++extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
++					     struct drm_encoder *encoder);
++extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
++					   struct drm_encoder *encoder);
++extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
++					 int gamma_size);
++extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
++		uint32_t id, uint32_t type);
++/* IOCTLs */
++extern int drm_mode_getresources(struct drm_device *dev,
++				 void *data, struct drm_file *file_priv);
++extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
 +				   struct drm_file *file_priv);
-+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
-+			       struct drm_file *file_priv);
++extern int drm_mode_getcrtc(struct drm_device *dev,
++			    void *data, struct drm_file *file_priv);
++extern int drm_mode_getconnector(struct drm_device *dev,
++			      void *data, struct drm_file *file_priv);
++extern int drm_mode_setcrtc(struct drm_device *dev,
++			    void *data, struct drm_file *file_priv);
++extern int drm_mode_getplane(struct drm_device *dev,
++			       void *data, struct drm_file *file_priv);
++extern int drm_mode_setplane(struct drm_device *dev,
++			       void *data, struct drm_file *file_priv);
++extern int drm_mode_cursor_ioctl(struct drm_device *dev,
++				void *data, struct drm_file *file_priv);
++extern int drm_mode_addfb(struct drm_device *dev,
++			  void *data, struct drm_file *file_priv);
++extern int drm_mode_addfb2(struct drm_device *dev,
++			   void *data, struct drm_file *file_priv);
++extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
++extern int drm_mode_rmfb(struct drm_device *dev,
++			 void *data, struct drm_file *file_priv);
++extern int drm_mode_getfb(struct drm_device *dev,
++			  void *data, struct drm_file *file_priv);
++extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
++				  void *data, struct drm_file *file_priv);
++extern int drm_mode_addmode_ioctl(struct drm_device *dev,
++				  void *data, struct drm_file *file_priv);
++extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
++				 void *data, struct drm_file *file_priv);
++extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
++				     void *data, struct drm_file *file_priv);
++extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
++				     void *data, struct drm_file *file_priv);
 +
-+extern void intel_fb_output_poll_changed(struct drm_device *dev);
-+extern void intel_fb_restore_mode(struct drm_device *dev);
++extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
++				      void *data, struct drm_file *file_priv);
++extern int drm_mode_getblob_ioctl(struct drm_device *dev,
++				  void *data, struct drm_file *file_priv);
++extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
++					      void *data, struct drm_file *file_priv);
++extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
++				  void *data, struct drm_file *file_priv);
++extern int drm_mode_replacefb(struct drm_device *dev,
++			      void *data, struct drm_file *file_priv);
++extern int drm_mode_getencoder(struct drm_device *dev,
++			       void *data, struct drm_file *file_priv);
++extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
++				    void *data, struct drm_file *file_priv);
++extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
++				    void *data, struct drm_file *file_priv);
++extern bool drm_detect_hdmi_monitor(struct edid *edid);
++extern bool drm_detect_monitor_audio(struct edid *edid);
++extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
++				    void *data, struct drm_file *file_priv);
++extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
++				int hdisplay, int vdisplay, int vrefresh,
++				bool reduced, bool interlaced, bool margins);
++extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
++				int hdisplay, int vdisplay, int vrefresh,
++				bool interlaced, int margins);
++extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
++				int hdisplay, int vdisplay, int vrefresh,
++				bool interlaced, int margins, int GTF_M,
++				int GTF_2C, int GTF_K, int GTF_2J);
++extern int drm_add_modes_noedid(struct drm_connector *connector,
++				int hdisplay, int vdisplay);
 +
-+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-+			bool state);
-+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
++extern int drm_edid_header_is_valid(const u8 *raw_edid);
++extern bool drm_edid_is_valid(struct edid *edid);
++struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
++					   int hsize, int vsize, int fresh);
 +
-+extern void intel_init_clock_gating(struct drm_device *dev);
-+extern void intel_write_eld(struct drm_encoder *encoder,
-+			    struct drm_display_mode *mode);
-+extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
++extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
++				      void *data, struct drm_file *file_priv);
++extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
++				    void *data, struct drm_file *file_priv);
++extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
++				      void *data, struct drm_file *file_priv);
 +
-+/* For use by IVB LP watermark workaround in intel_sprite.c */
-+extern void sandybridge_update_wm(struct drm_device *dev);
-+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
-+					   uint32_t sprite_width,
-+					   int pixel_size);
-+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
-+				     struct drm_file *file_priv);
-+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
-+				     struct drm_file *file_priv);
++extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
++				 int *bpp);
++#endif /* __DRM_CRTC_H__ */
+diff --git a/sys/dev/drm/drm_crtc_helper.c b/sys/dev/drm/drm_crtc_helper.c
+new file mode 100644
+index 0000000..b63d465
+--- /dev/null
++++ sys/dev/drm/drm_crtc_helper.c
+@@ -0,0 +1,1038 @@
++/*
++ * Copyright (c) 2006-2008 Intel Corporation
++ * Copyright (c) 2007 Dave Airlie <airlied at linux.ie>
++ *
++ * DRM core CRTC related functions
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission.  The copyright holders make no representations
++ * about the suitability of this software for any purpose.  It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ *
++ * Authors:
++ *      Keith Packard
++ *	Eric Anholt <eric at anholt.net>
++ *      Dave Airlie <airlied at linux.ie>
++ *      Jesse Barnes <jesse.barnes at intel.com>
++ */
 +
-+#endif
-
-Property changes on: stable/9/sys/dev/drm/intel_drv.h
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/i915_drm.h
-===================================================================
-diff --git sys/dev/drm/i915_drm.h sys/dev/drm/i915_drm.h
---- sys/dev/drm/i915_drm.h	(revision 230124)
-+++ sys/dev/drm/i915_drm.h	(working copy)
-@@ -195,6 +195,15 @@
- #define DRM_I915_GEM_SW_FINISH	0x20
- #define DRM_I915_GEM_SET_TILING	0x21
- #define DRM_I915_GEM_GET_TILING	0x22
-+#define DRM_I915_GEM_GET_APERTURE 0x23
-+#define DRM_I915_GEM_MMAP_GTT	0x24
-+#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
-+#define DRM_I915_GEM_MADVISE	0x26
-+#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
-+#define DRM_I915_OVERLAY_ATTRS	0x28
-+#define DRM_I915_GEM_EXECBUFFER2	0x29
-+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
-+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
- 
- #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
- #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
-@@ -216,6 +225,7 @@
- #define DRM_IOCTL_I915_EXECBUFFER	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
- #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
- #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
-+#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
- #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
- #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
- #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
-@@ -226,10 +236,18 @@
- #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
- #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
- #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
-+#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
- #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
- #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
- #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
- #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
-+#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
-+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
-+#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
-+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
-+#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
-+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
-+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
- 
- /* Asynchronous page flipping:
-  */
-@@ -284,6 +302,17 @@
- #define I915_PARAM_LAST_DISPATCH         3
- #define I915_PARAM_CHIPSET_ID            4
- #define I915_PARAM_HAS_GEM               5
-+#define I915_PARAM_NUM_FENCES_AVAIL      6
-+#define I915_PARAM_HAS_OVERLAY           7
-+#define I915_PARAM_HAS_PAGEFLIPPING	 8
-+#define I915_PARAM_HAS_EXECBUF2          9
-+#define I915_PARAM_HAS_BSD		 10
-+#define I915_PARAM_HAS_BLT		 11
-+#define I915_PARAM_HAS_RELAXED_FENCING	 12
-+#define I915_PARAM_HAS_COHERENT_RINGS	 13
-+#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
-+#define I915_PARAM_HAS_RELAXED_DELTA	 15
-+#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
- 
- typedef struct drm_i915_getparam {
- 	int param;
-@@ -295,6 +324,7 @@
- #define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
- #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
- #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
-+#define I915_SETPARAM_NUM_USED_FENCES                     4
- 
- typedef struct drm_i915_setparam {
- 	int param;
-@@ -500,6 +530,18 @@
- 	uint64_t addr_ptr;	/* void *, but pointers are not 32/64 compatible */
- };
- 
-+struct drm_i915_gem_mmap_gtt {
-+	/** Handle for the object being mapped. */
-+	uint32_t handle;
-+	uint32_t pad;
-+	/**
-+	 * Fake offset to use for subsequent mmap call
-+	 *
-+	 * This is a fixed-size type for 32/64 compatibility.
-+	 */
-+	uint64_t offset;
-+};
++#include <sys/param.h>
++#include <sys/systm.h>
++#include "dev/drm/drmP.h"
++#include "dev/drm/drm_crtc.h"
++#include "dev/drm/drm_fourcc.h"
++#include "dev/drm/drm_crtc_helper.h"
++#include "dev/drm/drm_fb_helper.h"
 +
- struct drm_i915_gem_set_domain {
- 	/** Handle for the object */
- 	uint32_t handle;
-@@ -633,6 +675,76 @@
- 	uint64_t cliprects_ptr;	/* struct drm_clip_rect *cliprects */
- };
- 
-+struct drm_i915_gem_exec_object2 {
-+	/**
-+	 * User's handle for a buffer to be bound into the GTT for this
-+	 * operation.
++bool
++drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
++    struct drm_cmdline_mode *cmdline_mode)
++{
++	char *tun_var_name, *tun_mode;
++	static const char tun_prefix[] = "drm_mode.";
++	bool res;
++
++	res = false;
++	tun_var_name = malloc(sizeof(tun_prefix) +
++	    strlen(drm_get_connector_name(connector)), M_TEMP, M_WAITOK);
++	strcpy(tun_var_name, tun_prefix);
++	strcat(tun_var_name, drm_get_connector_name(connector));
++	tun_mode = getenv(tun_var_name);
++	if (tun_mode != NULL) {
++		res = drm_mode_parse_command_line_for_connector(tun_mode,
++		    connector, cmdline_mode);
++		freeenv(tun_mode);
++	}
++	free(tun_var_name, M_TEMP);
++	return (res);
++}
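
As a usage sketch of the tunable consumed above, a mode can be forced for a connector from /boot/loader.conf; the connector name "LVDS-1" and the exact mode-string syntax accepted by drm_mode_parse_command_line_for_connector() are assumptions here, not something this patch defines:

drm_mode.LVDS-1="1024x768"
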
++
++static bool drm_kms_helper_poll = true;
++
++static void drm_mode_validate_flag(struct drm_connector *connector,
++				   int flags)
++{
++	struct drm_display_mode *mode, *t;
++
++	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
++		return;
++
++	list_for_each_entry_safe(mode, t, &connector->modes, head) {
++		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
++				!(flags & DRM_MODE_FLAG_INTERLACE))
++			mode->status = MODE_NO_INTERLACE;
++		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
++				!(flags & DRM_MODE_FLAG_DBLSCAN))
++			mode->status = MODE_NO_DBLESCAN;
++	}
++
++	return;
++}
++
++/**
++ * drm_helper_probe_single_connector_modes - get complete set of display modes
++ * @connector: connector to probe
++ * @maxX: max width for modes
++ * @maxY: max height for modes
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Probe @connector for display modes.  Detected modes are first added to the
++ * connector's probed_modes list, then culled (based on validity and the
++ * @maxX, @maxY parameters) and put into the normal modes list.
++ *
++ * Intended to be used either at bootup time or when major configuration
++ * changes have occurred.
++ *
++ * FIXME: take into account monitor limits
++ *
++ * RETURNS:
++ * Number of modes found on @connector.
++ */
++int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
++					    uint32_t maxX, uint32_t maxY)
++{
++	struct drm_device *dev = connector->dev;
++	struct drm_display_mode *mode, *t;
++	struct drm_connector_helper_funcs *connector_funcs =
++		connector->helper_private;
++	struct drm_cmdline_mode cmdline_mode;
++	int count = 0;
++	int mode_flags = 0;
++
++	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
++			drm_get_connector_name(connector));
++	/* set all modes to the unverified state */
++	list_for_each_entry_safe(mode, t, &connector->modes, head)
++		mode->status = MODE_UNVERIFIED;
++
++	if (connector->force) {
++		if (connector->force == DRM_FORCE_ON)
++			connector->status = connector_status_connected;
++		else
++			connector->status = connector_status_disconnected;
++		if (connector->funcs->force)
++			connector->funcs->force(connector);
++	} else {
++		connector->status = connector->funcs->detect(connector, true);
++		drm_kms_helper_poll_enable(dev);
++	}
++
++	if (connector->status == connector_status_disconnected) {
++		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
++			connector->base.id, drm_get_connector_name(connector));
++		drm_mode_connector_update_edid_property(connector, NULL);
++		goto prune;
++	}
++
++	count = (*connector_funcs->get_modes)(connector);
++	if (count == 0 && drm_fetch_cmdline_mode_from_kenv(connector,
++	    &cmdline_mode)) {
++		mode = drm_mode_create_from_cmdline_mode(dev,
++		    &cmdline_mode);
++		if (mode != NULL) {
++			DRM_DEBUG_KMS(
++	"[CONNECTOR:%d:%s] found manual override ",
++			    connector->base.id,
++			    drm_get_connector_name(connector));
++			drm_mode_debug_printmodeline(mode);
++			drm_mode_probed_add(connector, mode);
++			count++;
++		} else {
++			DRM_ERROR(
++	"[CONNECTOR:%d:%s] manual override mode: parse error\n",
++			    connector->base.id,
++			    drm_get_connector_name(connector));
++		}
++	}
++	if (count == 0 && connector->status == connector_status_connected)
++		count = drm_add_modes_noedid(connector, 1024, 768);
++	if (count == 0)
++		goto prune;
++
++	drm_mode_connector_list_update(connector);
++
++	if (maxX && maxY)
++		drm_mode_validate_size(dev, &connector->modes, maxX,
++				       maxY, 0);
++
++	if (connector->interlace_allowed)
++		mode_flags |= DRM_MODE_FLAG_INTERLACE;
++	if (connector->doublescan_allowed)
++		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
++	drm_mode_validate_flag(connector, mode_flags);
++
++	list_for_each_entry_safe(mode, t, &connector->modes, head) {
++		if (mode->status == MODE_OK)
++			mode->status = connector_funcs->mode_valid(connector,
++								   mode);
++	}
++
++prune:
++	drm_mode_prune_invalid(dev, &connector->modes, true);
++
++	if (list_empty(&connector->modes))
++		return 0;
++
++	drm_mode_sort(&connector->modes);
++
++	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
++			drm_get_connector_name(connector));
++	list_for_each_entry_safe(mode, t, &connector->modes, head) {
++		mode->vrefresh = drm_mode_vrefresh(mode);
++
++		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++		drm_mode_debug_printmodeline(mode);
++	}
++
++	return count;
++}
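
A caller-side sketch, assuming the mode config lock is already held as required above and that dev is the struct drm_device being set up:

	struct drm_connector *connector;
	int count;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
	    head) {
		count = drm_helper_probe_single_connector_modes(connector,
		    1920, 1200);
		DRM_DEBUG_KMS("%d modes on [CONNECTOR:%d:%s]\n", count,
		    connector->base.id, drm_get_connector_name(connector));
	}
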
++
++/**
++ * drm_helper_encoder_in_use - check if a given encoder is in use
++ * @encoder: encoder to check
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Walk @encoder's DRM device's mode_config and see if it's in use.
++ *
++ * RETURNS:
++ * True if @encoder is part of the mode_config, false otherwise.
++ */
++bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
++{
++	struct drm_connector *connector;
++	struct drm_device *dev = encoder->dev;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		if (connector->encoder == encoder)
++			return true;
++	return false;
++}
++
++/**
++ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
++ * @crtc: CRTC to check
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Walk @crtc's DRM device's mode_config and see if it's in use.
++ *
++ * RETURNS:
++ * True if @crtc is part of the mode_config, false otherwise.
++ */
++bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
++{
++	struct drm_encoder *encoder;
++	struct drm_device *dev = crtc->dev;
++	/* FIXME: Locking around list access? */
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
++		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
++			return true;
++	return false;
++}
++
++static void
++drm_encoder_disable(struct drm_encoder *encoder)
++{
++	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
++
++	if (encoder_funcs->disable)
++		(*encoder_funcs->disable)(encoder);
++	else
++		(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
++}
++
++/**
++ * drm_helper_disable_unused_functions - disable unused objects
++ * @dev: DRM device
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
++ * by calling its dpms function, which should power it off.
++ */
++void drm_helper_disable_unused_functions(struct drm_device *dev)
++{
++	struct drm_encoder *encoder;
++	struct drm_connector *connector;
++	struct drm_crtc *crtc;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		if (!connector->encoder)
++			continue;
++		if (connector->status == connector_status_disconnected)
++			connector->encoder = NULL;
++	}
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		if (!drm_helper_encoder_in_use(encoder)) {
++			drm_encoder_disable(encoder);
++			/* disconnect encoder from any connector */
++			encoder->crtc = NULL;
++		}
++	}
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++		crtc->enabled = drm_helper_crtc_in_use(crtc);
++		if (!crtc->enabled) {
++			if (crtc_funcs->disable)
++				(*crtc_funcs->disable)(crtc);
++			else
++				(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
++			crtc->fb = NULL;
++		}
++	}
++}
++
++/**
++ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
++ * @encoder: encoder to test
++ * @crtc: crtc to test
++ *
++ * Return false if @encoder can't be driven by @crtc, true otherwise.
++ */
++static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
++				struct drm_crtc *crtc)
++{
++	struct drm_device *dev;
++	struct drm_crtc *tmp;
++	int crtc_mask = 1;
++
++	if (crtc == NULL)
++		printf("checking null crtc?\n");
++
++	dev = crtc->dev;
++
++	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
++		if (tmp == crtc)
++			break;
++		crtc_mask <<= 1;
++	}
++
++	if (encoder->possible_crtcs & crtc_mask)
++		return true;
++	return false;
++}
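
For example, if @crtc is the third entry on the device's crtc_list, crtc_mask ends up as (1 << 2); an encoder whose possible_crtcs is 0x5 can therefore be driven by the first or third CRTC, but not by the second.
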
++
++/*
++ * Check the CRTC we're going to map each output to vs. its current
++ * CRTC.  If they don't match, we have to disable the output and the CRTC
++ * since the driver will have to re-route things.
++ */
++static void
++drm_crtc_prepare_encoders(struct drm_device *dev)
++{
++	struct drm_encoder_helper_funcs *encoder_funcs;
++	struct drm_encoder *encoder;
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		encoder_funcs = encoder->helper_private;
++		/* Disable unused encoders */
++		if (encoder->crtc == NULL)
++			drm_encoder_disable(encoder);
++		/* Disable encoders whose CRTC is about to change */
++		if (encoder_funcs->get_crtc &&
++		    encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
++			drm_encoder_disable(encoder);
++	}
++}
++
++/**
++ * drm_crtc_set_mode - set a mode
++ * @crtc: CRTC to program
++ * @mode: mode to use
++ * @x: horizontal offset of the scanout into the framebuffer
++ * @y: vertical offset of the scanout into the framebuffer
++ *
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
++ * to fixup or reject the mode prior to trying to set it.
++ *
++ * RETURNS:
++ * True if the mode was set successfully, or false otherwise.
++ */
++bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
++			      struct drm_display_mode *mode,
++			      int x, int y,
++			      struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	struct drm_encoder_helper_funcs *encoder_funcs;
++	int saved_x, saved_y;
++	struct drm_encoder *encoder;
++	bool ret = true;
++
++	crtc->enabled = drm_helper_crtc_in_use(crtc);
++	if (!crtc->enabled)
++		return true;
++
++	adjusted_mode = drm_mode_duplicate(dev, mode);
++
++	saved_hwmode = crtc->hwmode;
++	saved_mode = crtc->mode;
++	saved_x = crtc->x;
++	saved_y = crtc->y;
++
++	/* Update crtc values up front so the driver can rely on them for mode
++	 * setting.
 +	 */
-+	uint32_t handle;
++	crtc->mode = *mode;
++	crtc->x = x;
++	crtc->y = y;
 +
-+	/** Number of relocations to be performed on this buffer */
-+	uint32_t relocation_count;
-+	/**
-+	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
-+	 * the relocations to be performed in this buffer.
++	/* Pass our mode to the connectors and the CRTC to give them a chance to
++	 * adjust it according to limitations or connector properties, and also
++	 * a chance to reject the mode entirely.
 +	 */
-+	uint64_t relocs_ptr;
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 +
-+	/** Required alignment in graphics aperture */
-+	uint64_t alignment;
++		if (encoder->crtc != crtc)
++			continue;
++		encoder_funcs = encoder->helper_private;
++		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
++						      adjusted_mode))) {
++			goto done;
++		}
++	}
 +
-+	/**
-+	 * Returned value of the updated offset of the object, for future
-+	 * presumed_offset writes.
++	if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
++		goto done;
++	}
++	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
++
++	/* Prepare the encoders and CRTCs before setting the mode. */
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++
++		if (encoder->crtc != crtc)
++			continue;
++		encoder_funcs = encoder->helper_private;
++		/* Disable the encoders as the first thing we do. */
++		encoder_funcs->prepare(encoder);
++	}
++
++	drm_crtc_prepare_encoders(dev);
++
++	crtc_funcs->prepare(crtc);
++
++	/* Set up the DPLL and any encoder state that needs to adjust or depend
++	 * on the DPLL.
 +	 */
-+	uint64_t offset;
++	ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
++	if (!ret)
++	    goto done;
 +
-+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
-+	uint64_t flags;
-+	uint64_t rsvd1;
-+	uint64_t rsvd2;
-+};
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 +
-+struct drm_i915_gem_execbuffer2 {
-+	/**
-+	 * List of gem_exec_object2 structs
++		if (encoder->crtc != crtc)
++			continue;
++
++		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
++			encoder->base.id, drm_get_encoder_name(encoder),
++			mode->base.id, mode->name);
++		encoder_funcs = encoder->helper_private;
++		encoder_funcs->mode_set(encoder, mode, adjusted_mode);
++	}
++
++	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
++	crtc_funcs->commit(crtc);
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++
++		if (encoder->crtc != crtc)
++			continue;
++
++		encoder_funcs = encoder->helper_private;
++		encoder_funcs->commit(encoder);
++
++	}
++
++	/* Store real post-adjustment hardware mode. */
++	crtc->hwmode = *adjusted_mode;
++
++	/* Calculate and store various constants which
++	 * are later needed by vblank and swap-completion
++	 * timestamping. They are derived from true hwmode.
 +	 */
-+	uint64_t buffers_ptr;
-+	uint32_t buffer_count;
++	drm_calc_timestamping_constants(crtc);
 +
-+	/** Offset in the batchbuffer to start execution from. */
-+	uint32_t batch_start_offset;
-+	/** Bytes used in batchbuffer from batch_start_offset */
-+	uint32_t batch_len;
-+	uint32_t DR1;
-+	uint32_t DR4;
-+	uint32_t num_cliprects;
-+	/** This is a struct drm_clip_rect *cliprects */
-+	uint64_t cliprects_ptr;
-+#define I915_EXEC_RING_MASK              (7<<0)
-+#define I915_EXEC_DEFAULT                (0<<0)
-+#define I915_EXEC_RENDER                 (1<<0)
-+#define I915_EXEC_BSD                    (2<<0)
-+#define I915_EXEC_BLT                    (3<<0)
++	/* FIXME: add subpixel order */
++done:
++	drm_mode_destroy(dev, adjusted_mode);
++	if (!ret) {
++		crtc->hwmode = saved_hwmode;
++		crtc->mode = saved_mode;
++		crtc->x = saved_x;
++		crtc->y = saved_y;
++	}
 +
-+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
-+ * Gen6+ only supports relative addressing to dynamic state (default) and
-+ * absolute addressing.
++	return ret;
++}
++
++static int
++drm_crtc_helper_disable(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++
++	/* Decouple all encoders and their attached connectors from this crtc */
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		if (encoder->crtc != crtc)
++			continue;
++
++		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++			if (connector->encoder != encoder)
++				continue;
++
++			connector->encoder = NULL;
++		}
++	}
++
++	drm_helper_disable_unused_functions(dev);
++	return 0;
++}
++
++/**
++ * drm_crtc_helper_set_config - set a new config from userspace
++ * @set: configuration to apply: target CRTC, mode, connectors, framebuffer
++ *       and scanout offset
 + *
-+ * These flags are ignored for the BSD and BLT rings.
++ * LOCKING:
++ * Caller must hold mode config lock.
++ *
++ * Set up a new configuration, provided by the user in @set, and enable
++ * it.
++ *
++ * RETURNS:
++ * Zero. (FIXME)
 + */
-+#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
-+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
-+#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
-+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
-+	uint64_t flags;
-+	uint64_t rsvd1;
-+	uint64_t rsvd2;
-+};
++int drm_crtc_helper_set_config(struct drm_mode_set *set)
++{
++	struct drm_device *dev;
++	struct drm_crtc *save_crtcs, *new_crtc, *crtc;
++	struct drm_encoder *save_encoders, *new_encoder, *encoder;
++	struct drm_framebuffer *old_fb = NULL;
++	bool mode_changed = false; /* if true do a full mode set */
++	bool fb_changed = false; /* if true and !mode_changed just do a flip */
++	struct drm_connector *save_connectors, *connector;
++	int count = 0, ro, fail = 0;
++	struct drm_crtc_helper_funcs *crtc_funcs;
++	struct drm_mode_set save_set;
++	int ret = 0;
++	int i;
 +
-+/** Resets the SO write offset registers for transform feedback on gen7. */
-+#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
++	DRM_DEBUG_KMS("\n");
 +
- struct drm_i915_gem_pin {
- 	/** Handle of the buffer to be pinned. */
- 	uint32_t handle;
-@@ -670,6 +782,9 @@
- #define I915_BIT_6_SWIZZLE_9_10_11	4
- /* Not seen by userland */
- #define I915_BIT_6_SWIZZLE_UNKNOWN	5
-+/* Seen by userland. */
-+#define I915_BIT_6_SWIZZLE_9_17		6
-+#define I915_BIT_6_SWIZZLE_9_10_17	7
- 
- struct drm_i915_gem_set_tiling {
- 	/** Handle of the buffer to have its tiling state updated */
-@@ -719,4 +834,137 @@
- 	uint32_t swizzle_mode;
- };
- 
-+struct drm_i915_gem_get_aperture {
-+	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
-+	uint64_t aper_size;
++	if (!set)
++		return -EINVAL;
 +
-+	/**
-+	 * Available space in the aperture used by i915_gem_execbuffer, in
-+	 * bytes
-+	 */
-+	uint64_t aper_available_size;
-+};
++	if (!set->crtc)
++		return -EINVAL;
 +
-+struct drm_i915_get_pipe_from_crtc_id {
-+        /** ID of CRTC being requested **/
-+        uint32_t crtc_id;
++	if (!set->crtc->helper_private)
++		return -EINVAL;
 +
-+        /** pipe of requested CRTC **/
-+        uint32_t pipe;
-+};
++	crtc_funcs = set->crtc->helper_private;
 +
-+#define I915_MADV_WILLNEED 0
-+#define I915_MADV_DONTNEED 1
-+#define I915_MADV_PURGED_INTERNAL 2 /* internal state */
++	if (!set->mode)
++		set->fb = NULL;
 +
-+struct drm_i915_gem_madvise {
-+	/** Handle of the buffer to change the backing store advice */
-+	uint32_t handle;
++	if (set->fb) {
++		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
++				set->crtc->base.id, set->fb->base.id,
++				(int)set->num_connectors, set->x, set->y);
++	} else {
++		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
++		return drm_crtc_helper_disable(set->crtc);
++	}
 +
-+	/* Advice: either the buffer will be needed again in the near future,
-+	 *         or wont be and could be discarded under memory pressure.
++	dev = set->crtc->dev;
++
++	/* Allocate space for the backup of all (non-pointer) crtc, encoder and
++	 * connector data. */
++	save_crtcs = malloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc),
++	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
++	save_encoders = malloc(dev->mode_config.num_encoder *
++	    sizeof(struct drm_encoder), DRM_MEM_KMS, M_WAITOK | M_ZERO);
++	save_connectors = malloc(dev->mode_config.num_connector *
++	    sizeof(struct drm_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
++
++	/* Copy data. Note that driver private data is not affected.
++	 * Should anything bad happen only the expected state is
++	 * restored, not the driver's personal bookkeeping.
 +	 */
-+	uint32_t madv;
++	count = 0;
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		save_crtcs[count++] = *crtc;
++	}
 +
-+	/** Whether the backing store still exists. */
-+	uint32_t retained;
++	count = 0;
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		save_encoders[count++] = *encoder;
++	}
++
++	count = 0;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		save_connectors[count++] = *connector;
++	}
++
++	save_set.crtc = set->crtc;
++	save_set.mode = &set->crtc->mode;
++	save_set.x = set->crtc->x;
++	save_set.y = set->crtc->y;
++	save_set.fb = set->crtc->fb;
++
++	/* We should be able to check here if the fb has the same properties
++	 * and then just flip_or_move it */
++	if (set->crtc->fb != set->fb) {
++		/* If we have no fb then treat it as a full mode set */
++		if (set->crtc->fb == NULL) {
++			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
++			mode_changed = true;
++		} else if (set->fb == NULL) {
++			mode_changed = true;
++		} else
++			fb_changed = true;
++	}
++
++	if (set->x != set->crtc->x || set->y != set->crtc->y)
++		fb_changed = true;
++
++	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
++		DRM_DEBUG_KMS("modes are different, full mode set\n");
++		drm_mode_debug_printmodeline(&set->crtc->mode);
++		drm_mode_debug_printmodeline(set->mode);
++		mode_changed = true;
++	}
++
++	/* a) traverse passed in connector list and get encoders for them */
++	count = 0;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct drm_connector_helper_funcs *connector_funcs =
++			connector->helper_private;
++		new_encoder = connector->encoder;
++		for (ro = 0; ro < set->num_connectors; ro++) {
++			if (set->connectors[ro] == connector) {
++				new_encoder = connector_funcs->best_encoder(connector);
++				/* if we can't get an encoder for a connector
++				   we are setting now - then fail */
++				if (new_encoder == NULL)
++				/* don't break so fail path works correctly */
++					fail = 1;
++				break;
++			}
++		}
++
++		if (new_encoder != connector->encoder) {
++			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
++			mode_changed = true;
++			/* If the encoder is reused for another connector, then
++			 * the appropriate crtc will be set later.
++			 */
++			if (connector->encoder)
++				connector->encoder->crtc = NULL;
++			connector->encoder = new_encoder;
++		}
++	}
++
++	if (fail) {
++		ret = -EINVAL;
++		goto fail;
++	}
++
++	count = 0;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		if (!connector->encoder)
++			continue;
++
++		if (connector->encoder->crtc == set->crtc)
++			new_crtc = NULL;
++		else
++			new_crtc = connector->encoder->crtc;
++
++		for (ro = 0; ro < set->num_connectors; ro++) {
++			if (set->connectors[ro] == connector)
++				new_crtc = set->crtc;
++		}
++
++		/* Make sure the new CRTC will work with the encoder */
++		if (new_crtc &&
++		    !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
++			ret = -EINVAL;
++			goto fail;
++		}
++		if (new_crtc != connector->encoder->crtc) {
++			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
++			mode_changed = true;
++			connector->encoder->crtc = new_crtc;
++		}
++		if (new_crtc) {
++			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
++				connector->base.id, drm_get_connector_name(connector),
++				new_crtc->base.id);
++		} else {
++			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
++				connector->base.id, drm_get_connector_name(connector));
++		}
++	}
++
++	/* mode_set_base is not a required function */
++	if (fb_changed && !crtc_funcs->mode_set_base)
++		mode_changed = true;
++
++	if (mode_changed) {
++		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
++		if (set->crtc->enabled) {
++			DRM_DEBUG_KMS("attempting to set mode from"
++					" userspace\n");
++			drm_mode_debug_printmodeline(set->mode);
++			old_fb = set->crtc->fb;
++			set->crtc->fb = set->fb;
++			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
++						      set->x, set->y,
++						      old_fb)) {
++				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
++					  set->crtc->base.id);
++				set->crtc->fb = old_fb;
++				ret = -EINVAL;
++				goto fail;
++			}
++			DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
++			for (i = 0; i < set->num_connectors; i++) {
++				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
++					      drm_get_connector_name(set->connectors[i]));
++				set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
++			}
++		}
++		drm_helper_disable_unused_functions(dev);
++	} else if (fb_changed) {
++		set->crtc->x = set->x;
++		set->crtc->y = set->y;
++
++		old_fb = set->crtc->fb;
++		if (set->crtc->fb != set->fb)
++			set->crtc->fb = set->fb;
++		ret = crtc_funcs->mode_set_base(set->crtc,
++						set->x, set->y, old_fb);
++		if (ret != 0) {
++			set->crtc->fb = old_fb;
++			goto fail;
++		}
++	}
++
++	free(save_connectors, DRM_MEM_KMS);
++	free(save_encoders, DRM_MEM_KMS);
++	free(save_crtcs, DRM_MEM_KMS);
++	return 0;
++
++fail:
++	/* Restore all previous data. */
++	count = 0;
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		*crtc = save_crtcs[count++];
++	}
++
++	count = 0;
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		*encoder = save_encoders[count++];
++	}
++
++	count = 0;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		*connector = save_connectors[count++];
++	}
++
++	/* Try to restore the config */
++	if (mode_changed &&
++	    !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
++				      save_set.y, save_set.fb))
++		DRM_ERROR("failed to restore config after modeset failure\n");
++
++	free(save_connectors, DRM_MEM_KMS);
++	free(save_encoders, DRM_MEM_KMS);
++	free(save_crtcs, DRM_MEM_KMS);
++	return ret;
++}
++
++static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
++{
++	int dpms = DRM_MODE_DPMS_OFF;
++	struct drm_connector *connector;
++	struct drm_device *dev = encoder->dev;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		if (connector->encoder == encoder)
++			if (connector->dpms < dpms)
++				dpms = connector->dpms;
++	return dpms;
++}
++
++static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
++{
++	int dpms = DRM_MODE_DPMS_OFF;
++	struct drm_connector *connector;
++	struct drm_device *dev = crtc->dev;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		if (connector->encoder && connector->encoder->crtc == crtc)
++			if (connector->dpms < dpms)
++				dpms = connector->dpms;
++	return dpms;
++}
++
++/**
++ * drm_helper_connector_dpms
++ * @connector affected connector
++ * @mode DPMS mode
++ *
++ * Calls the low-level connector DPMS function, then
++ * calls appropriate encoder and crtc DPMS functions as well
++ */
++void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
++{
++	struct drm_encoder *encoder = connector->encoder;
++	struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
++	int old_dpms;
++
++	if (mode == connector->dpms)
++		return;
++
++	old_dpms = connector->dpms;
++	connector->dpms = mode;
++
++	/* from off to on, do crtc then encoder */
++	if (mode < old_dpms) {
++		if (crtc) {
++			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++			if (crtc_funcs->dpms)
++				(*crtc_funcs->dpms) (crtc,
++						     drm_helper_choose_crtc_dpms(crtc));
++		}
++		if (encoder) {
++			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
++			if (encoder_funcs->dpms)
++				(*encoder_funcs->dpms) (encoder,
++							drm_helper_choose_encoder_dpms(encoder));
++		}
++	}
++
++	/* from on to off, do encoder then crtc */
++	if (mode > old_dpms) {
++		if (encoder) {
++			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
++			if (encoder_funcs->dpms)
++				(*encoder_funcs->dpms) (encoder,
++							drm_helper_choose_encoder_dpms(encoder));
++		}
++		if (crtc) {
++			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++			if (crtc_funcs->dpms)
++				(*crtc_funcs->dpms) (crtc,
++						     drm_helper_choose_crtc_dpms(crtc));
++		}
++	}
++
++	return;
++}
++
++int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
++				   struct drm_mode_fb_cmd2 *mode_cmd)
++{
++	int i;
++
++	fb->width = mode_cmd->width;
++	fb->height = mode_cmd->height;
++	for (i = 0; i < 4; i++) {
++		fb->pitches[i] = mode_cmd->pitches[i];
++		fb->offsets[i] = mode_cmd->offsets[i];
++	}
++	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
++				    &fb->bits_per_pixel);
++	fb->pixel_format = mode_cmd->pixel_format;
++
++	return 0;
++}
++
++int drm_helper_resume_force_mode(struct drm_device *dev)
++{
++	struct drm_crtc *crtc;
++	struct drm_encoder *encoder;
++	struct drm_encoder_helper_funcs *encoder_funcs;
++	struct drm_crtc_helper_funcs *crtc_funcs;
++	int ret;
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++
++		if (!crtc->enabled)
++			continue;
++
++		ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
++					       crtc->x, crtc->y, crtc->fb);
++
++		if (!ret)
++			DRM_ERROR("failed to set mode on crtc %p\n", crtc);
++
++		/* Turn off outputs that were already powered off */
++		if (drm_helper_choose_crtc_dpms(crtc)) {
++			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++
++				if(encoder->crtc != crtc)
++					continue;
++
++				encoder_funcs = encoder->helper_private;
++				if (encoder_funcs->dpms)
++					(*encoder_funcs->dpms) (encoder,
++					    drm_helper_choose_encoder_dpms(encoder));
++			}
++
++			crtc_funcs = crtc->helper_private;
++			if (crtc_funcs->dpms)
++				(*crtc_funcs->dpms) (crtc,
++				    drm_helper_choose_crtc_dpms(crtc));
++		}
++	}
++	/* disable the unused connectors while restoring the modesetting */
++	drm_helper_disable_unused_functions(dev);
++	return 0;
++}
++
++#define DRM_OUTPUT_POLL_PERIOD (10 * hz)
++static void output_poll_execute(void *ctx, int pending)
++{
++	struct drm_device *dev;
++	struct drm_connector *connector;
++	enum drm_connector_status old_status;
++	bool repoll = false, changed = false;
++
++	if (!drm_kms_helper_poll)
++		return;
++
++	dev = ctx;
++
++	sx_xlock(&dev->mode_config.mutex);
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++
++		/* if this is HPD or polled don't check it -
++		   TV out for instance */
++		if (!connector->polled)
++			continue;
++
++		else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
++		    DRM_CONNECTOR_POLL_DISCONNECT))
++			repoll = true;
++
++		old_status = connector->status;
++		/* if we are connected and don't want to poll for disconnect
++		   skip it */
++		if (old_status == connector_status_connected &&
++		    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
++		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
++			continue;
++
++		connector->status = connector->funcs->detect(connector, false);
++		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
++			      connector->base.id,
++			      drm_get_connector_name(connector),
++			      old_status, connector->status);
++		if (old_status != connector->status)
++			changed = true;
++	}
++
++	sx_xunlock(&dev->mode_config.mutex);
++
++	if (changed) {
++#if 0
++		/* send a uevent + call fbdev */
++		drm_sysfs_hotplug_event(dev);
++#endif
++		if (dev->mode_config.funcs->output_poll_changed)
++			dev->mode_config.funcs->output_poll_changed(dev);
++	}
++
++	if (repoll) {
++		taskqueue_enqueue_timeout(taskqueue_thread,
++		    &dev->mode_config.output_poll_task,
++		    DRM_OUTPUT_POLL_PERIOD);
++	}
++}
++
++void drm_kms_helper_poll_disable(struct drm_device *dev)
++{
++	if (!dev->mode_config.poll_enabled)
++		return;
++	taskqueue_cancel_timeout(taskqueue_thread,
++	    &dev->mode_config.output_poll_task, NULL);
++}
++
++void drm_kms_helper_poll_enable(struct drm_device *dev)
++{
++	bool poll = false;
++	struct drm_connector *connector;
++
++	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
++		return;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		if (connector->polled)
++			poll = true;
++	}
++
++	if (poll) {
++		taskqueue_enqueue_timeout(taskqueue_thread,
++		    &dev->mode_config.output_poll_task, DRM_OUTPUT_POLL_PERIOD);
++	}
++}
++
++void drm_kms_helper_poll_init(struct drm_device *dev)
++{
++
++	TIMEOUT_TASK_INIT(taskqueue_thread, &dev->mode_config.output_poll_task,
++	    0, output_poll_execute, dev);
++	dev->mode_config.poll_enabled = true;
++
++	drm_kms_helper_poll_enable(dev);
++}
++
++void drm_kms_helper_poll_fini(struct drm_device *dev)
++{
++	drm_kms_helper_poll_disable(dev);
++}
++
++void drm_helper_hpd_irq_event(struct drm_device *dev)
++{
++	if (!dev->mode_config.poll_enabled)
++		return;
++
++	/* kill timer and schedule immediate execution, this doesn't block */
++	taskqueue_cancel_timeout(taskqueue_thread,
++	    &dev->mode_config.output_poll_task, NULL);
++	if (drm_kms_helper_poll)
++		taskqueue_enqueue_timeout(taskqueue_thread,
++		    &dev->mode_config.output_poll_task, 0);
++}
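The connector-poll code above leans on taskqueue(9) timeout tasks: output_poll_execute() is a one-shot task that re-arms itself for as long as any connector still wants polling, and drm_helper_hpd_irq_event() cancels the pending timeout and queues an immediate run instead. A minimal sketch of that re-arming pattern, with illustrative names that are not part of the patch:

	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <sys/taskqueue.h>

	static struct timeout_task poll_task;

	static void
	poll_execute(void *ctx, int pending)
	{
		/* ... probe hardware state here ... */

		/* One-shot: the handler must enqueue itself again to keep polling. */
		taskqueue_enqueue_timeout(taskqueue_thread, &poll_task, 10 * hz);
	}

	static void
	poll_start(void *ctx)
	{
		TIMEOUT_TASK_INIT(taskqueue_thread, &poll_task, 0, poll_execute, ctx);
		taskqueue_enqueue_timeout(taskqueue_thread, &poll_task, 10 * hz);
	}

	static void
	poll_stop(void)
	{
		/* NULL: we do not care whether a run was still pending. */
		taskqueue_cancel_timeout(taskqueue_thread, &poll_task, NULL);
	}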
+diff --git a/sys/dev/drm/drm_crtc_helper.h b/sys/dev/drm/drm_crtc_helper.h
+new file mode 100644
+index 0000000..cc602a0
+--- /dev/null
++++ sys/dev/drm/drm_crtc_helper.h
+@@ -0,0 +1,144 @@
++/*
++ * Copyright © 2006 Keith Packard
++ * Copyright © 2007-2008 Dave Airlie
++ * Copyright © 2007-2008 Intel Corporation
++ *   Jesse Barnes <jesse.barnes at intel.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/*
++ * The DRM mode setting helper functions are common code for drivers to use if
++ * they wish.  Drivers are not forced to use this code in their
++ * implementations but it would be useful if the code they do use at least
++ * provides a consistent interface and operation to userspace
++ */
++
++#ifndef __DRM_CRTC_HELPER_H__
++#define __DRM_CRTC_HELPER_H__
++
++enum mode_set_atomic {
++	LEAVE_ATOMIC_MODE_SET,
++	ENTER_ATOMIC_MODE_SET,
 +};
 +
-+#define I915_OVERLAY_TYPE_MASK 		0xff
-+#define I915_OVERLAY_YUV_PLANAR 	0x01
-+#define I915_OVERLAY_YUV_PACKED 	0x02
-+#define I915_OVERLAY_RGB		0x03
++struct drm_crtc_helper_funcs {
++	/*
++	 * Control power levels on the CRTC.  If the mode passed in is
++	 * unsupported, the provider must use the next lowest power level.
++	 */
++	void (*dpms)(struct drm_crtc *crtc, int mode);
++	void (*prepare)(struct drm_crtc *crtc);
++	void (*commit)(struct drm_crtc *crtc);
 +
-+#define I915_OVERLAY_DEPTH_MASK		0xff00
-+#define I915_OVERLAY_RGB24		0x1000
-+#define I915_OVERLAY_RGB16		0x2000
-+#define I915_OVERLAY_RGB15		0x3000
-+#define I915_OVERLAY_YUV422		0x0100
-+#define I915_OVERLAY_YUV411		0x0200
-+#define I915_OVERLAY_YUV420		0x0300
-+#define I915_OVERLAY_YUV410		0x0400
++	/* Provider can fixup or change mode timings before modeset occurs */
++	bool (*mode_fixup)(struct drm_crtc *crtc,
++			   struct drm_display_mode *mode,
++			   struct drm_display_mode *adjusted_mode);
++	/* Actually set the mode */
++	int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
++			struct drm_display_mode *adjusted_mode, int x, int y,
++			struct drm_framebuffer *old_fb);
 +
-+#define I915_OVERLAY_SWAP_MASK		0xff0000
-+#define I915_OVERLAY_NO_SWAP		0x000000
-+#define I915_OVERLAY_UV_SWAP		0x010000
-+#define I915_OVERLAY_Y_SWAP		0x020000
-+#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
++	/* Move the crtc on the current fb to the given position *optional* */
++	int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
++			     struct drm_framebuffer *old_fb);
++	int (*mode_set_base_atomic)(struct drm_crtc *crtc,
++				    struct drm_framebuffer *fb, int x, int y,
++				    enum mode_set_atomic);
 +
-+#define I915_OVERLAY_FLAGS_MASK		0xff000000
-+#define I915_OVERLAY_ENABLE		0x01000000
++	/* reload the current crtc LUT */
++	void (*load_lut)(struct drm_crtc *crtc);
 +
-+struct drm_intel_overlay_put_image {
-+	/* various flags and src format description */
-+	uint32_t flags;
-+	/* source picture description */
-+	uint32_t bo_handle;
-+	/* stride values and offsets are in bytes, buffer relative */
-+	uint16_t stride_Y; /* stride for packed formats */
-+	uint16_t stride_UV;
-+	uint32_t offset_Y; /* offset for packet formats */
-+	uint32_t offset_U;
-+	uint32_t offset_V;
-+	/* in pixels */
-+	uint16_t src_width;
-+	uint16_t src_height;
-+	/* to compensate the scaling factors for partially covered surfaces */
-+	uint16_t src_scan_width;
-+	uint16_t src_scan_height;
-+	/* output crtc description */
-+	uint32_t crtc_id;
-+	uint16_t dst_x;
-+	uint16_t dst_y;
-+	uint16_t dst_width;
-+	uint16_t dst_height;
++	/* disable crtc when not in use - more explicit than dpms off */
++	void (*disable)(struct drm_crtc *crtc);
 +};
 +
-+/* flags */
-+#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
-+#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
-+struct drm_intel_overlay_attrs {
-+	uint32_t flags;
-+	uint32_t color_key;
-+	int32_t brightness;
-+	uint32_t contrast;
-+	uint32_t saturation;
-+	uint32_t gamma0;
-+	uint32_t gamma1;
-+	uint32_t gamma2;
-+	uint32_t gamma3;
-+	uint32_t gamma4;
-+	uint32_t gamma5;
++struct drm_encoder_helper_funcs {
++	void (*dpms)(struct drm_encoder *encoder, int mode);
++	void (*save)(struct drm_encoder *encoder);
++	void (*restore)(struct drm_encoder *encoder);
++
++	bool (*mode_fixup)(struct drm_encoder *encoder,
++			   struct drm_display_mode *mode,
++			   struct drm_display_mode *adjusted_mode);
++	void (*prepare)(struct drm_encoder *encoder);
++	void (*commit)(struct drm_encoder *encoder);
++	void (*mode_set)(struct drm_encoder *encoder,
++			 struct drm_display_mode *mode,
++			 struct drm_display_mode *adjusted_mode);
++	struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
++	/* detect for DAC style encoders */
++	enum drm_connector_status (*detect)(struct drm_encoder *encoder,
++					    struct drm_connector *connector);
++	/* disable encoder when not in use - more explicit than dpms off */
++	void (*disable)(struct drm_encoder *encoder);
 +};
 +
++struct drm_connector_helper_funcs {
++	int (*get_modes)(struct drm_connector *connector);
++	int (*mode_valid)(struct drm_connector *connector,
++			  struct drm_display_mode *mode);
++	struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
++};
++
++extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
++extern void drm_helper_disable_unused_functions(struct drm_device *dev);
++extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
++extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
++				     struct drm_display_mode *mode,
++				     int x, int y,
++				     struct drm_framebuffer *old_fb);
++extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
++extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
++
++extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
++
++extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
++					  struct drm_mode_fb_cmd2 *mode_cmd);
++
++static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
++				       const struct drm_crtc_helper_funcs *funcs)
++{
++	crtc->helper_private = __DECONST(void *, funcs);
++}
++
++static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
++					  const struct drm_encoder_helper_funcs *funcs)
++{
++	encoder->helper_private = __DECONST(void *, funcs);
++}
++
++static inline void drm_connector_helper_add(struct drm_connector *connector,
++					    const struct drm_connector_helper_funcs *funcs)
++{
++	connector->helper_private = __DECONST(void *, funcs);
++}
++
++extern int drm_helper_resume_force_mode(struct drm_device *dev);
++extern void drm_kms_helper_poll_init(struct drm_device *dev);
++extern void drm_kms_helper_poll_fini(struct drm_device *dev);
++extern void drm_helper_hpd_irq_event(struct drm_device *dev);
++
++extern void drm_kms_helper_poll_disable(struct drm_device *dev);
++extern void drm_kms_helper_poll_enable(struct drm_device *dev);
++
++extern bool drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
++    struct drm_cmdline_mode *cmdline_mode);
++#endif
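For context on how these vtables get used: a KMS driver fills them in and attaches them with the *_helper_add() inlines above, and drm_crtc_helper_set_config()/drm_crtc_helper_set_mode() then drive the hardware only through those callbacks. A rough sketch of the driver-side wiring; all mydrv_* names are hypothetical, and drm_crtc_init() is assumed to keep its usual (dev, crtc, funcs) signature:

	/* Base CRTC vtable: point the modeset entry at the helper. */
	static const struct drm_crtc_funcs mydrv_crtc_funcs = {
		.set_config	= drm_crtc_helper_set_config,
		.destroy	= mydrv_crtc_destroy,
	};

	/* Helper vtable: the hardware-specific steps of a modeset. */
	static const struct drm_crtc_helper_funcs mydrv_crtc_helper_funcs = {
		.dpms		= mydrv_crtc_dpms,
		.mode_fixup	= mydrv_crtc_mode_fixup,
		.mode_set	= mydrv_crtc_mode_set,		/* full modeset */
		.mode_set_base	= mydrv_crtc_mode_set_base,	/* pan/flip only */
		.prepare	= mydrv_crtc_prepare,
		.commit		= mydrv_crtc_commit,
	};

	static void
	mydrv_crtc_init(struct drm_device *dev)
	{
		struct drm_crtc *crtc;

		crtc = malloc(sizeof(*crtc), DRM_MEM_KMS, M_WAITOK | M_ZERO);
		drm_crtc_init(dev, crtc, &mydrv_crtc_funcs);
		drm_crtc_helper_add(crtc, &mydrv_crtc_helper_funcs);
	}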
+diff --git a/sys/dev/drm/drm_dp_helper.h b/sys/dev/drm/drm_dp_helper.h
+new file mode 100644
+index 0000000..fc5d2a8
+--- /dev/null
++++ sys/dev/drm/drm_dp_helper.h
+@@ -0,0 +1,248 @@
 +/*
-+ * Intel sprite handling
++ * Copyright © 2008 Keith Packard
 + *
-+ * Color keying works with a min/mask/max tuple.  Both source and destination
-+ * color keying is allowed.
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission.  The copyright holders make no representations
++ * about the suitability of this software for any purpose.  It is provided "as
++ * is" without express or implied warranty.
 + *
-+ * Source keying:
-+ * Sprite pixels within the min & max values, masked against the color channels
-+ * specified in the mask field, will be transparent.  All other pixels will
-+ * be displayed on top of the primary plane.  For RGB surfaces, only the min
-+ * and mask fields will be used; ranged compares are not allowed.
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ */
++
++#ifndef _DRM_DP_HELPER_H_
++#define _DRM_DP_HELPER_H_
++
++/* From the VESA DisplayPort spec */
++
++#define AUX_NATIVE_WRITE	0x8
++#define AUX_NATIVE_READ		0x9
++#define AUX_I2C_WRITE		0x0
++#define AUX_I2C_READ		0x1
++#define AUX_I2C_STATUS		0x2
++#define AUX_I2C_MOT		0x4
++
++#define AUX_NATIVE_REPLY_ACK	(0x0 << 4)
++#define AUX_NATIVE_REPLY_NACK	(0x1 << 4)
++#define AUX_NATIVE_REPLY_DEFER	(0x2 << 4)
++#define AUX_NATIVE_REPLY_MASK	(0x3 << 4)
++
++#define AUX_I2C_REPLY_ACK	(0x0 << 6)
++#define AUX_I2C_REPLY_NACK	(0x1 << 6)
++#define AUX_I2C_REPLY_DEFER	(0x2 << 6)
++#define AUX_I2C_REPLY_MASK	(0x3 << 6)
++
++/* AUX CH addresses */
++/* DPCD */
++#define DP_DPCD_REV                         0x000
++
++#define DP_MAX_LINK_RATE                    0x001
++
++#define DP_MAX_LANE_COUNT                   0x002
++# define DP_MAX_LANE_COUNT_MASK		    0x1f
++# define DP_TPS3_SUPPORTED		    (1 << 6)
++# define DP_ENHANCED_FRAME_CAP		    (1 << 7)
++
++#define DP_MAX_DOWNSPREAD                   0x003
++# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)
++
++#define DP_NORP                             0x004
++
++#define DP_DOWNSTREAMPORT_PRESENT           0x005
++# define DP_DWN_STRM_PORT_PRESENT           (1 << 0)
++# define DP_DWN_STRM_PORT_TYPE_MASK         0x06
++/* 00b = DisplayPort */
++/* 01b = Analog */
++/* 10b = TMDS or HDMI */
++/* 11b = Other */
++# define DP_FORMAT_CONVERSION               (1 << 3)
++
++#define DP_MAIN_LINK_CHANNEL_CODING         0x006
++
++#define DP_TRAINING_AUX_RD_INTERVAL         0x00e
++
++#define DP_PSR_SUPPORT                      0x070
++# define DP_PSR_IS_SUPPORTED                1
++#define DP_PSR_CAPS                         0x071
++# define DP_PSR_NO_TRAIN_ON_EXIT            1
++# define DP_PSR_SETUP_TIME_330              (0 << 1)
++# define DP_PSR_SETUP_TIME_275              (1 << 1)
++# define DP_PSR_SETUP_TIME_220              (2 << 1)
++# define DP_PSR_SETUP_TIME_165              (3 << 1)
++# define DP_PSR_SETUP_TIME_110              (4 << 1)
++# define DP_PSR_SETUP_TIME_55               (5 << 1)
++# define DP_PSR_SETUP_TIME_0                (6 << 1)
++# define DP_PSR_SETUP_TIME_MASK             (7 << 1)
++# define DP_PSR_SETUP_TIME_SHIFT            1
++
++/* link configuration */
++#define	DP_LINK_BW_SET		            0x100
++# define DP_LINK_BW_1_62		    0x06
++# define DP_LINK_BW_2_7			    0x0a
++# define DP_LINK_BW_5_4			    0x14
++
++#define DP_LANE_COUNT_SET	            0x101
++# define DP_LANE_COUNT_MASK		    0x0f
++# define DP_LANE_COUNT_ENHANCED_FRAME_EN    (1 << 7)
++
++#define DP_TRAINING_PATTERN_SET	            0x102
++# define DP_TRAINING_PATTERN_DISABLE	    0
++# define DP_TRAINING_PATTERN_1		    1
++# define DP_TRAINING_PATTERN_2		    2
++# define DP_TRAINING_PATTERN_3		    3
++# define DP_TRAINING_PATTERN_MASK	    0x3
++
++# define DP_LINK_QUAL_PATTERN_DISABLE	    (0 << 2)
++# define DP_LINK_QUAL_PATTERN_D10_2	    (1 << 2)
++# define DP_LINK_QUAL_PATTERN_ERROR_RATE    (2 << 2)
++# define DP_LINK_QUAL_PATTERN_PRBS7	    (3 << 2)
++# define DP_LINK_QUAL_PATTERN_MASK	    (3 << 2)
++
++# define DP_RECOVERED_CLOCK_OUT_EN	    (1 << 4)
++# define DP_LINK_SCRAMBLING_DISABLE	    (1 << 5)
++
++# define DP_SYMBOL_ERROR_COUNT_BOTH	    (0 << 6)
++# define DP_SYMBOL_ERROR_COUNT_DISPARITY    (1 << 6)
++# define DP_SYMBOL_ERROR_COUNT_SYMBOL	    (2 << 6)
++# define DP_SYMBOL_ERROR_COUNT_MASK	    (3 << 6)
++
++#define DP_TRAINING_LANE0_SET		    0x103
++#define DP_TRAINING_LANE1_SET		    0x104
++#define DP_TRAINING_LANE2_SET		    0x105
++#define DP_TRAINING_LANE3_SET		    0x106
++
++# define DP_TRAIN_VOLTAGE_SWING_MASK	    0x3
++# define DP_TRAIN_VOLTAGE_SWING_SHIFT	    0
++# define DP_TRAIN_MAX_SWING_REACHED	    (1 << 2)
++# define DP_TRAIN_VOLTAGE_SWING_400	    (0 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_600	    (1 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_800	    (2 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_1200	    (3 << 0)
++
++# define DP_TRAIN_PRE_EMPHASIS_MASK	    (3 << 3)
++# define DP_TRAIN_PRE_EMPHASIS_0	    (0 << 3)
++# define DP_TRAIN_PRE_EMPHASIS_3_5	    (1 << 3)
++# define DP_TRAIN_PRE_EMPHASIS_6	    (2 << 3)
++# define DP_TRAIN_PRE_EMPHASIS_9_5	    (3 << 3)
++
++# define DP_TRAIN_PRE_EMPHASIS_SHIFT	    3
++# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED  (1 << 5)
++
++#define DP_DOWNSPREAD_CTRL		    0x107
++# define DP_SPREAD_AMP_0_5		    (1 << 4)
++
++#define DP_MAIN_LINK_CHANNEL_CODING_SET	    0x108
++# define DP_SET_ANSI_8B10B		    (1 << 0)
++
++#define DP_PSR_EN_CFG			    0x170
++# define DP_PSR_ENABLE			    (1 << 0)
++# define DP_PSR_MAIN_LINK_ACTIVE	    (1 << 1)
++# define DP_PSR_CRC_VERIFICATION	    (1 << 2)
++# define DP_PSR_FRAME_CAPTURE		    (1 << 3)
++
++#define DP_DEVICE_SERVICE_IRQ_VECTOR	    0x201
++# define DP_REMOTE_CONTROL_COMMAND_PENDING  (1 << 0)
++# define DP_AUTOMATED_TEST_REQUEST	    (1 << 1)
++# define DP_CP_IRQ			    (1 << 2)
++# define DP_SINK_SPECIFIC_IRQ		    (1 << 6)
++
++#define DP_LANE0_1_STATUS		    0x202
++#define DP_LANE2_3_STATUS		    0x203
++# define DP_LANE_CR_DONE		    (1 << 0)
++# define DP_LANE_CHANNEL_EQ_DONE	    (1 << 1)
++# define DP_LANE_SYMBOL_LOCKED		    (1 << 2)
++
++#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE |		\
++			    DP_LANE_CHANNEL_EQ_DONE |	\
++			    DP_LANE_SYMBOL_LOCKED)
++
++#define DP_LANE_ALIGN_STATUS_UPDATED	    0x204
++
++#define DP_INTERLANE_ALIGN_DONE		    (1 << 0)
++#define DP_DOWNSTREAM_PORT_STATUS_CHANGED   (1 << 6)
++#define DP_LINK_STATUS_UPDATED		    (1 << 7)
++
++#define DP_SINK_STATUS			    0x205
++
++#define DP_RECEIVE_PORT_0_STATUS	    (1 << 0)
++#define DP_RECEIVE_PORT_1_STATUS	    (1 << 1)
++
++#define DP_ADJUST_REQUEST_LANE0_1	    0x206
++#define DP_ADJUST_REQUEST_LANE2_3	    0x207
++# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK  0x03
++# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
++# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK   0x0c
++# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT  2
++# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK  0x30
++# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
++# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK   0xc0
++# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT  6
++
++#define DP_TEST_REQUEST			    0x218
++# define DP_TEST_LINK_TRAINING		    (1 << 0)
++# define DP_TEST_LINK_PATTERN		    (1 << 1)
++# define DP_TEST_LINK_EDID_READ		    (1 << 2)
++# define DP_TEST_LINK_PHY_TEST_PATTERN	    (1 << 3) /* DPCD >= 1.1 */
++
++#define DP_TEST_LINK_RATE		    0x219
++# define DP_LINK_RATE_162		    (0x6)
++# define DP_LINK_RATE_27		    (0xa)
++
++#define DP_TEST_LANE_COUNT		    0x220
++
++#define DP_TEST_PATTERN			    0x221
++
++#define DP_TEST_RESPONSE		    0x260
++# define DP_TEST_ACK			    (1 << 0)
++# define DP_TEST_NAK			    (1 << 1)
++# define DP_TEST_EDID_CHECKSUM_WRITE	    (1 << 2)
++
++#define DP_SET_POWER                        0x600
++# define DP_SET_POWER_D0                    0x1
++# define DP_SET_POWER_D3                    0x2
++
++#define DP_PSR_ERROR_STATUS                 0x2006
++# define DP_PSR_LINK_CRC_ERROR              (1 << 0)
++# define DP_PSR_RFB_STORAGE_ERROR           (1 << 1)
++
++#define DP_PSR_ESI                          0x2007
++# define DP_PSR_CAPS_CHANGE                 (1 << 0)
++
++#define DP_PSR_STATUS                       0x2008
++# define DP_PSR_SINK_INACTIVE               0
++# define DP_PSR_SINK_ACTIVE_SRC_SYNCED      1
++# define DP_PSR_SINK_ACTIVE_RFB             2
++# define DP_PSR_SINK_ACTIVE_SINK_SYNCED     3
++# define DP_PSR_SINK_ACTIVE_RESYNC          4
++# define DP_PSR_SINK_INTERNAL_ERROR         7
++# define DP_PSR_SINK_STATE_MASK             0x07
++
++#define MODE_I2C_START	1
++#define MODE_I2C_WRITE	2
++#define MODE_I2C_READ	4
++#define MODE_I2C_STOP	8
++
++struct iic_dp_aux_data {
++	bool running;
++	u16 address;
++	void *priv;
++	int (*aux_ch)(device_t adapter, int mode, uint8_t write_byte,
++	    uint8_t *read_byte);
++	device_t port;
++};
++
++int iic_dp_aux_add_bus(device_t dev, const char *name,
++    int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
++    void *priv, device_t *bus, device_t *adapter);
++
++#endif /* _DRM_DP_HELPER_H_ */
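Apart from the DPCD register definitions, the one entry point this header exports is iic_dp_aux_add_bus(), which wraps a driver-supplied AUX-channel callback in a newbus iicbus so ordinary DDC/EDID transfers can run over DisplayPort AUX. A hedged sketch of the registration; struct mydp_softc and the mydp_* names are invented for illustration:

	/* Hypothetical DP output driver registering an I2C-over-AUX adapter. */
	struct mydp_softc {
		device_t	dp_iic_bus;	/* the drm_iic_dp_aux child */
		device_t	dp_adapter;	/* iicbus usable for DDC/EDID */
	};

	/* Drives the hardware AUX channel; see the callback sketch further down. */
	static int mydp_aux_ch(device_t idev, int mode, uint8_t write_byte,
	    uint8_t *read_byte);

	static int
	mydp_i2c_init(device_t drmdev, struct mydp_softc *sc)
	{

		return (iic_dp_aux_add_bus(drmdev, "DPDDC", mydp_aux_ch, sc,
		    &sc->dp_iic_bus, &sc->dp_adapter));
	}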
+diff --git a/sys/dev/drm/drm_dp_iic_helper.c b/sys/dev/drm/drm_dp_iic_helper.c
+new file mode 100644
+index 0000000..79c9042
+--- /dev/null
++++ sys/dev/drm/drm_dp_iic_helper.c
+@@ -0,0 +1,289 @@
++/*
++ * Copyright © 2009 Keith Packard
 + *
-+ * Destination keying:
-+ * Primary plane pixels that match the min value, masked against the color
-+ * channels specified in the mask field, will be replaced by corresponding
-+ * pixels from the sprite plane.
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission.  The copyright holders make no representations
++ * about the suitability of this software for any purpose.  It is provided "as
++ * is" without express or implied warranty.
 + *
-+ * Note that source & destination keying are exclusive; only one can be
-+ * active on a given plane.
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
 + */
 +
-+#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
-+#define I915_SET_COLORKEY_DESTINATION	(1<<1)
-+#define I915_SET_COLORKEY_SOURCE	(1<<2)
-+struct drm_intel_sprite_colorkey {
-+	uint32_t plane_id;
-+	uint32_t min_value;
-+	uint32_t channel_mask;
-+	uint32_t max_value;
-+	uint32_t flags;
++#include <sys/types.h>
++#include <sys/kobj.h>
++#include <sys/bus.h>
++#include <dev/iicbus/iic.h>
++#include "iicbus_if.h"
++#include <dev/iicbus/iiconf.h>
++#include "dev/drm/drmP.h"
++#include "dev/drm/drm_dp_helper.h"
++
++static int
++iic_dp_aux_transaction(device_t idev, int mode, uint8_t write_byte,
++    uint8_t *read_byte)
++{
++	struct iic_dp_aux_data *aux_data;
++	int ret;
++
++	aux_data = device_get_softc(idev);
++	ret = (*aux_data->aux_ch)(idev, mode, write_byte, read_byte);
++	return (ret);
++}
++
++/*
++ * I2C over AUX CH
++ */
++
++/*
++ * Send the address. If the I2C link is running, this 'restarts'
++ * the connection with the new address, this is used for doing
++ * a write followed by a read (as needed for DDC)
++ */
++static int
++iic_dp_aux_address(device_t idev, u16 address, bool reading)
++{
++	struct iic_dp_aux_data *aux_data;
++	int mode, ret;
++
++	aux_data = device_get_softc(idev);
++	mode = MODE_I2C_START;
++	if (reading)
++		mode |= MODE_I2C_READ;
++	else
++		mode |= MODE_I2C_WRITE;
++	aux_data->address = address;
++	aux_data->running = true;
++	ret = iic_dp_aux_transaction(idev, mode, 0, NULL);
++	return (ret);
++}
++
++/*
++ * Stop the I2C transaction. This closes out the link, sending
++ * a bare address packet with the MOT bit turned off
++ */
++static void
++iic_dp_aux_stop(device_t idev, bool reading)
++{
++	struct iic_dp_aux_data *aux_data;
++	int mode;
++
++	aux_data = device_get_softc(idev);
++	mode = MODE_I2C_STOP;
++	if (reading)
++		mode |= MODE_I2C_READ;
++	else
++		mode |= MODE_I2C_WRITE;
++	if (aux_data->running) {
++		(void)iic_dp_aux_transaction(idev, mode, 0, NULL);
++		aux_data->running = false;
++	}
++}
++
++/*
++ * Write a single byte to the current I2C address; the
++ * I2C link must be running or this returns -EIO
++ */
++static int
++iic_dp_aux_put_byte(device_t idev, u8 byte)
++{
++	struct iic_dp_aux_data *aux_data;
++	int ret;
++
++	aux_data = device_get_softc(idev);
++
++	if (!aux_data->running)
++		return (EIO);
++
++	ret = iic_dp_aux_transaction(idev, MODE_I2C_WRITE, byte, NULL);
++	return (ret);
++}
++
++/*
++ * Read a single byte from the current I2C address, the
++ * I2C link must be running or this returns -EIO
++ */
++static int
++iic_dp_aux_get_byte(device_t idev, u8 *byte_ret)
++{
++	struct iic_dp_aux_data *aux_data;
++	int ret;
++
++	aux_data = device_get_softc(idev);
++
++	if (!aux_data->running)
++		return (EIO);
++
++	ret = iic_dp_aux_transaction(idev, MODE_I2C_READ, 0, byte_ret);
++	return (ret);
++}
++
++static int
++iic_dp_aux_xfer(device_t idev, struct iic_msg *msgs, uint32_t num)
++{
++	u8 *buf;
++	int b, m, ret;
++	u16 len;
++	bool reading;
++
++	ret = 0;
++	reading = false;
++
++	for (m = 0; m < num; m++) {
++		len = msgs[m].len;
++		buf = msgs[m].buf;
++		reading = (msgs[m].flags & IIC_M_RD) != 0;
++		ret = iic_dp_aux_address(idev, msgs[m].slave, reading);
++		if (ret != 0)
++			break;
++		if (reading) {
++			for (b = 0; b < len; b++) {
++				ret = iic_dp_aux_get_byte(idev, &buf[b]);
++				if (ret != 0)
++					break;
++			}
++		} else {
++			for (b = 0; b < len; b++) {
++				ret = iic_dp_aux_put_byte(idev, buf[b]);
++				if (ret != 0)
++					break;
++			}
++		}
++		if (ret != 0)
++			break;
++	}
++	iic_dp_aux_stop(idev, reading);
++	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
++	return (ret);
++}
++
++static void
++iic_dp_aux_reset_bus(device_t idev)
++{
++
++	(void)iic_dp_aux_address(idev, 0, false);
++	(void)iic_dp_aux_stop(idev, false);
++}
++
++static int
++iic_dp_aux_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr)
++{
++
++	iic_dp_aux_reset_bus(idev);
++	return (0);
++}
++
++static int
++iic_dp_aux_prepare_bus(device_t idev)
++{
++
++	/* adapter->retries = 3; */
++	iic_dp_aux_reset_bus(idev);
++	return (0);
++}
++
++static int
++iic_dp_aux_probe(device_t idev)
++{
++
++	return (BUS_PROBE_DEFAULT);
++}
++
++static int
++iic_dp_aux_attach(device_t idev)
++{
++	struct iic_dp_aux_data *aux_data;
++
++	aux_data = device_get_softc(idev);
++	aux_data->port = device_add_child(idev, "iicbus", -1);
++	if (aux_data->port == NULL)
++		return (ENXIO);
++	device_quiet(aux_data->port);
++	bus_generic_attach(idev);
++	return (0);
++}
++
++static int
++iic_dp_aux_detach(device_t idev)
++{
++	struct iic_dp_aux_data *aux_data;
++	device_t port;
++
++	aux_data = device_get_softc(idev);
++
++	port = aux_data->port;
++	bus_generic_detach(idev);
++	if (port != NULL)
++		device_delete_child(idev, port);
++
++	return (0);
++}
++
++int
++iic_dp_aux_add_bus(device_t dev, const char *name,
++    int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
++    void *priv, device_t *bus, device_t *adapter)
++{
++	device_t ibus;
++	struct iic_dp_aux_data *data;
++	int idx, error;
++	static int dp_bus_counter;
++
++	mtx_lock(&Giant);
++
++	idx = atomic_fetchadd_int(&dp_bus_counter, 1);
++	ibus = device_add_child(dev, "drm_iic_dp_aux", idx);
++	if (ibus == NULL) {
++		mtx_unlock(&Giant);
++		DRM_ERROR("drm_iic_dp_aux bus %d creation error\n", idx);
++		return (-ENXIO);
++	}
++	device_quiet(ibus);
++	error = device_probe_and_attach(ibus);
++	if (error != 0) {
++		device_delete_child(dev, ibus);
++		mtx_unlock(&Giant);
++		DRM_ERROR("drm_iic_dp_aux bus %d attach failed, %d\n",
++		    idx, error);
++		return (-error);
++	}
++	data = device_get_softc(ibus);
++	data->running = false;
++	data->address = 0;
++	data->aux_ch = ch;
++	data->priv = priv;
++	error = iic_dp_aux_prepare_bus(ibus);
++	if (error == 0) {
++		*bus = ibus;
++		*adapter = data->port;
++	}
++	mtx_unlock(&Giant);
++	return (error);
++}
++
++static device_method_t drm_iic_dp_aux_methods[] = {
++	DEVMETHOD(device_probe,		iic_dp_aux_probe),
++	DEVMETHOD(device_attach,	iic_dp_aux_attach),
++	DEVMETHOD(device_detach,	iic_dp_aux_detach),
++	DEVMETHOD(iicbus_reset,		iic_dp_aux_reset),
++	DEVMETHOD(iicbus_transfer,	iic_dp_aux_xfer),
++	DEVMETHOD_END
 +};
++static driver_t drm_iic_dp_aux_driver = {
++	"drm_iic_dp_aux",
++	drm_iic_dp_aux_methods,
++	sizeof(struct iic_dp_aux_data)
++};
++static devclass_t drm_iic_dp_aux_devclass;
++DRIVER_MODULE_ORDERED(drm_iic_dp_aux, drm, drm_iic_dp_aux_driver,
++    drm_iic_dp_aux_devclass, 0, 0, SI_ORDER_SECOND);
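On the driver side, the aux_ch callback registered through iic_dp_aux_add_bus() receives the drm_iic_dp_aux device itself, so the private pointer passed at registration time comes back via the softc. A skeleton of such a callback, with the hardware access left as comments and the mydp_* names invented:

	static int
	mydp_aux_ch(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte)
	{
		struct iic_dp_aux_data *aux = device_get_softc(idev);
		struct mydp_softc *sc = aux->priv;	/* 'priv' from iic_dp_aux_add_bus() */

		(void)sc;
		if (mode & MODE_I2C_READ) {
			/* Issue an AUX_I2C_READ to aux->address, store the result in *read_byte. */
		} else if (mode & MODE_I2C_WRITE) {
			/* Issue an AUX_I2C_WRITE of write_byte to aux->address. */
		}
		/* MODE_I2C_START / MODE_I2C_STOP control whether the MOT bit stays set. */
		return (0);
	}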
+diff --git a/sys/dev/drm/drm_drv.c b/sys/dev/drm/drm_drv.c
+index 75902a7..6fd32d5 100644
+--- sys/dev/drm/drm_drv.c
++++ sys/dev/drm/drm_drv.c
+@@ -41,22 +41,47 @@ __FBSDID("$FreeBSD$");
+ #include "dev/drm/drmP.h"
+ #include "dev/drm/drm.h"
+ #include "dev/drm/drm_sarea.h"
++#include "dev/drm/drm_mode.h"
+ 
+ #ifdef DRM_DEBUG_DEFAULT_ON
+ int drm_debug_flag = 1;
+ #else
+-int drm_debug_flag = 0;
++int drm_debug_flag = 2;
+ #endif
++int drm_notyet_flag = 0;
 +
- #endif				/* _I915_DRM_H_ */
-Index: sys/dev/drm/drm_edid.c
-===================================================================
-diff --git sys/dev/drm/drm_edid.c sys/dev/drm/drm_edid.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/drm_edid.c	(working copy)
++unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
++unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
+ 
+ static int drm_load(struct drm_device *dev);
+ static void drm_unload(struct drm_device *dev);
+ static drm_pci_id_list_t *drm_find_description(int vendor, int device,
+     drm_pci_id_list_t *idlist);
+ 
++static int
++drm_modevent(module_t mod, int type, void *data)
++{
++
++	switch (type) {
++	case MOD_LOAD:
++		TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
++		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
++		break;
++	}
++	return (0);
++}
++
++static moduledata_t drm_mod = {
++	"drm",
++	drm_modevent,
++	0
++};
++DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+ MODULE_VERSION(drm, 1);
+ MODULE_DEPEND(drm, agp, 1, 1, 1);
+ MODULE_DEPEND(drm, pci, 1, 1, 1);
+ MODULE_DEPEND(drm, mem, 1, 1, 1);
++MODULE_DEPEND(drm, iicbus, 1, 1, 1);
+ 
+ static drm_ioctl_desc_t		  drm_ioctls[256] = {
+ 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+@@ -79,6 +104,9 @@ static drm_ioctl_desc_t		  drm_ioctls[256] = {
+ 	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+ 
++	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
++	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
++
+ 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+@@ -115,9 +143,39 @@ static drm_ioctl_desc_t		  drm_ioctls[256] = {
+ 
+ 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
++	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
++
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ };
+ 
+ static struct cdevsw drm_cdevsw = {
+@@ -127,6 +185,7 @@ static struct cdevsw drm_cdevsw = {
+ 	.d_ioctl =	drm_ioctl,
+ 	.d_poll =	drm_poll,
+ 	.d_mmap =	drm_mmap,
++	.d_mmap_single = drm_gem_mmap_single,
+ 	.d_name =	"drm",
+ 	.d_flags =	D_TRACKCLOSE
+ };
+@@ -162,19 +221,9 @@ int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
+ {
+ 	drm_pci_id_list_t *id_entry;
+ 	int vendor, device;
+-#if __FreeBSD_version < 700010
+-	device_t realdev;
+-
+-	if (!strcmp(device_get_name(kdev), "drmsub"))
+-		realdev = device_get_parent(kdev);
+-	else
+-		realdev = kdev;
+-	vendor = pci_get_vendor(realdev);
+-	device = pci_get_device(realdev);
+-#else
++
+ 	vendor = pci_get_vendor(kdev);
+ 	device = pci_get_device(kdev);
+-#endif
+ 
+ 	if (pci_get_class(kdev) != PCIC_DISPLAY
+ 	    || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
+@@ -201,14 +250,7 @@ int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
+ 	unit = device_get_unit(kdev);
+ 	dev = device_get_softc(kdev);
+ 
+-#if __FreeBSD_version < 700010
+-	if (!strcmp(device_get_name(kdev), "drmsub"))
+-		dev->device = device_get_parent(kdev);
+-	else
+-		dev->device = kdev;
+-#else
+ 	dev->device = kdev;
+-#endif
+ 	dev->devnode = make_dev(&drm_cdevsw,
+ 			0,
+ 			DRM_DEV_UID,
+@@ -217,11 +259,7 @@ int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
+ 			"dri/card%d", unit);
+ 	dev->devnode->si_drv1 = dev;
+ 
+-#if __FreeBSD_version >= 700053
+ 	dev->pci_domain = pci_get_domain(dev->device);
+-#else
+-	dev->pci_domain = 0;
+-#endif
+ 	dev->pci_bus = pci_get_bus(dev->device);
+ 	dev->pci_slot = pci_get_slot(dev->device);
+ 	dev->pci_func = pci_get_function(dev->device);
+@@ -258,6 +296,8 @@ int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
+ 	mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
+ 	mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
+ 	mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
++	mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
++	sx_init(&dev->dev_struct_lock, "drmslk");
+ 
+ 	id_entry = drm_find_description(dev->pci_vendor,
+ 	    dev->pci_device, idlist);
+@@ -313,7 +353,7 @@ static int drm_firstopen(struct drm_device *dev)
+ 	drm_local_map_t *map;
+ 	int i;
+ 
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 
+ 	/* prebuild the SAREA */
+ 	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+@@ -338,7 +378,8 @@ static int drm_firstopen(struct drm_device *dev)
+ 	}
+ 
+ 	dev->lock.lock_queue = 0;
+-	dev->irq_enabled = 0;
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		dev->irq_enabled = 0;
+ 	dev->context_flag = 0;
+ 	dev->last_context = 0;
+ 	dev->if_version = 0;
+@@ -356,14 +397,14 @@ static int drm_lastclose(struct drm_device *dev)
+ 	drm_local_map_t *map, *mapsave;
+ 	int i;
+ 
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 
+ 	DRM_DEBUG("\n");
+ 
+ 	if (dev->driver->lastclose != NULL)
+ 		dev->driver->lastclose(dev);
+ 
+-	if (dev->irq_enabled)
++	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
+ 		drm_irq_uninstall(dev);
+ 
+ 	if (dev->unique) {
+@@ -456,17 +497,7 @@ static int drm_load(struct drm_device *dev)
+ 	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
+ 		atomic_set(&dev->counts[i], 0);
+ 
+-	if (dev->driver->load != NULL) {
+-		DRM_LOCK();
+-		/* Shared code returns -errno. */
+-		retcode = -dev->driver->load(dev,
+-		    dev->id_entry->driver_private);
+-		if (pci_enable_busmaster(dev->device))
+-			DRM_ERROR("Request to enable bus-master failed.\n");
+-		DRM_UNLOCK();
+-		if (retcode != 0)
+-			goto error;
+-	}
++	INIT_LIST_HEAD(&dev->vblank_event_list);
+ 
+ 	if (drm_core_has_AGP(dev)) {
+ 		if (drm_device_is_agp(dev))
+@@ -494,9 +525,31 @@ static int drm_load(struct drm_device *dev)
+ 	dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
+ 	if (dev->drw_unrhdr == NULL) {
+ 		DRM_ERROR("Couldn't allocate drawable number allocator\n");
++		retcode = ENOMEM;
+ 		goto error;
+ 	}
+ 
++	if (dev->driver->driver_features & DRIVER_GEM) {
++		retcode = drm_gem_init(dev);
++		if (retcode != 0) {
++			DRM_ERROR("Cannot initialize graphics execution "
++				  "manager (GEM)\n");
++			goto error1;
++		}
++	}
++
++	if (dev->driver->load != NULL) {
++		DRM_LOCK();
++		/* Shared code returns -errno. */
++		retcode = -dev->driver->load(dev,
++		    dev->id_entry->driver_private);
++		if (pci_enable_busmaster(dev->device))
++			DRM_ERROR("Request to enable bus-master failed.\n");
++		DRM_UNLOCK();
++		if (retcode != 0)
++			goto error;
++	}
++
+ 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ 	    dev->driver->name,
+ 	    dev->driver->major,
+@@ -506,6 +559,8 @@ static int drm_load(struct drm_device *dev)
+ 
+ 	return 0;
+ 
++error1:
++	delete_unrhdr(dev->drw_unrhdr);
+ error:
+ 	drm_sysctl_cleanup(dev);
+ 	DRM_LOCK();
+@@ -517,6 +572,8 @@ error:
+ 	mtx_destroy(&dev->vbl_lock);
+ 	mtx_destroy(&dev->irq_lock);
+ 	mtx_destroy(&dev->dev_lock);
++	mtx_destroy(&dev->event_lock);
++	sx_destroy(&dev->dev_struct_lock);
+ 
+ 	return retcode;
+ }
+@@ -532,6 +589,9 @@ static void drm_unload(struct drm_device *dev)
+ 
+ 	drm_ctxbitmap_cleanup(dev);
+ 
++	if (dev->driver->driver_features & DRIVER_GEM)
++		drm_gem_destroy(dev);
++
+ 	if (dev->agp && dev->agp->mtrr) {
+ 		int __unused retcode;
+ 
+@@ -582,6 +642,8 @@ static void drm_unload(struct drm_device *dev)
+ 	mtx_destroy(&dev->vbl_lock);
+ 	mtx_destroy(&dev->irq_lock);
+ 	mtx_destroy(&dev->dev_lock);
++	mtx_destroy(&dev->event_lock);
++	sx_destroy(&dev->dev_struct_lock);
+ }
+ 
+ int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
+@@ -652,6 +714,9 @@ void drm_close(void *data)
+ 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ 	    DRM_CURRENTPID, (long)dev->device, dev->open_count);
+ 
++	if (dev->driver->driver_features & DRIVER_GEM)
++		drm_gem_release(dev, file_priv);
++
+ 	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
+ 	    && dev->lock.file_priv == file_priv) {
+ 		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
+@@ -683,8 +748,8 @@ void drm_close(void *data)
+ 				break;	/* Got lock */
+ 			}
+ 			/* Contention */
+-			retcode = mtx_sleep((void *)&dev->lock.lock_queue,
+-			    &dev->dev_lock, PCATCH, "drmlk2", 0);
++			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
++			    PCATCH, "drmlk2", 0);
+ 			if (retcode)
+ 				break;
+ 		}
+@@ -699,6 +764,7 @@ void drm_close(void *data)
+ 		drm_reclaim_buffers(dev, file_priv);
+ 
+ 	funsetown(&dev->buf_sigio);
++	seldrain(&file_priv->event_poll);
+ 
+ 	if (dev->driver->postclose != NULL)
+ 		dev->driver->postclose(dev, file_priv);
+@@ -788,16 +854,25 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
+ 		return EACCES;
+ 
+ 	if (is_driver_ioctl) {
+-		DRM_LOCK();
++		if ((ioctl->flags & DRM_UNLOCKED) == 0)
++			DRM_LOCK();
+ 		/* shared code returns -errno */
+ 		retcode = -func(dev, data, file_priv);
+-		DRM_UNLOCK();
++		if ((ioctl->flags & DRM_UNLOCKED) == 0)
++			DRM_UNLOCK();
+ 	} else {
+ 		retcode = func(dev, data, file_priv);
+ 	}
+ 
+ 	if (retcode != 0)
+ 		DRM_DEBUG("    returning %d\n", retcode);
++	if (retcode != 0 &&
++	    (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
++		printf(
++"pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
++		    DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
++		    file_priv->authenticated, retcode);
++	}
+ 
+ 	return retcode;
+ }
+@@ -806,7 +881,7 @@ drm_local_map_t *drm_getsarea(struct drm_device *dev)
+ {
+ 	drm_local_map_t *map;
+ 
+-	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
++	DRM_LOCK_ASSERT(dev);
+ 	TAILQ_FOREACH(map, &dev->maplist, link) {
+ 		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
+ 			return map;
+@@ -815,6 +890,131 @@ drm_local_map_t *drm_getsarea(struct drm_device *dev)
+ 	return NULL;
+ }
+ 
++void
++drm_device_lock_mtx(struct drm_device *dev)
++{
++
++	mtx_lock(&dev->dev_lock);
++}
++
++void
++drm_device_unlock_mtx(struct drm_device *dev)
++{
++
++	mtx_unlock(&dev->dev_lock);
++}
++
++int
++drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
++    const char *msg, int timeout)
++{
++
++	return (msleep(chan, &dev->dev_lock, flags, msg, timeout));
++}
++
++void
++drm_device_assert_mtx_locked(struct drm_device *dev)
++{
++
++	mtx_assert(&dev->dev_lock, MA_OWNED);
++}
++
++void
++drm_device_assert_mtx_unlocked(struct drm_device *dev)
++{
++
++	mtx_assert(&dev->dev_lock, MA_NOTOWNED);
++}
++
++void
++drm_device_lock_struct(struct drm_device *dev)
++{
++
++	sx_xlock(&dev->dev_struct_lock);
++}
++
++void
++drm_device_unlock_struct(struct drm_device *dev)
++{
++
++	sx_xunlock(&dev->dev_struct_lock);
++}
++
++int
++drm_device_sleep_struct(struct drm_device *dev, void *chan, int flags,
++    const char *msg, int timeout)
++{
++
++	return (sx_sleep(chan, &dev->dev_struct_lock, flags, msg, timeout));
++}
++
++void
++drm_device_assert_struct_locked(struct drm_device *dev)
++{
++
++	sx_assert(&dev->dev_struct_lock, SA_XLOCKED);
++}
++
++void
++drm_device_assert_struct_unlocked(struct drm_device *dev)
++{
++
++	sx_assert(&dev->dev_struct_lock, SA_UNLOCKED);
++}
++
++static void
++drm_device_assert_struct_nonsleepable_unlocked(struct drm_device *dev)
++{
++}
++
++void
++drm_compat_locking_init(struct drm_device *dev)
++{
++
++	dev->driver->device_lock = drm_device_lock_mtx;
++	dev->driver->device_unlock = drm_device_unlock_mtx;
++	dev->driver->device_lock_sleep = drm_device_sleep_mtx;
++	dev->driver->device_lock_assert = drm_device_assert_mtx_locked;
++	dev->driver->device_unlock_assert = drm_device_assert_mtx_unlocked;
++	dev->driver->device_nonsleepable_unlock_assert =
++	    drm_device_assert_mtx_unlocked;
++}
++
++void
++drm_sleep_locking_init(struct drm_device *dev)
++{
++
++	dev->driver->device_lock = drm_device_lock_struct;
++	dev->driver->device_unlock = drm_device_unlock_struct;
++	dev->driver->device_lock_sleep = drm_device_sleep_struct;
++	dev->driver->device_lock_assert = drm_device_assert_struct_locked;
++	dev->driver->device_unlock_assert = drm_device_assert_struct_unlocked;
++	dev->driver->device_nonsleepable_unlock_assert =
++	    drm_device_assert_struct_nonsleepable_unlocked;
++}
++
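
[The two families of helpers above give the per-device lock two interchangeable backends: a mutex flavour (drm_device_*_mtx) that preserves the historical behaviour, and an sx(9) flavour (drm_device_*_struct) that lets KMS/GEM code sleep while holding the device lock. DRM_LOCK()/DRM_UNLOCK(), redefined later in drmP.h, dispatch through the new function pointers in struct drm_driver, so a driver only installs one backend at attach time. A sketch of the intended selection; keying it off DRIVER_GEM is an assumption, not something this excerpt shows:

	/* Sketch: pick a locking backend before anything takes DRM_LOCK(). */
	if (dev->driver->driver_features & DRIVER_GEM)
		drm_sleep_locking_init(dev);	/* sx_xlock()/sx_sleep() based */
	else
		drm_compat_locking_init(dev);	/* mtx_lock()/msleep() based */

	DRM_LOCK();		/* now expands to dev->driver->device_lock(dev) */
	/* ... touch device state ... */
	DRM_UNLOCK();
]
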
++int
++drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
++    struct sysctl_oid *top)
++{
++	struct sysctl_oid *oid;
++
++	snprintf(dev->busid_str, sizeof(dev->busid_str),
++	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
++	     dev->pci_slot, dev->pci_func);
++	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
++	    CTLFLAG_RD, dev->busid_str, 0, NULL);
++	if (oid == NULL)
++		return (ENOMEM);
++	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
++	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
++	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
++	if (oid == NULL)
++		return (ENOMEM);
++
++	return (0);
++}
++
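
[drm_add_busid_modesetting() above publishes the PCI bus ID and a modesetting flag as read-only sysctl nodes under whichever oid drm_sysctl passes in, presumably the per-device hw.dri.<unit> tree; following the format string above, the busid node on a typical Intel IGP would read back as something like pci:0000:00:02.0, with modesetting reporting 1 whenever the driver sets DRIVER_MODESET.]
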
+ #if DRM_LINUX
+ 
+ #include <sys/sysproto.h>
+diff --git a/sys/dev/drm/drm_edid.c b/sys/dev/drm/drm_edid.c
+new file mode 100644
+index 0000000..0ee0647
+--- /dev/null
++++ sys/dev/drm/drm_edid.c
 @@ -0,0 +1,1776 @@
 +/*
 + * Copyright (c) 2006 Luc Verhaegen (quirks list)
@@ -6862,24 +12712,11 @@
 +	}
 +	return num_modes;
 +}
-
-Property changes on: stable/9/sys/dev/drm/drm_edid.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/drm_edid.h
-===================================================================
-diff --git sys/dev/drm/drm_edid.h sys/dev/drm/drm_edid.h
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/drm_edid.h	(working copy)
+diff --git a/sys/dev/drm/drm_edid.h b/sys/dev/drm/drm_edid.h
+new file mode 100644
+index 0000000..dc810bb
+--- /dev/null
++++ sys/dev/drm/drm_edid.h
 @@ -0,0 +1,242 @@
 +/*
 + * Copyright © 2007-2008 Intel Corporation
@@ -7123,436 +12960,2005 @@
 +				     struct drm_display_mode *mode);
 +
 +#endif /* __DRM_EDID_H__ */
-
-Property changes on: stable/9/sys/dev/drm/drm_edid.h
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/drmP.h
-===================================================================
-diff --git sys/dev/drm/drmP.h sys/dev/drm/drmP.h
---- sys/dev/drm/drmP.h	(revision 230124)
-+++ sys/dev/drm/drmP.h	(working copy)
-@@ -46,24 +46,25 @@
- #include <sys/queue.h>
- #include <sys/malloc.h>
- #include <sys/kernel.h>
-+#include <sys/ktr.h>
- #include <sys/module.h>
- #include <sys/systm.h>
- #include <sys/conf.h>
- #include <sys/sglist.h>
- #include <sys/stat.h>
--#if __FreeBSD_version >= 700000
- #include <sys/priv.h>
--#endif
- #include <sys/proc.h>
- #include <sys/lock.h>
- #include <sys/fcntl.h>
- #include <sys/uio.h>
- #include <sys/filio.h>
-+#include <sys/selinfo.h>
- #include <sys/sysctl.h>
- #include <sys/bus.h>
- #include <sys/queue.h>
- #include <sys/signalvar.h>
- #include <sys/poll.h>
-+#include <sys/sbuf.h>
- #include <sys/taskqueue.h>
- #include <sys/tree.h>
- #include <vm/vm.h>
-@@ -88,11 +89,7 @@
- #include <sys/mman.h>
- #include <sys/rman.h>
- #include <sys/memrange.h>
--#if __FreeBSD_version >= 800004
- #include <dev/agp/agpvar.h>
--#else /* __FreeBSD_version >= 800004 */
--#include <pci/agpvar.h>
--#endif /* __FreeBSD_version >= 800004 */
- #include <sys/agpio.h>
- #include <sys/mutex.h>
- #include <dev/pci/pcivar.h>
-@@ -104,6 +101,9 @@
- #include "dev/drm/drm_atomic.h"
- #include "dev/drm/drm_internal.h"
- #include "dev/drm/drm_linux_list.h"
-+#include "dev/drm/drm_gem_names.h"
-+#include "dev/drm/drm_mm.h"
-+#include "dev/drm/drm_hashtab.h"
- 
- #include <opt_drm.h>
- #ifdef DRM_DEBUG
-@@ -111,18 +111,12 @@
- #define DRM_DEBUG_DEFAULT_ON 1
- #endif /* DRM_DEBUG */
- 
--#if defined(DRM_LINUX) && DRM_LINUX && !defined(__amd64__)
--#include <sys/file.h>
--#include <sys/proc.h>
--#include <machine/../linux/linux.h>
--#include <machine/../linux/linux_proto.h>
--#else
--/* Either it was defined when it shouldn't be (FreeBSD amd64) or it isn't
-- * supported on this OS yet.
-- */
-+#define	DRM_DEBUGBITS_DEBUG		0x1
-+#define	DRM_DEBUGBITS_KMS		0x2
-+#define	DRM_DEBUGBITS_FAILED_IOCTL	0x4
+diff --git a/sys/dev/drm/drm_edid_modes.h b/sys/dev/drm/drm_edid_modes.h
+new file mode 100644
+index 0000000..57caf35
+--- /dev/null
++++ sys/dev/drm/drm_edid_modes.h
+@@ -0,0 +1,379 @@
++/*
++ * Copyright (c) 2007-2008 Intel Corporation
++ *   Jesse Barnes <jesse.barnes at intel.com>
++ * Copyright 2010 Red Hat, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
 +
- #undef DRM_LINUX
- #define DRM_LINUX 0
--#endif
- 
- /* driver capabilities and requirements mask */
- #define DRIVER_USE_AGP     0x1
-@@ -132,13 +126,29 @@
- #define DRIVER_SG          0x10
- #define DRIVER_HAVE_DMA    0x20
- #define DRIVER_HAVE_IRQ    0x40
--#define DRIVER_DMA_QUEUE   0x100
-+#define DRIVER_IRQ_SHARED  0x80
-+#define DRIVER_IRQ_VBL     0x100
-+#define DRIVER_DMA_QUEUE   0x200
-+#define DRIVER_FB_DMA      0x400
-+#define DRIVER_IRQ_VBL2    0x800
-+#define DRIVER_GEM         0x1000
-+#define DRIVER_MODESET     0x2000
-+#define DRIVER_USE_PLATFORM_DEVICE  0x4000
-+#define	DRIVER_LOCKLESS_IRQ 0x8000
- 
- 
- #define DRM_HASH_SIZE	      16 /* Size of key hash table		  */
- #define DRM_KERNEL_CONTEXT    0	 /* Change drm_resctx if changed	  */
- #define DRM_RESERVED_CONTEXTS 1	 /* Change drm_resctx if changed	  */
- 
-+#define	DRM_GEM_MAPPING_MASK	(3ULL << 62)
-+#define	DRM_GEM_MAPPING_KEY	(2ULL << 62) /* Non-canonical address form */
-+#define	DRM_GEM_MAX_IDX		0x3fffff
-+#define	DRM_GEM_MAPPING_IDX(o)	(((o) >> 40) & DRM_GEM_MAX_IDX)
-+#define	DRM_GEM_MAPPING_OFF(i)	(((uint64_t)(i)) << 40)
-+#define	DRM_GEM_MAPPING_MAPOFF(o) \
-+    ((o) & ~(DRM_GEM_MAPPING_OFF(DRM_GEM_MAX_IDX) | DRM_GEM_MAPPING_KEY))
++#include "dev/drm/drmP.h"
++#include "dev/drm/drm_edid.h"
 +
- MALLOC_DECLARE(DRM_MEM_DMA);
- MALLOC_DECLARE(DRM_MEM_SAREA);
- MALLOC_DECLARE(DRM_MEM_DRIVER);
-@@ -159,6 +169,7 @@
- MALLOC_DECLARE(DRM_MEM_DRAWABLE);
- MALLOC_DECLARE(DRM_MEM_MM);
- MALLOC_DECLARE(DRM_MEM_HASHTAB);
-+MALLOC_DECLARE(DRM_MEM_KMS);
- 
- SYSCTL_DECL(_hw_drm);
- 
-@@ -196,8 +207,21 @@
- #define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u)
- #define DRM_SPINLOCK_ASSERT(l)	mtx_assert(l, MA_OWNED)
- #define DRM_CURRENTPID		curthread->td_proc->p_pid
--#define DRM_LOCK()		mtx_lock(&dev->dev_lock)
--#define DRM_UNLOCK() 		mtx_unlock(&dev->dev_lock)
-+#define DRM_LOCK()		(dev)->driver->device_lock((dev))
-+#define DRM_UNLOCK() 		(dev)->driver->device_unlock((dev))
-+#define	DRM_LOCK_SLEEP(dev, chan, flags, msg, timeout)			\
-+    dev->driver->device_lock_sleep((dev), (chan), (flags), (msg), (timeout))
-+#if defined(INVARIANTS)
-+#define	DRM_LOCK_ASSERT(d)	(d)->driver->device_lock_assert((d))
-+#define	DRM_UNLOCK_ASSERT(d)	(d)->driver->device_unlock_assert((d))
-+#define	DRM_NONSLEEPABLE_UNLOCK_ASSERT(d) \
-+    (d)->driver->device_nonsleepable_unlock_assert((d))
-+#else
-+#define	DRM_LOCK_ASSERT(d)
-+#define	DRM_UNLOCK_ASSERT(d)
-+#define	DRM_NONSLEEPABLE_UNLOCK_ASSERT(d)
-+#endif
++/*
++ * Autogenerated from the DMT spec.
++ * This table is copied from xfree86/modes/xf86EdidModes.c.
++ * But the mode with Reduced blank feature is deleted.
++ */
++static struct drm_display_mode drm_dmt_modes[] = {
++	/* 640x350 at 85Hz */
++	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
++		   736, 832, 0, 350, 382, 385, 445, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 640x400 at 85Hz */
++	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
++		   736, 832, 0, 400, 401, 404, 445, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 720x400 at 85Hz */
++	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
++		   828, 936, 0, 400, 401, 404, 446, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 640x480 at 60Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
++		   752, 800, 0, 480, 489, 492, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 640x480 at 72Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
++		   704, 832, 0, 480, 489, 492, 520, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 640x480 at 75Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
++		   720, 840, 0, 480, 481, 484, 500, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 640x480 at 85Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
++		   752, 832, 0, 480, 481, 484, 509, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 800x600 at 56Hz */
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
++		   896, 1024, 0, 600, 601, 603, 625, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 800x600 at 60Hz */
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
++		   968, 1056, 0, 600, 601, 605, 628, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 800x600 at 72Hz */
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
++		   976, 1040, 0, 600, 637, 643, 666, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 800x600 at 75Hz */
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
++		   896, 1056, 0, 600, 601, 604, 625, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 800x600 at 85Hz */
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
++		   896, 1048, 0, 600, 601, 604, 631, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 848x480 at 60Hz */
++	{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
++		   976, 1088, 0, 480, 486, 494, 517, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1024x768 at 43Hz, interlace */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
++		   1208, 1264, 0, 768, 768, 772, 817, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1024x768 at 60Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
++		   1184, 1344, 0, 768, 771, 777, 806, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1024x768 at 70Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
++		   1184, 1328, 0, 768, 771, 777, 806, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1024x768 at 75Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
++		   1136, 1312, 0, 768, 769, 772, 800, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1024x768 at 85Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
++		   1168, 1376, 0, 768, 769, 772, 808, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1152x864 at 75Hz */
++	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
++		   1344, 1600, 0, 864, 865, 868, 900, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x768 at 60Hz */
++	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
++		   1472, 1664, 0, 768, 771, 778, 798, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x768 at 75Hz */
++	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
++		   1488, 1696, 0, 768, 771, 778, 805, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1280x768 at 85Hz */
++	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
++		   1496, 1712, 0, 768, 771, 778, 809, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x800 at 60Hz */
++	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
++		   1480, 1680, 0, 800, 803, 809, 831, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1280x800 at 75Hz */
++	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
++		   1488, 1696, 0, 800, 803, 809, 838, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x800 at 85Hz */
++	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
++		   1496, 1712, 0, 800, 803, 809, 843, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x960 at 60Hz */
++	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
++		   1488, 1800, 0, 960, 961, 964, 1000, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x960 at 85Hz */
++	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
++		   1504, 1728, 0, 960, 961, 964, 1011, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x1024 at 60Hz */
++	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
++		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x1024 at 75Hz */
++	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
++		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x1024 at 85Hz */
++	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
++		   1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1360x768 at 60Hz */
++	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
++		   1536, 1792, 0, 768, 771, 777, 795, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1440x1050 at 60Hz */
++	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
++		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1440x1050 at 75Hz */
++	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
++		   1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1440x1050 at 85Hz */
++	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
++		   1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1440x900 at 60Hz */
++	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
++		   1672, 1904, 0, 900, 903, 909, 934, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1440x900 at 75Hz */
++	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
++		   1688, 1936, 0, 900, 903, 909, 942, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1440x900 at 85Hz */
++	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
++		   1696, 1952, 0, 900, 903, 909, 948, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1600x1200 at 60Hz */
++	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
++		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1600x1200 at 65Hz */
++	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
++		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1600x1200 at 70Hz */
++	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
++		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1600x1200 at 75Hz */
++	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
++		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1600x1200 at 85Hz */
++	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
++		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1680x1050 at 60Hz */
++	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
++		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1680x1050 at 75Hz */
++	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
++		   1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1680x1050 at 85Hz */
++	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
++		   1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1792x1344 at 60Hz */
++	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
++		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1729x1344 at 75Hz */
++	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
++		   2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1853x1392 at 60Hz */
++	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
++		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1856x1392 at 75Hz */
++	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
++		   2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1200 at 60Hz */
++	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
++		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1200 at 75Hz */
++	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
++		   2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1200 at 85Hz */
++	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
++		   2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1440 at 60Hz */
++	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
++		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1440 at 75Hz */
++	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
++		   2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 2560x1600 at 60Hz */
++	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
++		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 2560x1600 at 75HZ */
++	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
++		   3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 2560x1600 at 85HZ */
++	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
++		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++};
++static const int drm_num_dmt_modes =
++	sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 +
- #define DRM_SYSCTL_HANDLER_ARGS	(SYSCTL_HANDLER_ARGS)
- 
- #define DRM_IRQ_ARGS		void *arg
-@@ -221,20 +245,25 @@
- 
- #define PAGE_ALIGN(addr) round_page(addr)
- /* DRM_SUSER returns true if the user is superuser */
--#if __FreeBSD_version >= 700000
- #define DRM_SUSER(p)		(priv_check(p, PRIV_DRIVER) == 0)
--#else
--#define DRM_SUSER(p)		(suser(p) == 0)
--#endif
- #define DRM_AGP_FIND_DEVICE()	agp_find_device()
- #define DRM_MTRR_WC		MDF_WRITECOMBINE
- #define jiffies			ticks
-+#define	jiffies_to_msecs(x)	(((int64_t)(x)) * 1000 / hz)
-+#define	msecs_to_jiffies(x)	(((int64_t)(x)) * hz / 1000)
-+#define	time_after(a,b)		((long)(b) - (long)(a) < 0)
-+#define	time_after_eq(a,b)	((long)(b) - (long)(a) <= 0)
-+#define drm_msleep(x, msg)	pause((msg), ((int64_t)(x)) * 1000 / hz)
- 
- typedef vm_paddr_t dma_addr_t;
--typedef u_int64_t u64;
--typedef u_int32_t u32;
--typedef u_int16_t u16;
--typedef u_int8_t u8;
-+typedef uint64_t u64;
-+typedef uint32_t u32;
-+typedef uint16_t u16;
-+typedef uint8_t u8;
-+typedef int64_t s64;
-+typedef int32_t s32;
-+typedef int16_t s16;
-+typedef int8_t s8;
- 
- /* DRM_READMEMORYBARRIER() prevents reordering of reads.
-  * DRM_WRITEMEMORYBARRIER() prevents reordering of writes.
-@@ -253,6 +282,9 @@
- #define DRM_READ32(map, offset)						\
- 	le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) +	\
- 	    (vm_offset_t)(offset)))
-+#define DRM_READ64(map, offset)						\
-+	le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) +	\
-+	    (vm_offset_t)(offset)))
- #define DRM_WRITE8(map, offset, val)					\
- 	*(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) +		\
- 	    (vm_offset_t)(offset)) = val
-@@ -262,6 +294,9 @@
- #define DRM_WRITE32(map, offset, val)					\
- 	*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) +		\
- 	    (vm_offset_t)(offset)) = htole32(val)
-+#define DRM_WRITE64(map, offset, val)					\
-+	*(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) +		\
-+	    (vm_offset_t)(offset)) = htole64(val)
- 
- #define DRM_VERIFYAREA_READ( uaddr, size )		\
- 	(!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ))
-@@ -317,11 +352,23 @@
- #define DRM_INFO(fmt, ...)  printf("info: [" DRM_NAME "] " fmt , ##__VA_ARGS__)
- 
- #define DRM_DEBUG(fmt, ...) do {					\
--	if (drm_debug_flag)						\
-+	if ((drm_debug_flag & DRM_DEBUGBITS_DEBUG) != 0)		\
- 		printf("[" DRM_NAME ":pid%d:%s] " fmt, DRM_CURRENTPID,	\
- 			__func__ , ##__VA_ARGS__);			\
- } while (0)
- 
-+#define DRM_DEBUG_KMS(fmt, ...) do {					\
-+	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)			\
-+		printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\
-+			__func__ , ##__VA_ARGS__);			\
-+} while (0)
++static struct drm_display_mode edid_est_modes[] = {
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
++		   968, 1056, 0, 600, 601, 605, 628, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600 at 60Hz */
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
++		   896, 1024, 0, 600, 601, 603,  625, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600 at 56Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
++		   720, 840, 0, 480, 481, 484, 500, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480 at 75Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
++		   704,  832, 0, 480, 489, 491, 520, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480 at 72Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
++		   768,  864, 0, 480, 483, 486, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480 at 67Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
++		   752, 800, 0, 480, 490, 492, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480 at 60Hz */
++	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
++		   846, 900, 0, 400, 421, 423,  449, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400 at 88Hz */
++	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
++		   846,  900, 0, 400, 412, 414, 449, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400 at 70Hz */
++	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
++		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024 at 75Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
++		   1136, 1312, 0,  768, 769, 772, 800, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768 at 75Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
++		   1184, 1328, 0,  768, 771, 777, 806, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768 at 70Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
++		   1184, 1344, 0,  768, 771, 777, 806, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768 at 60Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
++		   1208, 1264, 0, 768, 768, 776, 817, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768 at 43Hz */
++	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
++		   928, 1152, 0, 624, 625, 628, 667, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624 at 75Hz */
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
++		   896, 1056, 0, 600, 601, 604,  625, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600 at 75Hz */
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
++		   976, 1040, 0, 600, 637, 643, 666, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600 at 72Hz */
++	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
++		   1344, 1600, 0,  864, 865, 868, 900, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864 at 75Hz */
++};
 +
-+#define DRM_DEBUG_DRIVER(fmt, ...) do {					\
-+	if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)			\
-+		printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\
-+			__func__ , ##__VA_ARGS__);			\
-+} while (0)
++static const struct {
++	short w;
++	short h;
++	short r;
++	short rb;
++} est3_modes[] = {
++	/* byte 6 */
++	{ 640, 350, 85, 0 },
++	{ 640, 400, 85, 0 },
++	{ 720, 400, 85, 0 },
++	{ 640, 480, 85, 0 },
++	{ 848, 480, 60, 0 },
++	{ 800, 600, 85, 0 },
++	{ 1024, 768, 85, 0 },
++	{ 1152, 864, 75, 0 },
++	/* byte 7 */
++	{ 1280, 768, 60, 1 },
++	{ 1280, 768, 60, 0 },
++	{ 1280, 768, 75, 0 },
++	{ 1280, 768, 85, 0 },
++	{ 1280, 960, 60, 0 },
++	{ 1280, 960, 85, 0 },
++	{ 1280, 1024, 60, 0 },
++	{ 1280, 1024, 85, 0 },
++	/* byte 8 */
++	{ 1360, 768, 60, 0 },
++	{ 1440, 900, 60, 1 },
++	{ 1440, 900, 60, 0 },
++	{ 1440, 900, 75, 0 },
++	{ 1440, 900, 85, 0 },
++	{ 1400, 1050, 60, 1 },
++	{ 1400, 1050, 60, 0 },
++	{ 1400, 1050, 75, 0 },
++	/* byte 9 */
++	{ 1400, 1050, 85, 0 },
++	{ 1680, 1050, 60, 1 },
++	{ 1680, 1050, 60, 0 },
++	{ 1680, 1050, 75, 0 },
++	{ 1680, 1050, 85, 0 },
++	{ 1600, 1200, 60, 0 },
++	{ 1600, 1200, 65, 0 },
++	{ 1600, 1200, 70, 0 },
++	/* byte 10 */
++	{ 1600, 1200, 75, 0 },
++	{ 1600, 1200, 85, 0 },
++	{ 1792, 1344, 60, 0 },
++	{ 1792, 1344, 85, 0 },
++	{ 1856, 1392, 60, 0 },
++	{ 1856, 1392, 75, 0 },
++	{ 1920, 1200, 60, 1 },
++	{ 1920, 1200, 60, 0 },
++	/* byte 11 */
++	{ 1920, 1200, 75, 0 },
++	{ 1920, 1200, 85, 0 },
++	{ 1920, 1440, 60, 0 },
++	{ 1920, 1440, 75, 0 },
++};
++static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
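
[est3_modes[] above is grouped so that each run of eight entries corresponds to one byte (6 through 11) of an EDID Established Timings III block, one entry per bit. A self-contained sketch of the index arithmetic a consumer such as drm_edid.c would presumably apply; treating bit 7 as the first entry of each group is an assumption based on the byte comments, not something shown here:

	#include <stdio.h>

	/*
	 * Illustrative only: map a (byte, bit) pair from an EST III
	 * descriptor onto an index into est3_modes[] as laid out above.
	 */
	static int
	est3_index(int byte, int bit)
	{
		return ((byte - 6) * 8 + (7 - bit));
	}

	int
	main(void)
	{
		/* byte 7, bit 7 selects entry 8: 1280x768, 60 Hz, reduced blanking */
		printf("index %d\n", est3_index(7, 7));
		return (0);
	}
]
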
+diff --git a/sys/dev/drm/drm_fb_helper.c b/sys/dev/drm/drm_fb_helper.c
+new file mode 100644
+index 0000000..70661c5
+--- /dev/null
++++ sys/dev/drm/drm_fb_helper.c
+@@ -0,0 +1,1560 @@
++/*
++ * Copyright (c) 2006-2009 Red Hat Inc.
++ * Copyright (c) 2006-2008 Intel Corporation
++ * Copyright (c) 2007 Dave Airlie <airlied at linux.ie>
++ *
++ * DRM framebuffer helper functions
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission.  The copyright holders make no representations
++ * about the suitability of this software for any purpose.  It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ *
++ * Authors:
++ *      Dave Airlie <airlied at linux.ie>
++ *      Jesse Barnes <jesse.barnes at intel.com>
++ */
++#include "dev/drm/drmP.h"
++#include "dev/drm/drm_crtc.h"
++#include "dev/drm/drm_fb_helper.h"
++#include "dev/drm/drm_crtc_helper.h"
 +
- typedef struct drm_pci_id_list
- {
- 	int vendor;
-@@ -339,6 +386,9 @@
- #define DRM_AUTH	0x1
- #define DRM_MASTER	0x2
- #define DRM_ROOT_ONLY	0x4
-+#define DRM_CONTROL_ALLOW 0x8
-+#define DRM_UNLOCKED	0x10
++static DRM_LIST_HEAD(kernel_fb_helper_list);
 +
- typedef struct drm_ioctl_desc {
- 	unsigned long cmd;
- 	int (*func)(struct drm_device *dev, void *data,
-@@ -415,6 +465,16 @@
- 	drm_freelist_t	  freelist;
- } drm_buf_entry_t;
- 
-+/* Event queued up for userspace to read */
-+struct drm_pending_event {
-+	struct drm_event *event;
-+	struct list_head link;
-+	struct drm_file *file_priv;
-+	pid_t pid; /* pid of requester, no guarantee it's valid by the time
-+		      we deliver the event, for tracing only */
-+	void (*destroy)(struct drm_pending_event *event);
-+};
++/* simple single crtc case helper function */
++int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
++{
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_connector *connector;
 +
- typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
- struct drm_file {
- 	TAILQ_ENTRY(drm_file) link;
-@@ -425,7 +485,18 @@
- 	uid_t		  uid;
- 	drm_magic_t	  magic;
- 	unsigned long	  ioctl_count;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct drm_fb_helper_connector *fb_helper_connector;
 +
- 	void		 *driver_priv;
-+	struct drm_gem_names object_names;
++		fb_helper_connector = malloc(
++		    sizeof(struct drm_fb_helper_connector), DRM_MEM_KMS,
++		    M_WAITOK | M_ZERO);
 +
-+	int		  is_master;
-+	struct drm_master *masterp;
++		fb_helper_connector->connector = connector;
++		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
++	}
++	return 0;
++}
 +
-+	struct list_head  fbs;
++const char *fb_mode_option;
 +
-+	struct list_head  event_list;
-+	int		  event_space;
-+	struct selinfo	  event_poll;
- };
- 
- typedef struct drm_lock_data {
-@@ -519,6 +590,21 @@
- 	int inmodeset;			/* Display driver is setting mode */
- };
- 
-+/* Size of ringbuffer for vblank timestamps. Just double-buffer
-+ * in initial implementation.
++/**
++ * drm_fb_helper_connector_parse_command_line - parse command line for connector
++ * @connector - connector to parse line for
++ * @mode_option - per connector mode option
++ *
++ * This parses the connector specific then generic command lines for
++ * modes and options to configure the connector.
++ *
++ * This uses the same parameters as the fb modedb.c, except for extra
++ *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
++ *
++ * enable/enable Digital/disable bit at the end
 + */
-+#define DRM_VBLANKTIME_RBSIZE 2
++static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
++						       const char *mode_option)
++{
++	const char *name;
++	unsigned int namelen;
++	int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
++	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
++	int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
++	int i;
++	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
++	struct drm_fb_helper_cmdline_mode *cmdline_mode;
++	struct drm_connector *connector;
 +
-+/* Flags and return codes for get_vblank_timestamp() driver function. */
-+#define DRM_CALLED_FROM_VBLIRQ 1
-+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
-+#define DRM_VBLANKTIME_INVBL             (1 << 1)
++	if (!fb_helper_conn)
++		return false;
++	connector = fb_helper_conn->connector;
 +
-+/* get_scanout_position() return flags */
-+#define DRM_SCANOUTPOS_VALID        (1 << 0)
-+#define DRM_SCANOUTPOS_INVBL        (1 << 1)
-+#define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
++	cmdline_mode = &fb_helper_conn->cmdline_mode;
++	if (!mode_option)
++		mode_option = fb_mode_option;
 +
- /* location of GART table */
- #define DRM_ATI_GART_MAIN 1
- #define DRM_ATI_GART_FB   2
-@@ -540,6 +626,67 @@
- 	struct drm_dma_handle *dmah; /* handle for ATI PCIGART table */
- };
- 
-+typedef vm_paddr_t resource_size_t;
++	if (!mode_option) {
++		cmdline_mode->specified = false;
++		return false;
++	}
 +
++	name = mode_option;
++	namelen = strlen(name);
++	for (i = namelen-1; i >= 0; i--) {
++		switch (name[i]) {
++		case '@':
++			namelen = i;
++			if (!refresh_specified && !bpp_specified &&
++			    !yres_specified) {
++				refresh = strtol(&name[i+1], NULL, 10);
++				refresh_specified = 1;
++				if (cvt || rb)
++					cvt = 0;
++			} else
++				goto done;
++			break;
++		case '-':
++			namelen = i;
++			if (!bpp_specified && !yres_specified) {
++				bpp = strtol(&name[i+1], NULL, 10);
++				bpp_specified = 1;
++				if (cvt || rb)
++					cvt = 0;
++			} else
++				goto done;
++			break;
++		case 'x':
++			if (!yres_specified) {
++				yres = strtol(&name[i+1], NULL, 10);
++				yres_specified = 1;
++			} else
++				goto done;
++		case '0' ... '9':
++			break;
++		case 'M':
++			if (!yres_specified)
++				cvt = 1;
++			break;
++		case 'R':
++			if (cvt)
++				rb = 1;
++			break;
++		case 'm':
++			if (!cvt)
++				margins = 1;
++			break;
++		case 'i':
++			if (!cvt)
++				interlace = 1;
++			break;
++		case 'e':
++			force = DRM_FORCE_ON;
++			break;
++		case 'D':
++			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
++			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
++				force = DRM_FORCE_ON;
++			else
++				force = DRM_FORCE_ON_DIGITAL;
++			break;
++		case 'd':
++			force = DRM_FORCE_OFF;
++			break;
++		default:
++			goto done;
++		}
++	}
++	if (i < 0 && yres_specified) {
++		xres = strtol(name, NULL, 10);
++		res_specified = 1;
++	}
++done:
++
++	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
++		drm_get_connector_name(connector), xres, yres,
++		(refresh) ? refresh : 60, (rb) ? " reduced blanking" :
++		"", (margins) ? " with margins" : "", (interlace) ?
++		" interlaced" : "");
++
++	if (force) {
++		const char *s;
++		switch (force) {
++		case DRM_FORCE_OFF: s = "OFF"; break;
++		case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
++		default:
++		case DRM_FORCE_ON: s = "ON"; break;
++		}
++
++		DRM_INFO("forcing %s connector %s\n",
++			 drm_get_connector_name(connector), s);
++		connector->force = force;
++	}
++
++	if (res_specified) {
++		cmdline_mode->specified = true;
++		cmdline_mode->xres = xres;
++		cmdline_mode->yres = yres;
++	}
++
++	if (refresh_specified) {
++		cmdline_mode->refresh_specified = true;
++		cmdline_mode->refresh = refresh;
++	}
++
++	if (bpp_specified) {
++		cmdline_mode->bpp_specified = true;
++		cmdline_mode->bpp = bpp;
++	}
++	cmdline_mode->rb = rb ? true : false;
++	cmdline_mode->cvt = cvt  ? true : false;
++	cmdline_mode->interlace = interlace ? true : false;
++
++	return true;
++}
++
++static int
++fb_get_options(const char *connector_name, char **option)
++{
++
++	return (1);
++}
++
++static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
++{
++	struct drm_fb_helper_connector *fb_helper_conn;
++	int i;
++
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		char *option = NULL;
++
++		fb_helper_conn = fb_helper->connector_info[i];
++
++		/* do something on return - turn off connector maybe */
++		if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
++			continue;
++
++		drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
++	}
++	return 0;
++}
++
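
[drm_fb_helper_connector_parse_command_line() above scans a mode string from the end, peeling off the refresh (after '@'), bpp (after '-'), yres (after 'x') and the single-letter flags before treating the leading digits as xres; note that fb_get_options() is a stub returning 1 in this port, so the parser only runs when a caller hands it a mode_option directly. As a worked example, the hypothetical option string "1024x768-16@75mi" would parse to xres=1024, yres=768, bpp=16, refresh=75, with the margins and interlace flags set and cvt/rb left clear.]
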
++#if 0
++static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
++{
++	uint16_t *r_base, *g_base, *b_base;
++	int i;
++
++	r_base = crtc->gamma_store;
++	g_base = r_base + crtc->gamma_size;
++	b_base = g_base + crtc->gamma_size;
++
++	for (i = 0; i < crtc->gamma_size; i++)
++		helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
++}
++
++static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
++{
++	uint16_t *r_base, *g_base, *b_base;
++
++	r_base = crtc->gamma_store;
++	g_base = r_base + crtc->gamma_size;
++	b_base = g_base + crtc->gamma_size;
++
++	crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
++}
++#endif
++
++#if 0
++int drm_fb_helper_debug_enter(struct fb_info *info)
++{
++	struct drm_fb_helper *helper = info->par;
++	struct drm_crtc_helper_funcs *funcs;
++	int i;
++
++	if (list_empty(&kernel_fb_helper_list))
++		return false;
++
++	list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
++		for (i = 0; i < helper->crtc_count; i++) {
++			struct drm_mode_set *mode_set =
++				&helper->crtc_info[i].mode_set;
++
++			if (!mode_set->crtc->enabled)
++				continue;
++
++			funcs =	mode_set->crtc->helper_private;
++			drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
++			funcs->mode_set_base_atomic(mode_set->crtc,
++						    mode_set->fb,
++						    mode_set->x,
++						    mode_set->y,
++						    ENTER_ATOMIC_MODE_SET);
++		}
++	}
++
++	return 0;
++}
++#endif
++
++#if 0
++/* Find the real fb for a given fb helper CRTC */
++static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_crtc *c;
++
++	list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
++		if (crtc->base.id == c->base.id)
++			return c->fb;
++	}
++
++	return NULL;
++}
++#endif
++
++#if 0
++int drm_fb_helper_debug_leave(struct fb_info *info)
++{
++	struct drm_fb_helper *helper = info->par;
++	struct drm_crtc *crtc;
++	struct drm_crtc_helper_funcs *funcs;
++	struct drm_framebuffer *fb;
++	int i;
++
++	for (i = 0; i < helper->crtc_count; i++) {
++		struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
++		crtc = mode_set->crtc;
++		funcs = crtc->helper_private;
++		fb = drm_mode_config_fb(crtc);
++
++		if (!crtc->enabled)
++			continue;
++
++		if (!fb) {
++			DRM_ERROR("no fb to restore??\n");
++			continue;
++		}
++
++		drm_fb_helper_restore_lut_atomic(mode_set->crtc);
++		funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
++					    crtc->y, LEAVE_ATOMIC_MODE_SET);
++	}
++
++	return 0;
++}
++#endif
++
++bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
++{
++	bool error = false;
++	int i, ret;
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
++		ret = drm_crtc_helper_set_config(mode_set);
++		if (ret)
++			error = true;
++	}
++	return error;
++}
++
++#if 0
++bool drm_fb_helper_force_kernel_mode(void)
++{
++	bool ret, error = false;
++	struct drm_fb_helper *helper;
++
++	if (list_empty(&kernel_fb_helper_list))
++		return false;
++
++	list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
++		if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
++			continue;
++
++		ret = drm_fb_helper_restore_fbdev_mode(helper);
++		if (ret)
++			error = true;
++	}
++	return error;
++}
++#endif
++
++#if 0
++int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
++			void *panic_str)
++{
++	printf("panic occurred, switching back to text console\n");
++	return drm_fb_helper_force_kernel_mode();
++	return 0;
++}
++
++static struct notifier_block paniced = {
++	.notifier_call = drm_fb_helper_panic,
++};
++
 +/**
-+ * GEM specific mm private for tracking GEM objects
++ * drm_fb_helper_restore - restore the framebuffer console (kernel) config
++ *
++ * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
 + */
-+struct drm_gem_mm {
-+	struct drm_open_hash offset_hash; /**< User token hash table for maps */
-+	struct unrhdr *idxunr;
++void drm_fb_helper_restore(void)
++{
++	bool ret;
++	ret = drm_fb_helper_force_kernel_mode();
++	if (ret == true)
++		DRM_ERROR("Failed to restore crtc configuration\n");
++}
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
++{
++	drm_fb_helper_restore();
++}
++static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
++
++static void drm_fb_helper_sysrq(int dummy1)
++{
++	schedule_work(&drm_fb_helper_restore_work);
++}
++
++static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
++	.handler = drm_fb_helper_sysrq,
++	.help_msg = "force-fb(V)",
++	.action_msg = "Restore framebuffer console",
 +};
++#else
++static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
++#endif
++#endif
 +
-+struct drm_gem_object {
-+	/** Reference count of this object */
-+	u_int refcount;
++#if 0
++static void drm_fb_helper_on(struct fb_info *info)
++{
++	struct drm_fb_helper *fb_helper = info->par;
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_crtc *crtc;
++	struct drm_crtc_helper_funcs *crtc_funcs;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++	int i, j;
 +
-+	/** Handle count of this object. Each handle also holds a reference */
-+	u_int handle_count; /* number of handles on this object */
++	/*
++	 * For each CRTC in this fb, turn the crtc on then,
++	 * find all associated encoders and turn them on.
++	 */
++	sx_xlock(&dev->mode_config.mutex);
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		crtc = fb_helper->crtc_info[i].mode_set.crtc;
++		crtc_funcs = crtc->helper_private;
 +
-+	/** Related drm device */
-+	struct drm_device *dev;
++		if (!crtc->enabled)
++			continue;
 +
-+	/** File representing the shmem storage: filp in Linux parlance */
-+	vm_object_t vm_obj;
++		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 +
-+	bool on_map;
-+	struct drm_hash_item map_list;
++		/* Walk the connectors & encoders on this fb turning them on */
++		for (j = 0; j < fb_helper->connector_count; j++) {
++			connector = fb_helper->connector_info[j]->connector;
++			connector->dpms = DRM_MODE_DPMS_ON;
++			drm_connector_property_set_value(connector,
++							 dev->mode_config.dpms_property,
++							 DRM_MODE_DPMS_ON);
++		}
++		/* Found a CRTC on this fb, now find encoders */
++		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++			if (encoder->crtc == crtc) {
++				struct drm_encoder_helper_funcs *encoder_funcs;
 +
-+	/**
-+	 * Size of the object, in bytes.  Immutable over the object's
-+	 * lifetime.
-+	 */
-+	size_t size;
++				encoder_funcs = encoder->helper_private;
++				encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
++			}
++		}
++	}
++	sx_xunlock(&dev->mode_config.mutex);
++}
++#endif
 +
-+	/**
-+	 * Global name for this object, starts at 1. 0 means unnamed.
-+	 * Access is covered by the object_name_lock in the related drm_device
-+	 */
-+	int name;
++#if 0
++static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
++{
++	struct drm_fb_helper *fb_helper = info->par;
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_crtc *crtc;
++	struct drm_crtc_helper_funcs *crtc_funcs;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++	int i, j;
 +
-+	/**
-+	 * Memory domains. These monitor which caches contain read/write data
-+	 * related to the object. When transitioning from one set of domains
-+	 * to another, the driver is called to ensure that caches are suitably
-+	 * flushed and invalidated
++	/*
++	 * For each CRTC in this fb, find all associated encoders
++	 * and turn them off, then turn off the CRTC.
 +	 */
-+	uint32_t read_domains;
-+	uint32_t write_domain;
++	sx_xlock(&dev->mode_config.mutex);
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		crtc = fb_helper->crtc_info[i].mode_set.crtc;
++		crtc_funcs = crtc->helper_private;
 +
-+	/**
-+	 * While validating an exec operation, the
-+	 * new read/write domain values are computed here.
-+	 * They will be transferred to the above values
-+	 * at the point that any cache flushing occurs
++		if (!crtc->enabled)
++			continue;
++
++		/* Walk the connectors on this fb and mark them off */
++		for (j = 0; j < fb_helper->connector_count; j++) {
++			connector = fb_helper->connector_info[j]->connector;
++			connector->dpms = dpms_mode;
++			drm_connector_property_set_value(connector,
++							 dev->mode_config.dpms_property,
++							 dpms_mode);
++		}
++		/* Found a CRTC on this fb, now find encoders */
++		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++			if (encoder->crtc == crtc) {
++				struct drm_encoder_helper_funcs *encoder_funcs;
++
++				encoder_funcs = encoder->helper_private;
++				encoder_funcs->dpms(encoder, dpms_mode);
++			}
++		}
++		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++	}
++	sx_xunlock(&dev->mode_config.mutex);
++}
++#endif
++
++#if 0
++int drm_fb_helper_blank(int blank, struct fb_info *info)
++{
++	switch (blank) {
++	/* Display: On; HSync: On, VSync: On */
++	case FB_BLANK_UNBLANK:
++		drm_fb_helper_on(info);
++		break;
++	/* Display: Off; HSync: On, VSync: On */
++	case FB_BLANK_NORMAL:
++		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
++		break;
++	/* Display: Off; HSync: Off, VSync: On */
++	case FB_BLANK_HSYNC_SUSPEND:
++		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
++		break;
++	/* Display: Off; HSync: On, VSync: Off */
++	case FB_BLANK_VSYNC_SUSPEND:
++		drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
++		break;
++	/* Display: Off; HSync: Off, VSync: Off */
++	case FB_BLANK_POWERDOWN:
++		drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
++		break;
++	}
++	return 0;
++}
++#endif
++
++static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
++{
++	int i;
++
++	for (i = 0; i < helper->connector_count; i++)
++		free(helper->connector_info[i], DRM_MEM_KMS);
++	free(helper->connector_info, DRM_MEM_KMS);
++	for (i = 0; i < helper->crtc_count; i++)
++		free(helper->crtc_info[i].mode_set.connectors, DRM_MEM_KMS);
++	free(helper->crtc_info, DRM_MEM_KMS);
++}
++
++int drm_fb_helper_init(struct drm_device *dev,
++		       struct drm_fb_helper *fb_helper,
++		       int crtc_count, int max_conn_count)
++{
++	struct drm_crtc *crtc;
++	int i;
++
++	fb_helper->dev = dev;
++
++	INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
++
++	fb_helper->crtc_info = malloc(crtc_count *
++	    sizeof(struct drm_fb_helper_crtc), DRM_MEM_KMS, M_WAITOK | M_ZERO);
++
++	fb_helper->crtc_count = crtc_count;
++	fb_helper->connector_info = malloc(dev->mode_config.num_connector *
++	    sizeof(struct drm_fb_helper_connector *), DRM_MEM_KMS,
++	    M_WAITOK | M_ZERO);
++	fb_helper->connector_count = 0;
++
++	for (i = 0; i < crtc_count; i++) {
++		fb_helper->crtc_info[i].mode_set.connectors =
++			malloc(max_conn_count * sizeof(struct drm_connector *),
++			    DRM_MEM_KMS, M_WAITOK | M_ZERO);
++
++		fb_helper->crtc_info[i].mode_set.num_connectors = 0;
++	}
++
++	i = 0;
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		fb_helper->crtc_info[i].crtc_id = crtc->base.id;
++		fb_helper->crtc_info[i].mode_set.crtc = crtc;
++		i++;
++	}
++	fb_helper->conn_limit = max_conn_count;
++	return 0;
++}
++
++void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
++{
++	if (!list_empty(&fb_helper->kernel_fb_list)) {
++		list_del(&fb_helper->kernel_fb_list);
++		if (list_empty(&kernel_fb_helper_list)) {
++#if 0
++			printk(KERN_INFO "drm: unregistered panic notifier\n");
++			atomic_notifier_chain_unregister(&panic_notifier_list,
++							 &paniced);
++			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
++#endif
++		}
++	}
++
++	drm_fb_helper_crtc_free(fb_helper);
++
++}
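
[Taken together, the helpers added so far give a KMS driver's fbdev emulation a fairly mechanical bring-up path. A minimal sketch of the expected call sequence; the driver object, its funcs table and the count parameters are placeholders (intel_fb.c elsewhere in this patch presumably does the real work), and error handling is omitted:

	/* Sketch: fbdev-helper bring-up for a KMS driver. */
	struct drm_fb_helper *helper = &ifbdev->helper;	/* driver-private */

	helper->funcs = &driver_fb_helper_funcs;	/* must provide fb_probe */
	drm_fb_helper_init(dev, helper, num_crtcs, max_conn_per_crtc);
	drm_fb_helper_single_add_all_connectors(helper);
	drm_fb_helper_single_fb_probe(helper, 32);	/* preferred bpp */

	/* ... and on unload ... */
	drm_fb_helper_fini(helper);
]
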
++
++#if 0
++static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
++		     u16 blue, u16 regno, struct fb_info *info)
++{
++	struct drm_fb_helper *fb_helper = info->par;
++	struct drm_framebuffer *fb = fb_helper->fb;
++	int pindex;
++
++	if (info->fix.visual == FB_VISUAL_trueCOLOR) {
++		u32 *palette;
++		u32 value;
++		/* place color in psuedopalette */
++		if (regno > 16)
++			return -EINVAL;
++		palette = (u32 *)info->pseudo_palette;
++		red >>= (16 - info->var.red.length);
++		green >>= (16 - info->var.green.length);
++		blue >>= (16 - info->var.blue.length);
++		value = (red << info->var.red.offset) |
++			(green << info->var.green.offset) |
++			(blue << info->var.blue.offset);
++		if (info->var.transp.length > 0) {
++			u32 mask = (1 << info->var.transp.length) - 1;
++			mask <<= info->var.transp.offset;
++			value |= mask;
++		}
++		palette[regno] = value;
++		return 0;
++	}
++
++	pindex = regno;
++
++	if (fb->bits_per_pixel == 16) {
++		pindex = regno << 3;
++
++		if (fb->depth == 16 && regno > 63)
++			return -EINVAL;
++		if (fb->depth == 15 && regno > 31)
++			return -EINVAL;
++
++		if (fb->depth == 16) {
++			u16 r, g, b;
++			int i;
++			if (regno < 32) {
++				for (i = 0; i < 8; i++)
++					fb_helper->funcs->gamma_set(crtc, red,
++						green, blue, pindex + i);
++			}
++
++			fb_helper->funcs->gamma_get(crtc, &r,
++						    &g, &b,
++						    pindex >> 1);
++
++			for (i = 0; i < 4; i++)
++				fb_helper->funcs->gamma_set(crtc, r,
++							    green, b,
++							    (pindex >> 1) + i);
++		}
++	}
++
++	if (fb->depth != 16)
++		fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
++	return 0;
++}
++#endif
++
++#if 0
++int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
++{
++	struct drm_fb_helper *fb_helper = info->par;
++	struct drm_crtc_helper_funcs *crtc_funcs;
++	u16 *red, *green, *blue, *transp;
++	struct drm_crtc *crtc;
++	int i, j, rc = 0;
++	int start;
++
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		crtc = fb_helper->crtc_info[i].mode_set.crtc;
++		crtc_funcs = crtc->helper_private;
++
++		red = cmap->red;
++		green = cmap->green;
++		blue = cmap->blue;
++		transp = cmap->transp;
++		start = cmap->start;
++
++		for (j = 0; j < cmap->len; j++) {
++			u16 hred, hgreen, hblue, htransp = 0xffff;
++
++			hred = *red++;
++			hgreen = *green++;
++			hblue = *blue++;
++
++			if (transp)
++				htransp = *transp++;
++
++			rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
++			if (rc)
++				return rc;
++		}
++		crtc_funcs->load_lut(crtc);
++	}
++	return rc;
++}
++#endif
++
++#if 0
++int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
++			    struct fb_info *info)
++{
++	struct drm_fb_helper *fb_helper = info->par;
++	struct drm_framebuffer *fb = fb_helper->fb;
++	int depth;
++
++	if (var->pixclock != 0 || in_dbg_master())
++		return -EINVAL;
++
++	/* Need to resize the fb object !!! */
++	if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
++		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
++			  "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
++			  fb->width, fb->height, fb->bits_per_pixel);
++		return -EINVAL;
++	}
++
++	switch (var->bits_per_pixel) {
++	case 16:
++		depth = (var->green.length == 6) ? 16 : 15;
++		break;
++	case 32:
++		depth = (var->transp.length > 0) ? 32 : 24;
++		break;
++	default:
++		depth = var->bits_per_pixel;
++		break;
++	}
++
++	switch (depth) {
++	case 8:
++		var->red.offset = 0;
++		var->green.offset = 0;
++		var->blue.offset = 0;
++		var->red.length = 8;
++		var->green.length = 8;
++		var->blue.length = 8;
++		var->transp.length = 0;
++		var->transp.offset = 0;
++		break;
++	case 15:
++		var->red.offset = 10;
++		var->green.offset = 5;
++		var->blue.offset = 0;
++		var->red.length = 5;
++		var->green.length = 5;
++		var->blue.length = 5;
++		var->transp.length = 1;
++		var->transp.offset = 15;
++		break;
++	case 16:
++		var->red.offset = 11;
++		var->green.offset = 5;
++		var->blue.offset = 0;
++		var->red.length = 5;
++		var->green.length = 6;
++		var->blue.length = 5;
++		var->transp.length = 0;
++		var->transp.offset = 0;
++		break;
++	case 24:
++		var->red.offset = 16;
++		var->green.offset = 8;
++		var->blue.offset = 0;
++		var->red.length = 8;
++		var->green.length = 8;
++		var->blue.length = 8;
++		var->transp.length = 0;
++		var->transp.offset = 0;
++		break;
++	case 32:
++		var->red.offset = 16;
++		var->green.offset = 8;
++		var->blue.offset = 0;
++		var->red.length = 8;
++		var->green.length = 8;
++		var->blue.length = 8;
++		var->transp.length = 8;
++		var->transp.offset = 24;
++		break;
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++#endif
++
++#if 0
++/* this will let fbcon do the mode init */
++int drm_fb_helper_set_par(struct fb_info *info)
++{
++	struct drm_fb_helper *fb_helper = info->par;
++	struct drm_device *dev = fb_helper->dev;
++	struct fb_var_screeninfo *var = &info->var;
++	struct drm_crtc *crtc;
++	int ret;
++	int i;
++
++	if (var->pixclock != 0) {
++		DRM_ERROR("PIXEL CLOCK SET\n");
++		return -EINVAL;
++	}
++
++	mutex_lock(&dev->mode_config.mutex);
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		crtc = fb_helper->crtc_info[i].mode_set.crtc;
++		ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
++		if (ret) {
++			mutex_unlock(&dev->mode_config.mutex);
++			return ret;
++		}
++	}
++	mutex_unlock(&dev->mode_config.mutex);
++
++	if (fb_helper->delayed_hotplug) {
++		fb_helper->delayed_hotplug = false;
++		drm_fb_helper_hotplug_event(fb_helper);
++	}
++	return 0;
++}
++#endif
++
++#if 0
++int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
++			      struct fb_info *info)
++{
++	struct drm_fb_helper *fb_helper = info->par;
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_mode_set *modeset;
++	struct drm_crtc *crtc;
++	int ret = 0;
++	int i;
++
++	mutex_lock(&dev->mode_config.mutex);
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		crtc = fb_helper->crtc_info[i].mode_set.crtc;
++
++		modeset = &fb_helper->crtc_info[i].mode_set;
++
++		modeset->x = var->xoffset;
++		modeset->y = var->yoffset;
++
++		if (modeset->num_connectors) {
++			ret = crtc->funcs->set_config(modeset);
++			if (!ret) {
++				info->var.xoffset = var->xoffset;
++				info->var.yoffset = var->yoffset;
++			}
++		}
++	}
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
++#endif
++
++int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
++				  int preferred_bpp)
++{
++	int new_fb = 0;
++	int crtc_count = 0;
++	int i;
++#if 0
++	struct fb_info *info;
++#endif
++	struct drm_fb_helper_surface_size sizes;
++	int gamma_size = 0;
++
++	memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
++	sizes.surface_depth = 24;
++	sizes.surface_bpp = 32;
++	sizes.fb_width = (unsigned)-1;
++	sizes.fb_height = (unsigned)-1;
++
++	/* if driver picks 8 or 16 by default use that
++	   for both depth/bpp */
++	if (preferred_bpp != sizes.surface_bpp) {
++		sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
++	}
++	/* first up get a count of crtcs now in use and new min/maxes width/heights */
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
++		struct drm_fb_helper_cmdline_mode *cmdline_mode;
++
++		cmdline_mode = &fb_helper_conn->cmdline_mode;
++
++		if (cmdline_mode->bpp_specified) {
++			switch (cmdline_mode->bpp) {
++			case 8:
++				sizes.surface_depth = sizes.surface_bpp = 8;
++				break;
++			case 15:
++				sizes.surface_depth = 15;
++				sizes.surface_bpp = 16;
++				break;
++			case 16:
++				sizes.surface_depth = sizes.surface_bpp = 16;
++				break;
++			case 24:
++				sizes.surface_depth = sizes.surface_bpp = 24;
++				break;
++			case 32:
++				sizes.surface_depth = 24;
++				sizes.surface_bpp = 32;
++				break;
++			}
++			break;
++		}
++	}
++
++	crtc_count = 0;
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		struct drm_display_mode *desired_mode;
++		desired_mode = fb_helper->crtc_info[i].desired_mode;
++
++		if (desired_mode) {
++			if (gamma_size == 0)
++				gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
++			if (desired_mode->hdisplay < sizes.fb_width)
++				sizes.fb_width = desired_mode->hdisplay;
++			if (desired_mode->vdisplay < sizes.fb_height)
++				sizes.fb_height = desired_mode->vdisplay;
++			if (desired_mode->hdisplay > sizes.surface_width)
++				sizes.surface_width = desired_mode->hdisplay;
++			if (desired_mode->vdisplay > sizes.surface_height)
++				sizes.surface_height = desired_mode->vdisplay;
++			crtc_count++;
++		}
++	}
++
++	if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
++		/* hmm everyone went away - assume VGA cable just fell out
++		   and will come back later. */
++		DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
++		sizes.fb_width = sizes.surface_width = 1024;
++		sizes.fb_height = sizes.surface_height = 768;
++	}
++
++	/* push down into drivers */
++	new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
++	if (new_fb < 0)
++		return new_fb;
++
++#if 0
++	info = fb_helper->fbdev;
++#endif
++
++	/* set the fb pointer */
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
++	}
++
++#if 0
++	if (new_fb) {
++		info->var.pixclock = 0;
++		if (register_framebuffer(info) < 0) {
++			return -EINVAL;
++		}
++
++		printf("fb%d: %s frame buffer device\n", info->node,
++		       info->fix.id);
++
++	} else {
++		drm_fb_helper_set_par(info);
++	}
++
++	/* Switch back to kernel console on panic */
++	/* multi card linked list maybe */
++	if (list_empty(&kernel_fb_helper_list)) {
++		printf("drm: registered panic notifier\n");
++		atomic_notifier_chain_register(&panic_notifier_list,
++					       &paniced);
++	}
++	if (new_fb)
++		list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
++#endif
++
++	return 0;
++}
++
++#if 0
++void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
++			    uint32_t depth)
++{
++	info->fix.type = FB_TYPE_PACKED_PIXELS;
++	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
++		FB_VISUAL_TRUECOLOR;
++	info->fix.mmio_start = 0;
++	info->fix.mmio_len = 0;
++	info->fix.type_aux = 0;
++	info->fix.xpanstep = 1; /* doing it in hw */
++	info->fix.ypanstep = 1; /* doing it in hw */
++	info->fix.ywrapstep = 0;
++	info->fix.accel = FB_ACCEL_NONE;
++	info->fix.type_aux = 0;
++
++	info->fix.line_length = pitch;
++	return;
++}
++
++void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
++			    uint32_t fb_width, uint32_t fb_height)
++{
++	struct drm_framebuffer *fb = fb_helper->fb;
++	info->pseudo_palette = fb_helper->pseudo_palette;
++	info->var.xres_virtual = fb->width;
++	info->var.yres_virtual = fb->height;
++	info->var.bits_per_pixel = fb->bits_per_pixel;
++	info->var.accel_flags = FB_ACCELF_TEXT;
++	info->var.xoffset = 0;
++	info->var.yoffset = 0;
++	info->var.activate = FB_ACTIVATE_NOW;
++	info->var.height = -1;
++	info->var.width = -1;
++
++	switch (fb->depth) {
++	case 8:
++		info->var.red.offset = 0;
++		info->var.green.offset = 0;
++		info->var.blue.offset = 0;
++		info->var.red.length = 8; /* 8bit DAC */
++		info->var.green.length = 8;
++		info->var.blue.length = 8;
++		info->var.transp.offset = 0;
++		info->var.transp.length = 0;
++		break;
++	case 15:
++		info->var.red.offset = 10;
++		info->var.green.offset = 5;
++		info->var.blue.offset = 0;
++		info->var.red.length = 5;
++		info->var.green.length = 5;
++		info->var.blue.length = 5;
++		info->var.transp.offset = 15;
++		info->var.transp.length = 1;
++		break;
++	case 16:
++		info->var.red.offset = 11;
++		info->var.green.offset = 5;
++		info->var.blue.offset = 0;
++		info->var.red.length = 5;
++		info->var.green.length = 6;
++		info->var.blue.length = 5;
++		info->var.transp.offset = 0;
++		break;
++	case 24:
++		info->var.red.offset = 16;
++		info->var.green.offset = 8;
++		info->var.blue.offset = 0;
++		info->var.red.length = 8;
++		info->var.green.length = 8;
++		info->var.blue.length = 8;
++		info->var.transp.offset = 0;
++		info->var.transp.length = 0;
++		break;
++	case 32:
++		info->var.red.offset = 16;
++		info->var.green.offset = 8;
++		info->var.blue.offset = 0;
++		info->var.red.length = 8;
++		info->var.green.length = 8;
++		info->var.blue.length = 8;
++		info->var.transp.offset = 24;
++		info->var.transp.length = 8;
++		break;
++	default:
++		break;
++	}
++
++	info->var.xres = fb_width;
++	info->var.yres = fb_height;
++}
++#endif
++
++static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
++					       uint32_t maxX,
++					       uint32_t maxY)
++{
++	struct drm_connector *connector;
++	int count = 0;
++	int i;
++
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		connector = fb_helper->connector_info[i]->connector;
++		count += connector->funcs->fill_modes(connector, maxX, maxY);
++	}
++
++	return count;
++}
++
++static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
++{
++	struct drm_display_mode *mode;
++
++	list_for_each_entry(mode, &fb_connector->connector->modes, head) {
++		if (drm_mode_width(mode) > width ||
++		    drm_mode_height(mode) > height)
++			continue;
++		if (mode->type & DRM_MODE_TYPE_PREFERRED)
++			return mode;
++	}
++	return NULL;
++}
++
++static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
++{
++	struct drm_fb_helper_cmdline_mode *cmdline_mode;
++	cmdline_mode = &fb_connector->cmdline_mode;
++	return cmdline_mode->specified;
++}
++
++static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
++						      int width, int height)
++{
++	struct drm_cmdline_mode *cmdline_mode;
++	struct drm_display_mode *mode = NULL;
++
++	cmdline_mode = &fb_helper_conn->cmdline_mode1;
++	if (cmdline_mode->specified == false &&
++	    !drm_fetch_cmdline_mode_from_kenv(fb_helper_conn->connector,
++	    cmdline_mode))
++			return (NULL);
++
++	/* attempt to find a matching mode in the list of modes
++	 *  we have gotten so far, if not add a CVT mode that conforms
 +	 */
-+	uint32_t pending_read_domains;
-+	uint32_t pending_write_domain;
++	if (cmdline_mode->rb || cmdline_mode->margins)
++		goto create_mode;
 +
-+	void *driver_private;
-+};
++	list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
++		/* check width/height */
++		if (mode->hdisplay != cmdline_mode->xres ||
++		    mode->vdisplay != cmdline_mode->yres)
++			continue;
 +
-+#include "drm_crtc.h"
++		if (cmdline_mode->refresh_specified) {
++			if (mode->vrefresh != cmdline_mode->refresh)
++				continue;
++		}
 +
- #ifndef DMA_BIT_MASK
- #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
- #endif
-@@ -573,10 +720,41 @@
- 	int	(*irq_postinstall)(struct drm_device *dev);
- 	void	(*irq_uninstall)(struct drm_device *dev);
- 	void	(*irq_handler)(DRM_IRQ_ARGS);
++		if (cmdline_mode->interlace) {
++			if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
++				continue;
++		}
++		return mode;
++	}
 +
- 	u32	(*get_vblank_counter)(struct drm_device *dev, int crtc);
- 	int	(*enable_vblank)(struct drm_device *dev, int crtc);
- 	void	(*disable_vblank)(struct drm_device *dev, int crtc);
-+	int	(*get_scanout_position)(struct drm_device *dev, int crtc,
-+		    int *vpos, int *hpos);
- 
-+	int	(*get_vblank_timestamp)(struct drm_device *dev, int crtc,
-+		    int *max_error, struct timeval *vblank_time,
-+		    unsigned flags);
++create_mode:
++	if (cmdline_mode->cvt)
++		mode = drm_cvt_mode(fb_helper_conn->connector->dev,
++				    cmdline_mode->xres, cmdline_mode->yres,
++				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
++				    cmdline_mode->rb, cmdline_mode->interlace,
++				    cmdline_mode->margins);
++	else
++		mode = drm_gtf_mode(fb_helper_conn->connector->dev,
++				    cmdline_mode->xres, cmdline_mode->yres,
++				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
++				    cmdline_mode->interlace,
++				    cmdline_mode->margins);
++	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++	list_add(&mode->head, &fb_helper_conn->connector->modes);
++	return mode;
++}
 +
-+	int	(*gem_init_object)(struct drm_gem_object *obj);
-+	void	(*gem_free_object)(struct drm_gem_object *obj);
++static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
++{
++	bool enable;
 +
-+	struct cdev_pager_ops *gem_pager_ops;
++	if (strict) {
++		enable = connector->status == connector_status_connected;
++	} else {
++		enable = connector->status != connector_status_disconnected;
++	}
++	return enable;
++}
 +
-+	int	(*dumb_create)(struct drm_file *file_priv,
-+		    struct drm_device *dev, struct drm_mode_create_dumb *args);
-+	int	(*dumb_map_offset)(struct drm_file *file_priv,
-+		    struct drm_device *dev, uint32_t handle, uint64_t *offset);
-+	int	(*dumb_destroy)(struct drm_file *file_priv,
-+		    struct drm_device *dev, uint32_t handle);
++static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
++				  bool *enabled)
++{
++	bool any_enabled = false;
++	struct drm_connector *connector;
++	int i = 0;
 +
-+	int	(*sysctl_init)(struct drm_device *dev,
-+		    struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
-+	void	(*sysctl_cleanup)(struct drm_device *dev);
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		connector = fb_helper->connector_info[i]->connector;
++		enabled[i] = drm_connector_enabled(connector, true);
++		DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
++			  enabled[i] ? "yes" : "no");
++		any_enabled |= enabled[i];
++	}
 +
-+	void	(*device_lock)(struct drm_device *dev);
-+	void	(*device_unlock)(struct drm_device *dev);
-+	int	(*device_lock_sleep)(struct drm_device *dev, void *chan,
-+		    int flags, const char *msg, int timeout);
-+	void	(*device_lock_assert)(struct drm_device *dev);
-+	void	(*device_unlock_assert)(struct drm_device *dev);
-+	void	(*device_nonsleepable_unlock_assert)(struct drm_device *dev);
++	if (any_enabled)
++		return;
 +
- 	drm_pci_id_list_t *id_entry;	/* PCI ID, name, and chipset private */
- 
- 	/**
-@@ -607,6 +785,41 @@
- 	u32 driver_features;
- };
- 
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		connector = fb_helper->connector_info[i]->connector;
++		enabled[i] = drm_connector_enabled(connector, false);
++	}
++}
++
++static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
++			      struct drm_display_mode **modes,
++			      bool *enabled, int width, int height)
++{
++	int count, i, j;
++	bool can_clone = false;
++	struct drm_fb_helper_connector *fb_helper_conn;
++	struct drm_display_mode *dmt_mode, *mode;
++
++	/* only contemplate cloning in the single crtc case */
++	if (fb_helper->crtc_count > 1)
++		return false;
++
++	count = 0;
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		if (enabled[i])
++			count++;
++	}
++
++	/* only contemplate cloning if more than one connector is enabled */
++	if (count <= 1)
++		return false;
++
++	/* check the command line or if nothing common pick 1024x768 */
++	can_clone = true;
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		if (!enabled[i])
++			continue;
++		fb_helper_conn = fb_helper->connector_info[i];
++		modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
++		if (!modes[i]) {
++			can_clone = false;
++			break;
++		}
++		for (j = 0; j < i; j++) {
++			if (!enabled[j])
++				continue;
++			if (!drm_mode_equal(modes[j], modes[i]))
++				can_clone = false;
++		}
++	}
++
++	if (can_clone) {
++		DRM_DEBUG_KMS("can clone using command line\n");
++		return true;
++	}
++
++	/* try and find a 1024x768 mode on each connector */
++	can_clone = true;
++	dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
++
++	for (i = 0; i < fb_helper->connector_count; i++) {
++
++		if (!enabled[i])
++			continue;
++
++		fb_helper_conn = fb_helper->connector_info[i];
++		list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
++			if (drm_mode_equal(mode, dmt_mode))
++				modes[i] = mode;
++		}
++		if (!modes[i])
++			can_clone = false;
++	}
++
++	if (can_clone) {
++		DRM_DEBUG_KMS("can clone using 1024x768\n");
++		return true;
++	}
++	DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
++	return false;
++}
++
++static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
++				 struct drm_display_mode **modes,
++				 bool *enabled, int width, int height)
++{
++	struct drm_fb_helper_connector *fb_helper_conn;
++	int i;
++
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		fb_helper_conn = fb_helper->connector_info[i];
++
++		if (enabled[i] == false)
++			continue;
++
++		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
++			      fb_helper_conn->connector->base.id);
++
++		/* go for command line mode first */
++		modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
++		if (!modes[i]) {
++			DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
++				      fb_helper_conn->connector->base.id);
++			modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
++		}
++		/* No preferred modes, pick one off the list */
++		if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
++			list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
++				break;
++		}
++		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
++			  "none");
++	}
++	return true;
++}
++
++static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
++			  struct drm_fb_helper_crtc **best_crtcs,
++			  struct drm_display_mode **modes,
++			  int n, int width, int height)
++{
++	int c, o;
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_connector *connector;
++	struct drm_connector_helper_funcs *connector_funcs;
++	struct drm_encoder *encoder;
++	struct drm_fb_helper_crtc *best_crtc;
++	int my_score, best_score, score;
++	struct drm_fb_helper_crtc **crtcs, *crtc;
++	struct drm_fb_helper_connector *fb_helper_conn;
++
++	if (n == fb_helper->connector_count)
++		return 0;
++
++	fb_helper_conn = fb_helper->connector_info[n];
++	connector = fb_helper_conn->connector;
++
++	best_crtcs[n] = NULL;
++	best_crtc = NULL;
++	best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
++	if (modes[n] == NULL)
++		return best_score;
++
++	crtcs = malloc(dev->mode_config.num_connector *
++	    sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
++	    M_WAITOK | M_ZERO);
++
++	my_score = 1;
++	if (connector->status == connector_status_connected)
++		my_score++;
++	if (drm_has_cmdline_mode(fb_helper_conn))
++		my_score++;
++	if (drm_has_preferred_mode(fb_helper_conn, width, height))
++		my_score++;
++
++	connector_funcs = connector->helper_private;
++	encoder = connector_funcs->best_encoder(connector);
++	if (!encoder)
++		goto out;
++
++	/* select a crtc for this connector and then attempt to configure
++	   remaining connectors */
++	for (c = 0; c < fb_helper->crtc_count; c++) {
++		crtc = &fb_helper->crtc_info[c];
++
++		if ((encoder->possible_crtcs & (1 << c)) == 0) {
++			continue;
++		}
++
++		for (o = 0; o < n; o++)
++			if (best_crtcs[o] == crtc)
++				break;
++
++		if (o < n) {
++			/* ignore cloning unless only a single crtc */
++			if (fb_helper->crtc_count > 1)
++				continue;
++
++			if (!drm_mode_equal(modes[o], modes[n]))
++				continue;
++		}
++
++		crtcs[n] = crtc;
++		memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
++		score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
++						  width, height);
++		if (score > best_score) {
++			best_crtc = crtc;
++			best_score = score;
++			memcpy(best_crtcs, crtcs,
++			       dev->mode_config.num_connector *
++			       sizeof(struct drm_fb_helper_crtc *));
++		}
++	}
++out:
++	free(crtcs, DRM_MEM_KMS);
++	return best_score;
++}
++
++static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
++{
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_fb_helper_crtc **crtcs;
++	struct drm_display_mode **modes;
++	struct drm_encoder *encoder;
++	struct drm_mode_set *modeset;
++	bool *enabled;
++	int width, height;
++	int i, ret;
++
++	DRM_DEBUG_KMS("\n");
++
++	width = dev->mode_config.max_width;
++	height = dev->mode_config.max_height;
++
++	/* clean out all the encoder/crtc combos */
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		encoder->crtc = NULL;
++	}
++
++	crtcs = malloc(dev->mode_config.num_connector *
++	    sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
++	    M_WAITOK | M_ZERO);
++	modes = malloc(dev->mode_config.num_connector *
++	    sizeof(struct drm_display_mode *), DRM_MEM_KMS,
++	    M_WAITOK | M_ZERO);
++	enabled = malloc(dev->mode_config.num_connector *
++	    sizeof(bool), DRM_MEM_KMS, M_WAITOK | M_ZERO);
++
++	drm_enable_connectors(fb_helper, enabled);
++
++	ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
++	if (!ret) {
++		ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
++		if (!ret)
++			DRM_ERROR("Unable to find initial modes\n");
++	}
++
++	DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
++
++	drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
++
++	/* need to set the modesets up here for use later */
++	/* fill out the connector<->crtc mappings into the modesets */
++	for (i = 0; i < fb_helper->crtc_count; i++) {
++		modeset = &fb_helper->crtc_info[i].mode_set;
++		modeset->num_connectors = 0;
++	}
++
++	for (i = 0; i < fb_helper->connector_count; i++) {
++		struct drm_display_mode *mode = modes[i];
++		struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
++		modeset = &fb_crtc->mode_set;
++
++		if (mode && fb_crtc) {
++			DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
++				      mode->name, fb_crtc->mode_set.crtc->base.id);
++			fb_crtc->desired_mode = mode;
++			if (modeset->mode)
++				drm_mode_destroy(dev, modeset->mode);
++			modeset->mode = drm_mode_duplicate(dev,
++							   fb_crtc->desired_mode);
++			modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
++		}
++	}
++
++	free(crtcs, DRM_MEM_KMS);
++	free(modes, DRM_MEM_KMS);
++	free(enabled, DRM_MEM_KMS);
++}
++
 +/**
-+ * DRM minor structure. This structure represents a drm minor number.
++ * drm_helper_initial_config - setup a sane initial connector configuration
++ * @dev: DRM device
++ *
++ * LOCKING:
++ * Called at init time, must take mode config lock.
++ *
++ * Scan the CRTCs and connectors and try to put together an initial setup.
++ * At the moment, this is a cloned configuration across all heads with
++ * a new framebuffer object as the backing store.
++ *
++ * RETURNS:
++ * Zero if everything went ok, nonzero otherwise.
 + */
-+struct drm_minor {
-+	int index;			/**< Minor device number */
-+	int type;                       /**< Control or render */
-+	device_t kdev;			/**< OS device */
-+	struct drm_device *dev;
++bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
++{
++	struct drm_device *dev = fb_helper->dev;
++	int count = 0;
 +
-+	struct drm_master *master; /* currently active master for this node */
-+	struct list_head master_list;
-+	struct drm_mode_group mode_group;
++	/* disable all the possible outputs/crtcs before entering KMS mode */
++	drm_helper_disable_unused_functions(fb_helper->dev);
++
++	drm_fb_helper_parse_command_line(fb_helper);
++
++	count = drm_fb_helper_probe_connector_modes(fb_helper,
++						    dev->mode_config.max_width,
++						    dev->mode_config.max_height);
++	/*
++	 * we shouldn't end up with no modes here.
++	 */
++	if (count == 0) {
++		printf("No connectors reported connected with modes\n");
++	}
++	drm_setup_crtcs(fb_helper);
++
++	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
++}
++
++int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
++{
++	struct drm_device *dev = fb_helper->dev;
++	int count = 0;
++	u32 max_width, max_height, bpp_sel;
++	bool bound = false, crtcs_bound = false;
++	struct drm_crtc *crtc;
++
++	if (!fb_helper->fb)
++		return 0;
++
++	sx_xlock(&dev->mode_config.mutex);
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		if (crtc->fb)
++			crtcs_bound = true;
++		if (crtc->fb == fb_helper->fb)
++			bound = true;
++	}
++
++	if (!bound && crtcs_bound) {
++		fb_helper->delayed_hotplug = true;
++		sx_xunlock(&dev->mode_config.mutex);
++		return 0;
++	}
++	DRM_DEBUG_KMS("\n");
++
++	max_width = fb_helper->fb->width;
++	max_height = fb_helper->fb->height;
++	bpp_sel = fb_helper->fb->bits_per_pixel;
++
++	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
++						    max_height);
++	drm_setup_crtcs(fb_helper);
++	sx_xunlock(&dev->mode_config.mutex);
++
++	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
++}
++
+diff --git a/sys/dev/drm/drm_fb_helper.h b/sys/dev/drm/drm_fb_helper.h
+new file mode 100644
+index 0000000..2e8b662
+--- /dev/null
++++ sys/dev/drm/drm_fb_helper.h
+@@ -0,0 +1,139 @@
++/*
++ * Copyright (c) 2006-2009 Red Hat Inc.
++ * Copyright (c) 2006-2008 Intel Corporation
++ * Copyright (c) 2007 Dave Airlie <airlied at linux.ie>
++ *
++ * DRM framebuffer helper functions
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission.  The copyright holders make no representations
++ * about the suitability of this software for any purpose.  It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ *
++ * Authors:
++ *      Dave Airlie <airlied at linux.ie>
++ *      Jesse Barnes <jesse.barnes at intel.com>
++ */
++#ifndef DRM_FB_HELPER_H
++#define DRM_FB_HELPER_H
++
++struct drm_fb_helper;
++
++struct drm_fb_helper_crtc {
++	uint32_t crtc_id;
++	struct drm_mode_set mode_set;
++	struct drm_display_mode *desired_mode;
 +};
 +
 +/* mode specified on the command line */
-+struct drm_cmdline_mode {
++struct drm_fb_helper_cmdline_mode {
 +	bool specified;
 +	bool refresh_specified;
 +	bool bpp_specified;
@@ -7563,297 +14969,230 @@
 +	bool interlace;
 +	bool cvt;
 +	bool margins;
-+	enum drm_connector_force force;
 +};
 +
-+struct drm_pending_vblank_event {
-+	struct drm_pending_event base;
-+	int pipe;
-+	struct drm_event_vblank event;
++struct drm_fb_helper_surface_size {
++	u32 fb_width;
++	u32 fb_height;
++	u32 surface_width;
++	u32 surface_height;
++	u32 surface_bpp;
++	u32 surface_depth;
 +};
 +
- /* Length for the array of resource pointers for drm_get_resource_*. */
- #define DRM_MAX_PCI_RESOURCE	6
- 
-@@ -629,10 +842,10 @@
- 	int		  flags;	/* Flags to open(2)		   */
- 
- 				/* Locks */
--	struct mtx	  vbl_lock;	/* protects vblank operations */
- 	struct mtx	  dma_lock;	/* protects dev->dma */
- 	struct mtx	  irq_lock;	/* protects irq condition checks */
- 	struct mtx	  dev_lock;	/* protects everything else */
-+	struct sx	  dev_struct_lock;
- 	DRM_SPINTYPE	  drw_lock;
- 
- 				/* Usage Counters */
-@@ -680,16 +893,13 @@
- 	atomic_t	  context_flag;	/* Context swapping flag	   */
- 	int		  last_context;	/* Last current context		   */
- 
--	int		  vblank_disable_allowed;
--	struct callout	  vblank_disable_timer;
--	u32		  max_vblank_count;	/* size of vblank counter register */
--	struct drm_vblank_info *vblank;		/* per crtc vblank info */
- 	int		  num_crtcs;
- 
- 	struct sigio      *buf_sigio;	/* Processes waiting for SIGIO     */
- 
- 				/* Sysctl support */
- 	struct drm_sysctl_info *sysctl;
-+	int		  sysctl_node_idx;
- 
- 	drm_agp_head_t    *agp;
- 	drm_sg_mem_t      *sg;  /* Scatter gather memory */
-@@ -698,9 +908,43 @@
- 	unsigned int	  agp_buffer_token;
- 	drm_local_map_t   *agp_buffer_map;
- 
-+	struct drm_minor *control;		/**< Control node for card */
-+	struct drm_minor *primary;		/**< render type primary screen head */
++struct drm_fb_helper_funcs {
++	void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
++			  u16 blue, int regno);
++	void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
++			  u16 *blue, int regno);
 +
- 	struct unrhdr	  *drw_unrhdr;
- 	/* RB tree of drawable infos */
- 	RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
++	int (*fb_probe)(struct drm_fb_helper *helper,
++			struct drm_fb_helper_surface_size *sizes);
++};
 +
-+	int vblank_disable_allowed;
++struct drm_fb_helper_connector {
++	struct drm_fb_helper_cmdline_mode cmdline_mode;
++	struct drm_cmdline_mode cmdline_mode1;
++	struct drm_connector *connector;
++};
 +
-+	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
-+	struct timeval *_vblank_time;   /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
-+	struct mtx vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
-+	struct mtx vbl_lock;
-+	atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
-+	u32 *last_vblank;               /* protected by dev->vbl_lock, used */
-+					/* for wraparound handling */
-+	int *vblank_enabled;            /* so we don't call enable more than
-+					   once per disable */
-+	int *vblank_inmodeset;          /* Display driver is setting mode */
-+	u32 *last_vblank_wait;		/* Last vblank seqno waited per CRTC */
-+	struct callout vblank_disable_callout;
++struct drm_fb_helper {
++	struct drm_framebuffer *fb;
++	struct drm_framebuffer *saved_fb;
++	struct drm_device *dev;
++	struct drm_display_mode *mode;
++	int crtc_count;
++	struct drm_fb_helper_crtc *crtc_info;
++	int connector_count;
++	struct drm_fb_helper_connector **connector_info;
++	struct drm_fb_helper_funcs *funcs;
++	int conn_limit;
++	struct fb_info *fbdev;
++	u32 pseudo_palette[17];
++	struct list_head kernel_fb_list;
 +
-+	u32 max_vblank_count;           /**< size of vblank counter register */
++	/* we got a hotplug but fbdev wasn't running the console
++	   delay until next set_par */
++	bool delayed_hotplug;
++};
 +
-+	struct list_head vblank_event_list;
-+	struct mtx	 event_lock;
++struct fb_var_screeninfo;
++struct fb_cmap;
 +
-+        struct drm_mode_config mode_config;	/**< Current mode config */
++int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
++				  int preferred_bpp);
 +
-+	/* GEM part */
-+	struct sx	  object_name_lock;
-+	struct drm_gem_names object_names;
-+	void		 *mm_private;
++int drm_fb_helper_init(struct drm_device *dev,
++		       struct drm_fb_helper *helper, int crtc_count,
++		       int max_conn);
++void drm_fb_helper_fini(struct drm_fb_helper *helper);
++int drm_fb_helper_blank(int blank, struct fb_info *info);
++int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
++			      struct fb_info *info);
++int drm_fb_helper_set_par(struct fb_info *info);
++int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
++			    struct fb_info *info);
++int drm_fb_helper_setcolreg(unsigned regno,
++			    unsigned red,
++			    unsigned green,
++			    unsigned blue,
++			    unsigned transp,
++			    struct fb_info *info);
 +
-+	void *sysctl_private;
-+	char busid_str[128];
-+	int modesetting;
- };
- 
- static __inline__ int drm_core_check_feature(struct drm_device *dev,
-@@ -719,6 +963,9 @@
- #endif
- 
- extern int	drm_debug_flag;
-+extern int	drm_notyet_flag;
-+extern unsigned int drm_vblank_offdelay;
-+extern unsigned int drm_timestamp_precision;
- 
- /* Device setup support (drm_drv.c) */
- int	drm_probe(device_t kdev, drm_pci_id_list_t *idlist);
-@@ -732,6 +979,11 @@
- d_mmap_t drm_mmap;
- extern drm_local_map_t	*drm_getsarea(struct drm_device *dev);
- 
-+void drm_event_wakeup(struct drm_pending_event *e);
++bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
++void drm_fb_helper_restore(void);
++void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
++			    uint32_t fb_width, uint32_t fb_height);
++void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
++			    uint32_t depth);
 +
-+int drm_add_busid_modesetting(struct drm_device *dev,
-+    struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
++int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 +
- /* File operations helpers (drm_fops.c) */
- extern int		drm_open_helper(struct cdev *kdev, int flags, int fmt,
- 					 DRM_STRUCTPROC *p,
-@@ -791,16 +1043,37 @@
- void	drm_driver_irq_preinstall(struct drm_device *dev);
- void	drm_driver_irq_postinstall(struct drm_device *dev);
- void	drm_driver_irq_uninstall(struct drm_device *dev);
--void	drm_handle_vblank(struct drm_device *dev, int crtc);
--u32	drm_vblank_count(struct drm_device *dev, int crtc);
--int	drm_vblank_get(struct drm_device *dev, int crtc);
--void	drm_vblank_put(struct drm_device *dev, int crtc);
--void	drm_vblank_cleanup(struct drm_device *dev);
--int	drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
--int	drm_vblank_init(struct drm_device *dev, int num_crtcs);
++int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
++bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
++int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
++int drm_fb_helper_debug_enter(struct fb_info *info);
++int drm_fb_helper_debug_leave(struct fb_info *info);
++bool drm_fb_helper_force_kernel_mode(void);
 +
-+void	drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
-+void	drm_vblank_post_modeset(struct drm_device *dev, int crtc);
- int 	drm_modeset_ctl(struct drm_device *dev, void *data,
- 			struct drm_file *file_priv);
++#endif
+diff --git a/sys/dev/drm/drm_fops.c b/sys/dev/drm/drm_fops.c
+index 3f743e0..08cc53a 100644
+--- sys/dev/drm/drm_fops.c
++++ sys/dev/drm/drm_fops.c
+@@ -72,6 +72,13 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
+ 	/* for compatibility root is always authenticated */
+ 	priv->authenticated	= DRM_SUSER(p);
  
-+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
-+extern int drm_wait_vblank(struct drm_device *dev, void *data,
-+			   struct drm_file *filp);
-+extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
-+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
-+extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
-+				     struct timeval *vblanktime);
-+extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
-+void drm_handle_vblank_events(struct drm_device *dev, int crtc);
-+extern int drm_vblank_get(struct drm_device *dev, int crtc);
-+extern void drm_vblank_put(struct drm_device *dev, int crtc);
-+extern void drm_vblank_off(struct drm_device *dev, int crtc);
-+extern void drm_vblank_cleanup(struct drm_device *dev);
-+extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
-+				     struct timeval *tvblank, unsigned flags);
-+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
-+						 int crtc, int *max_error,
-+						 struct timeval *vblank_time,
-+						 unsigned flags,
-+						 struct drm_crtc *refcrtc);
-+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
++	INIT_LIST_HEAD(&priv->fbs);
++	INIT_LIST_HEAD(&priv->event_list);
++	priv->event_space = 4096; /* set aside 4k for event buffer */
 +
-+struct timeval ns_to_timeval(const int64_t nsec);
-+int64_t timeval_to_ns(const struct timeval *tv);
++	if (dev->driver->driver_features & DRIVER_GEM)
++		drm_gem_open(dev, priv);
 +
- /* AGP/PCI Express/GART support (drm_agpsupport.c) */
- int	drm_device_is_agp(struct drm_device *dev);
- int	drm_device_is_pcie(struct drm_device *dev);
-@@ -832,6 +1105,9 @@
- int	drm_ati_pcigart_cleanup(struct drm_device *dev,
- 				struct drm_ati_pcigart_info *gart_info);
+ 	if (dev->driver->open) {
+ 		/* shared code returns -errno */
+ 		retcode = -dev->driver->open(dev, priv);
+@@ -92,16 +99,104 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
+ 	return 0;
+ }
  
-+/* Cache management (drm_memory.c) */
-+void	drm_clflush_pages(vm_page_t *pages, unsigned long num_pages);
++static bool
++drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
++    struct uio *uio, struct drm_pending_event **out)
++{
++	struct drm_pending_event *e;
 +
- /* Locking IOCTL support (drm_drv.c) */
- int	drm_lock(struct drm_device *dev, void *data,
- 		 struct drm_file *file_priv);
-@@ -919,8 +1195,6 @@
- /* IRQ support (drm_irq.c) */
- int	drm_control(struct drm_device *dev, void *data,
- 		    struct drm_file *file_priv);
--int	drm_wait_vblank(struct drm_device *dev, void *data,
--			struct drm_file *file_priv);
++	if (list_empty(&file_priv->event_list))
++		return (false);
++	e = list_first_entry(&file_priv->event_list,
++	    struct drm_pending_event, link);
++	if (e->event->length > uio->uio_resid)
++		return (false);
++
++	file_priv->event_space += e->event->length;
++	list_del(&e->link);
++	*out = e;
++	return (true);
++}
  
- /* AGP/GART support (drm_agpsupport.c) */
- int	drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
-@@ -940,6 +1214,12 @@
- int	drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- 			   struct drm_file *file_priv);
- 
-+				/* Stub support (drm_stub.h) */
-+extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
-+			       struct drm_file *file_priv);
-+extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
-+				struct drm_file *file_priv);
+-/* The drm_read and drm_poll are stubs to prevent spurious errors
+- * on older X Servers (4.3.0 and earlier) */
++int
++drm_read(struct cdev *kdev, struct uio *uio, int ioflag)
++{
++	struct drm_file *file_priv;
++	struct drm_device *dev;
++	struct drm_pending_event *e;
++	int error;
 +
- /* Scatter Gather Support (drm_scatter.c) */
- int	drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
- 			   struct drm_file *file_priv);
-@@ -951,6 +1231,73 @@
- 				size_t align, dma_addr_t maxaddr);
- void	drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
++	error = devfs_get_cdevpriv((void **)&file_priv);
++	if (error != 0) {
++		DRM_ERROR("can't find authenticator\n");
++		return (EINVAL);
++	}
++	dev = drm_get_device_from_kdev(kdev);
++	mtx_lock(&dev->event_lock);
++	while (list_empty(&file_priv->event_list)) {
++		if ((ioflag & O_NONBLOCK) != 0) {
++			error = EAGAIN;
++			goto out;
++		}
++		error = msleep(&file_priv->event_space, &dev->event_lock,
++		    PCATCH, "drmrea", 0);
++		if (error != 0)
++			goto out;
++	}
++	while (drm_dequeue_event(dev, file_priv, uio, &e)) {
++		mtx_unlock(&dev->event_lock);
++		error = uiomove(e->event, e->event->length, uio);
++		CTR3(KTR_DRM, "drm_event_dequeued %d %d %d", curproc->p_pid,
++		    e->event->type, e->event->length);
++		e->destroy(e);
++		if (error != 0)
++			return (error);
++		mtx_lock(&dev->event_lock);
++	}
++out:
++	mtx_unlock(&dev->event_lock);
++	return (error);
++}
  
-+/* Graphics Execution Manager library functions (drm_gem.c) */
-+int drm_gem_init(struct drm_device *dev);
-+void drm_gem_destroy(struct drm_device *dev);
+-int drm_read(struct cdev *kdev, struct uio *uio, int ioflag)
++void
++drm_event_wakeup(struct drm_pending_event *e)
+ {
+-	return 0;
++	struct drm_file *file_priv;
++	struct drm_device *dev;
 +
-+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
-+			struct drm_file *file_priv);
-+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-+			struct drm_file *file_priv);
-+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
-+		       struct drm_file *file_priv);
-+int drm_gem_handle_create(struct drm_file *file_priv,
-+			  struct drm_gem_object *obj,
-+			  u32 *handlep);
-+int drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle);
-+void drm_gem_object_handle_reference(struct drm_gem_object *obj);
-+void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
-+void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj);
-+void drm_gem_object_handle_free(struct drm_gem_object *obj);
-+void drm_gem_object_reference(struct drm_gem_object *obj);
-+void drm_gem_object_unreference(struct drm_gem_object *obj);
-+void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
-+void drm_gem_object_release(struct drm_gem_object *obj);
-+void drm_gem_object_free(struct drm_gem_object *obj);
-+int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
-+    size_t size);
-+int drm_gem_private_object_init(struct drm_device *dev,
-+    struct drm_gem_object *obj, size_t size);
-+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
-+    size_t size);
-+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
-+    struct drm_file *file_priv, uint32_t handle);
++	file_priv = e->file_priv;
++	dev = file_priv->dev;
++	mtx_assert(&dev->event_lock, MA_OWNED);
 +
-+void drm_gem_open(struct drm_device *dev, struct drm_file *file_priv);
-+void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv);
-+
-+int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
-+void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
-+int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
-+    struct vm_object **obj_res, int nprot);
-+void drm_gem_pager_dtr(void *obj);
-+
-+void drm_device_lock_mtx(struct drm_device *dev);
-+void drm_device_unlock_mtx(struct drm_device *dev);
-+int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
-+    const char *msg, int timeout);
-+void drm_device_assert_mtx_locked(struct drm_device *dev);
-+void drm_device_assert_mtx_unlocked(struct drm_device *dev);
-+
-+void drm_device_lock_struct(struct drm_device *dev);
-+void drm_device_unlock_struct(struct drm_device *dev);
-+int drm_device_sleep_struct(struct drm_device *dev, void *chan, int flags,
-+    const char *msg, int timeout);
-+void drm_device_assert_struct_locked(struct drm_device *dev);
-+void drm_device_assert_struct_unlocked(struct drm_device *dev);
-+
-+void drm_compat_locking_init(struct drm_device *dev);
-+void drm_sleep_locking_init(struct drm_device *dev);
-+
-+/* drm_modes.c */
-+bool drm_mode_parse_command_line_for_connector(const char *mode_option,
-+    struct drm_connector *connector, struct drm_cmdline_mode *mode);
-+struct drm_display_mode *drm_mode_create_from_cmdline_mode(
-+    struct drm_device *dev, struct drm_cmdline_mode *cmd);
-+
-+/* drm_edid.c */
-+u8 *drm_find_cea_extension(struct edid *edid);
-+
- /* Inline replacements for drm_alloc and friends */
- static __inline__ void *
- drm_alloc(size_t size, struct malloc_type *area)
-@@ -1000,7 +1347,7 @@
- {
- 	drm_local_map_t *map;
++	wakeup(&file_priv->event_space);
++	selwakeup(&file_priv->event_poll);
+ }
  
--	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
-+	DRM_LOCK_ASSERT(dev);
- 	TAILQ_FOREACH(map, &dev->maplist, link) {
- 		if (offset == (unsigned long)map->handle)
- 			return map;
-@@ -1012,5 +1359,13 @@
+-int drm_poll(struct cdev *kdev, int events, DRM_STRUCTPROC *p)
++int
++drm_poll(struct cdev *kdev, int events, struct thread *td)
  {
- }
- 
-+#define KIB_NOTYET()							\
-+do {									\
-+	if (drm_debug_flag && drm_notyet_flag)				\
-+		printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \
-+} while (0)
+-	return 0;
++	struct drm_file *file_priv;
++	struct drm_device *dev;
++	int error, revents;
 +
-+#define	KTR_DRM	KTR_DEV
++	error = devfs_get_cdevpriv((void **)&file_priv);
++	if (error != 0) {
++		DRM_ERROR("can't find authenticator\n");
++		return (EINVAL);
++	}
++	dev = drm_get_device_from_kdev(kdev);
 +
- #endif /* __KERNEL__ */
- #endif /* _DRM_P_H_ */
-Index: sys/dev/drm/intel_sdvo_regs.h
-===================================================================
-diff --git sys/dev/drm/intel_sdvo_regs.h sys/dev/drm/intel_sdvo_regs.h
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/intel_sdvo_regs.h	(working copy)
-@@ -0,0 +1,723 @@
++	revents = 0;
++	mtx_lock(&dev->event_lock);
++	if ((events & (POLLIN | POLLRDNORM)) != 0) {
++		if (list_empty(&file_priv->event_list)) {
++			CTR0(KTR_DRM, "drm_poll empty list");
++			selrecord(td, &file_priv->event_poll);
++		} else {
++			revents |= events & (POLLIN | POLLRDNORM);
++			CTR1(KTR_DRM, "drm_poll revents %x", revents);
++		}
++	}
++	mtx_unlock(&dev->event_lock);
++	return (revents);
+ }
+diff --git a/sys/dev/drm/drm_fourcc.h b/sys/dev/drm/drm_fourcc.h
+new file mode 100644
+index 0000000..4dc5c60
+--- /dev/null
++++ sys/dev/drm/drm_fourcc.h
+@@ -0,0 +1,137 @@
 +/*
-+ * Copyright © 2006-2007 Intel Corporation
++ * Copyright 2011 Intel Corporation
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
@@ -7869,1240 +15208,5293 @@
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef DRM_FOURCC_H
++#define DRM_FOURCC_H
++
++#include <sys/types.h>
++
++#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
++				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
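++/* e.g. fourcc_code('X', 'R', '2', '4') (DRM_FORMAT_XRGB8888 below) evaluates to 0x34325258 */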
++
++#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
++
++/* color index */
++#define DRM_FORMAT_C8		fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
++
++/* 8 bpp RGB */
++#define DRM_FORMAT_RGB332	fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
++#define DRM_FORMAT_BGR233	fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
++
++/* 16 bpp RGB */
++#define DRM_FORMAT_XRGB4444	fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
++#define DRM_FORMAT_XBGR4444	fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
++#define DRM_FORMAT_RGBX4444	fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
++#define DRM_FORMAT_BGRX4444	fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
++
++#define DRM_FORMAT_ARGB4444	fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
++#define DRM_FORMAT_ABGR4444	fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
++#define DRM_FORMAT_RGBA4444	fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
++#define DRM_FORMAT_BGRA4444	fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
++
++#define DRM_FORMAT_XRGB1555	fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
++#define DRM_FORMAT_XBGR1555	fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
++#define DRM_FORMAT_RGBX5551	fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
++#define DRM_FORMAT_BGRX5551	fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
++
++#define DRM_FORMAT_ARGB1555	fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
++#define DRM_FORMAT_ABGR1555	fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
++#define DRM_FORMAT_RGBA5551	fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
++#define DRM_FORMAT_BGRA5551	fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
++
++#define DRM_FORMAT_RGB565	fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
++#define DRM_FORMAT_BGR565	fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
++
++/* 24 bpp RGB */
++#define DRM_FORMAT_RGB888	fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
++#define DRM_FORMAT_BGR888	fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
++
++/* 32 bpp RGB */
++#define DRM_FORMAT_XRGB8888	fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
++#define DRM_FORMAT_XBGR8888	fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
++#define DRM_FORMAT_RGBX8888	fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
++#define DRM_FORMAT_BGRX8888	fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
++
++#define DRM_FORMAT_ARGB8888	fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
++#define DRM_FORMAT_ABGR8888	fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
++#define DRM_FORMAT_RGBA8888	fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
++#define DRM_FORMAT_BGRA8888	fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
++
++#define DRM_FORMAT_XRGB2101010	fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
++#define DRM_FORMAT_XBGR2101010	fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
++#define DRM_FORMAT_RGBX1010102	fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
++#define DRM_FORMAT_BGRX1010102	fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
++
++#define DRM_FORMAT_ARGB2101010	fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
++#define DRM_FORMAT_ABGR2101010	fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
++#define DRM_FORMAT_RGBA1010102	fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
++#define DRM_FORMAT_BGRA1010102	fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
++
++/* packed YCbCr */
++#define DRM_FORMAT_YUYV		fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
++#define DRM_FORMAT_YVYU		fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
++#define DRM_FORMAT_UYVY		fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
++#define DRM_FORMAT_VYUY		fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
++
++#define DRM_FORMAT_AYUV		fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
++
++/*
++ * 2 plane YCbCr
++ * index 0 = Y plane, [7:0] Y
++ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
++ * or
++ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
++ */
++#define DRM_FORMAT_NV12		fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
++#define DRM_FORMAT_NV21		fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
++#define DRM_FORMAT_NV16		fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
++#define DRM_FORMAT_NV61		fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
++
++/* 2 non contiguous plane YCbCr */
++#define DRM_FORMAT_NV12M	fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
++#define DRM_FORMAT_NV12MT	fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
++
++/*
++ * 3 plane YCbCr
++ * index 0: Y plane, [7:0] Y
++ * index 1: Cb plane, [7:0] Cb
++ * index 2: Cr plane, [7:0] Cr
++ * or
++ * index 1: Cr plane, [7:0] Cr
++ * index 2: Cb plane, [7:0] Cb
++ */
++#define DRM_FORMAT_YUV410	fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU410	fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
++#define DRM_FORMAT_YUV411	fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU411	fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
++#define DRM_FORMAT_YUV420	fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU420	fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
++#define DRM_FORMAT_YUV422	fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU422	fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
++#define DRM_FORMAT_YUV444	fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU444	fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
++
++/* 3 non contiguous plane YCbCr */
++#define DRM_FORMAT_YUV420M	fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
++
++#endif /* DRM_FOURCC_H */
+diff --git a/sys/dev/drm/drm_gem.c b/sys/dev/drm/drm_gem.c
+new file mode 100644
+index 0000000..6b496f0
+--- /dev/null
++++ sys/dev/drm/drm_gem.c
+@@ -0,0 +1,487 @@
++/*-
++ * Copyright (c) 2011 The FreeBSD Foundation
++ * All rights reserved.
 + *
-+ * Authors:
-+ *	Eric Anholt <eric at anholt.net>
++ * This software was developed by Konstantin Belousov under sponsorship from
++ * the FreeBSD Foundation.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
 + */
 +
-+/**
-+ * @file SDVO command definitions and structures.
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include "opt_vm.h"
++
++#include <sys/param.h>
++#include <sys/systm.h>
++#include <sys/limits.h>
++#include <sys/lock.h>
++#include <sys/mutex.h>
++
++#include <vm/vm.h>
++#include <vm/vm_page.h>
++
++#include "dev/drm/drmP.h"
++#include "dev/drm/drm.h"
++#include "dev/drm/drm_sarea.h"
++
++/*
++ * We make up offsets for buffer objects so we can recognize them at
++ * mmap time.
 + */
 +
-+#define SDVO_OUTPUT_FIRST   (0)
-+#define SDVO_OUTPUT_TMDS0   (1 << 0)
-+#define SDVO_OUTPUT_RGB0    (1 << 1)
-+#define SDVO_OUTPUT_CVBS0   (1 << 2)
-+#define SDVO_OUTPUT_SVID0   (1 << 3)
-+#define SDVO_OUTPUT_YPRPB0  (1 << 4)
-+#define SDVO_OUTPUT_SCART0  (1 << 5)
-+#define SDVO_OUTPUT_LVDS0   (1 << 6)
-+#define SDVO_OUTPUT_TMDS1   (1 << 8)
-+#define SDVO_OUTPUT_RGB1    (1 << 9)
-+#define SDVO_OUTPUT_CVBS1   (1 << 10)
-+#define SDVO_OUTPUT_SVID1   (1 << 11)
-+#define SDVO_OUTPUT_YPRPB1  (1 << 12)
-+#define SDVO_OUTPUT_SCART1  (1 << 13)
-+#define SDVO_OUTPUT_LVDS1   (1 << 14)
-+#define SDVO_OUTPUT_LAST    (14)
++/* pgoff in mmap is an unsigned long, so we need to make sure that
++ * the faked up offset will fit
++ */
 +
-+struct intel_sdvo_caps {
-+	u8 vendor_id;
-+	u8 device_id;
-+	u8 device_rev_id;
-+	u8 sdvo_version_major;
-+	u8 sdvo_version_minor;
-+	unsigned int sdvo_inputs_mask:2;
-+	unsigned int smooth_scaling:1;
-+	unsigned int sharp_scaling:1;
-+	unsigned int up_scaling:1;
-+	unsigned int down_scaling:1;
-+	unsigned int stall_support:1;
-+	unsigned int pad:1;
-+	u16 output_flags;
-+} __attribute__((packed));
++#if ULONG_MAX == UINT64_MAX
++#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
++#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
++#else
++#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
++#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
++#endif
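++/* e.g. with 4 KiB pages on 64-bit, fake offsets start at page index 0x100000, i.e. the 4 GiB byte offset */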
 +
-+/** This matches the EDID DTD structure, more or less */
-+struct intel_sdvo_dtd {
-+	struct {
-+		u16 clock;	/**< pixel clock, in 10kHz units */
-+		u8 h_active;	/**< lower 8 bits (pixels) */
-+		u8 h_blank;	/**< lower 8 bits (pixels) */
-+		u8 h_high;	/**< upper 4 bits each h_active, h_blank */
-+		u8 v_active;	/**< lower 8 bits (lines) */
-+		u8 v_blank;	/**< lower 8 bits (lines) */
-+		u8 v_high;	/**< upper 4 bits each v_active, v_blank */
-+	} part1;
++int
++drm_gem_init(struct drm_device *dev)
++{
++	struct drm_gem_mm *mm;
 +
-+	struct {
-+		u8 h_sync_off;	/**< lower 8 bits, from hblank start */
-+		u8 h_sync_width;	/**< lower 8 bits (pixels) */
-+		/** lower 4 bits each vsync offset, vsync width */
-+		u8 v_sync_off_width;
-+		/**
-+		* 2 high bits of hsync offset, 2 high bits of hsync width,
-+		* bits 4-5 of vsync offset, and 2 high bits of vsync width.
-+		*/
-+		u8 sync_off_width_high;
-+		u8 dtd_flags;
-+		u8 sdvo_flags;
-+		/** bits 6-7 of vsync offset at bits 6-7 */
-+		u8 v_sync_off_high;
-+		u8 reserved;
-+	} part2;
-+} __attribute__((packed));
++	drm_gem_names_init(&dev->object_names);
++	mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_WAITOK);
++	dev->mm_private = mm;
++	if (drm_ht_create(&mm->offset_hash, 19) != 0) {
++		free(mm, DRM_MEM_DRIVER);
++		return (ENOMEM);
++	}
++	mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
++	return (0);
++}
 +
-+struct intel_sdvo_pixel_clock_range {
-+	u16 min;	/**< pixel clock, in 10kHz units */
-+	u16 max;	/**< pixel clock, in 10kHz units */
-+} __attribute__((packed));
++void
++drm_gem_destroy(struct drm_device *dev)
++{
++	struct drm_gem_mm *mm;
 +
-+struct intel_sdvo_preferred_input_timing_args {
-+	u16 clock;
-+	u16 width;
-+	u16 height;
-+	u8	interlace:1;
-+	u8	scaled:1;
-+	u8	pad:6;
-+} __attribute__((packed));
++	mm = dev->mm_private;
++	dev->mm_private = NULL;
++	drm_ht_remove(&mm->offset_hash);
++	delete_unrhdr(mm->idxunr);
++	free(mm, DRM_MEM_DRIVER);
++	drm_gem_names_fini(&dev->object_names);
++}
 +
-+/* I2C registers for SDVO */
-+#define SDVO_I2C_ARG_0				0x07
-+#define SDVO_I2C_ARG_1				0x06
-+#define SDVO_I2C_ARG_2				0x05
-+#define SDVO_I2C_ARG_3				0x04
-+#define SDVO_I2C_ARG_4				0x03
-+#define SDVO_I2C_ARG_5				0x02
-+#define SDVO_I2C_ARG_6				0x01
-+#define SDVO_I2C_ARG_7				0x00
-+#define SDVO_I2C_OPCODE				0x08
-+#define SDVO_I2C_CMD_STATUS			0x09
-+#define SDVO_I2C_RETURN_0			0x0a
-+#define SDVO_I2C_RETURN_1			0x0b
-+#define SDVO_I2C_RETURN_2			0x0c
-+#define SDVO_I2C_RETURN_3			0x0d
-+#define SDVO_I2C_RETURN_4			0x0e
-+#define SDVO_I2C_RETURN_5			0x0f
-+#define SDVO_I2C_RETURN_6			0x10
-+#define SDVO_I2C_RETURN_7			0x11
-+#define SDVO_I2C_VENDOR_BEGIN			0x20
++int
++drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
++    size_t size)
++{
 +
-+/* Status results */
-+#define SDVO_CMD_STATUS_POWER_ON		0x0
-+#define SDVO_CMD_STATUS_SUCCESS			0x1
-+#define SDVO_CMD_STATUS_NOTSUPP			0x2
-+#define SDVO_CMD_STATUS_INVALID_ARG		0x3
-+#define SDVO_CMD_STATUS_PENDING			0x4
-+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED	0x5
-+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP	0x6
++	KASSERT((size & (PAGE_SIZE - 1)) == 0,
++	    ("Bad size %ju", (uintmax_t)size));
 +
-+/* SDVO commands, argument/result registers */
++	obj->dev = dev;
++	obj->vm_obj = vm_pager_allocate(OBJT_DEFAULT, NULL, size,
++	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
 +
-+#define SDVO_CMD_RESET					0x01
++	obj->refcount = 1;
++	obj->handle_count = 0;
++	obj->size = size;
 +
-+/** Returns a struct intel_sdvo_caps */
-+#define SDVO_CMD_GET_DEVICE_CAPS			0x02
++	return (0);
++}
 +
-+#define SDVO_CMD_GET_FIRMWARE_REV			0x86
-+# define SDVO_DEVICE_FIRMWARE_MINOR			SDVO_I2C_RETURN_0
-+# define SDVO_DEVICE_FIRMWARE_MAJOR			SDVO_I2C_RETURN_1
-+# define SDVO_DEVICE_FIRMWARE_PATCH			SDVO_I2C_RETURN_2
++int
++drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj,
++    size_t size)
++{
 +
-+/**
-+ * Reports which inputs are trained (managed to sync).
++	MPASS((size & (PAGE_SIZE - 1)) == 0);
++
++	obj->dev = dev;
++	obj->vm_obj = NULL;
++
++	obj->refcount = 1;
++	atomic_set(&obj->handle_count, 0);
++	obj->size = size;
++
++	return (0);
++}
++
++
++struct drm_gem_object *
++drm_gem_object_alloc(struct drm_device *dev, size_t size)
++{
++	struct drm_gem_object *obj;
++
++	obj = malloc(sizeof(*obj), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
++	if (drm_gem_object_init(dev, obj, size) != 0)
++		goto free;
++
++	if (dev->driver->gem_init_object != NULL &&
++	    dev->driver->gem_init_object(obj) != 0)
++		goto dealloc;
++	return (obj);
++dealloc:
++	vm_object_deallocate(obj->vm_obj);
++free:
++	free(obj, DRM_MEM_DRIVER);
++	return (NULL);
++}
++
++void
++drm_gem_object_free(struct drm_gem_object *obj)
++{
++	struct drm_device *dev;
++
++	dev = obj->dev;
++	DRM_LOCK_ASSERT(dev);
++	if (dev->driver->gem_free_object != NULL)
++		dev->driver->gem_free_object(obj);
++}
++
++void
++drm_gem_object_reference(struct drm_gem_object *obj)
++{
++
++	KASSERT(obj->refcount > 0, ("Dangling obj %p", obj));
++	refcount_acquire(&obj->refcount);
++}
++
++void
++drm_gem_object_unreference(struct drm_gem_object *obj)
++{
++
++	if (obj == NULL)
++		return;
++	if (refcount_release(&obj->refcount))
++		drm_gem_object_free(obj);
++}
++
++void
++drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
++{
++	struct drm_device *dev;
++
++	if (obj == NULL)
++		return;
++	dev = obj->dev;
++	DRM_LOCK();
++	drm_gem_object_unreference(obj);
++	DRM_UNLOCK();
++}
++
++void
++drm_gem_object_handle_reference(struct drm_gem_object *obj)
++{
++
++	drm_gem_object_reference(obj);
++	atomic_add_rel_int(&obj->handle_count, 1);
++}
++
++void
++drm_gem_object_handle_free(struct drm_gem_object *obj)
++{
++	struct drm_device *dev;
++	struct drm_gem_object *obj1;
++
++	dev = obj->dev;
++	if (obj->name != 0) {
++		obj1 = drm_gem_names_remove(&dev->object_names, obj->name);
++		obj->name = 0;
++		drm_gem_object_unreference(obj1);
++	}
++}
++
++void
++drm_gem_object_handle_unreference(struct drm_gem_object *obj)
++{
++
++	if (obj == NULL ||
++	    atomic_load_acq_int(&obj->handle_count) == 0)
++		return;
++
++	if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
++		drm_gem_object_handle_free(obj);
++	drm_gem_object_unreference(obj);
++}
++
++void
++drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
++{
++
++	if (obj == NULL ||
++	    atomic_load_acq_int(&obj->handle_count) == 0)
++		return;
++
++	if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
++		drm_gem_object_handle_free(obj);
++	drm_gem_object_unreference_unlocked(obj);
++}
++
++int
++drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj,
++    uint32_t *handle)
++{
++	int error;
++
++	error = drm_gem_name_create(&file_priv->object_names, obj, handle);
++	if (error != 0)
++		return (error);
++	drm_gem_object_handle_reference(obj);
++	return (0);
++}
++
++int
++drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle)
++{
++	struct drm_gem_object *obj;
++
++	obj = drm_gem_names_remove(&file_priv->object_names, handle);
++	if (obj == NULL)
++		return (EINVAL);
++	drm_gem_object_handle_unreference_unlocked(obj);
++	return (0);
++}
++
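[editor's note] As a point of reference, below is a minimal sketch of how a driver-side ioctl would use the object and handle helpers added above. The my_driver_create name and its caller are hypothetical; only helpers defined in this patch (plus round_page()) are used, and error handling is trimmed.

/*
 * Hypothetical driver ioctl: allocate a GEM object and hand the caller
 * a per-file handle for it.
 */
static int
my_driver_create(struct drm_device *dev, struct drm_file *file_priv,
    size_t size, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	/* drm_gem_object_init() asserts page alignment, so round up. */
	size = round_page(size);

	obj = drm_gem_object_alloc(dev, size);
	if (obj == NULL)
		return (ENOMEM);

	/*
	 * Create a handle in the caller's per-file name table; this takes
	 * an extra reference on the object.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);

	/*
	 * Drop the allocation reference; the handle (if created) keeps the
	 * object alive until drm_gem_handle_delete() or file close.
	 */
	drm_gem_object_unreference_unlocked(obj);
	return (ret);
}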
++void
++drm_gem_object_release(struct drm_gem_object *obj)
++{
++
++	/*
++	 * obj->vm_obj can be NULL for private gem objects.
++	 */
++	vm_object_deallocate(obj->vm_obj);
++}
++
++int
++drm_gem_open_ioctl(struct drm_device *dev, void *data,
++    struct drm_file *file_priv)
++{
++	struct drm_gem_open *args;
++	struct drm_gem_object *obj;
++	int ret;
++	uint32_t handle;
++
++	if (!drm_core_check_feature(dev, DRIVER_GEM))
++		return (ENODEV);
++	args = data;
++
++	obj = drm_gem_name_ref(&dev->object_names, args->name,
++	    (void (*)(void *))drm_gem_object_reference);
++	if (obj == NULL)
++		return (ENOENT);
++	handle = 0;
++	ret = drm_gem_handle_create(file_priv, obj, &handle);
++	drm_gem_object_unreference_unlocked(obj);
++	if (ret != 0)
++		return (ret);
++
++	args->handle = handle;
++	args->size = obj->size;
++
++	return (0);
++}
++
++void
++drm_gem_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++
++	drm_gem_names_init(&file_priv->object_names);
++}
++
++static int
++drm_gem_object_release_handle(uint32_t name, void *ptr, void *arg)
++{
++	struct drm_gem_object *obj;
++
++	obj = ptr;
++	drm_gem_object_handle_unreference(obj);
++	return (0);
++}
++
++void
++drm_gem_release(struct drm_device *dev, struct drm_file *file_priv)
++{
++
++	drm_gem_names_foreach(&file_priv->object_names,
++	    drm_gem_object_release_handle, NULL);
++	drm_gem_names_fini(&file_priv->object_names);
++}
++
++int
++drm_gem_close_ioctl(struct drm_device *dev, void *data,
++    struct drm_file *file_priv)
++{
++	struct drm_gem_close *args;
++
++	if (!drm_core_check_feature(dev, DRIVER_GEM))
++		return (ENODEV);
++	args = data;
++
++	return (drm_gem_handle_delete(file_priv, args->handle));
++}
++
++int
++drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++    struct drm_file *file_priv)
++{
++	struct drm_gem_flink *args;
++	struct drm_gem_object *obj;
++	int error;
++
++	if (!drm_core_check_feature(dev, DRIVER_GEM))
++		return (ENODEV);
++	args = data;
++
++	obj = drm_gem_name_ref(&file_priv->object_names, args->handle,
++	    (void (*)(void *))drm_gem_object_reference);
++	if (obj == NULL)
++		return (ENOENT);
++	error = drm_gem_name_create(&dev->object_names, obj, &obj->name);
++	if (error != 0) {
++		if (error == EALREADY)
++			error = 0;
++		drm_gem_object_unreference_unlocked(obj);
++	}
++	if (error == 0)
++		args->name = obj->name;
++	return (error);
++}
++
++struct drm_gem_object *
++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *file_priv,
++    uint32_t handle)
++{
++	struct drm_gem_object *obj;
++
++	obj = drm_gem_name_ref(&file_priv->object_names, handle,
++	    (void (*)(void *))drm_gem_object_reference);
++	return (obj);
++}
++
++static struct drm_gem_object *
++drm_gem_object_from_offset(struct drm_device *dev, vm_ooffset_t offset)
++{
++	struct drm_gem_object *obj;
++	struct drm_gem_mm *mm;
++	struct drm_hash_item *map_list;
++
++	if ((offset & DRM_GEM_MAPPING_MASK) != DRM_GEM_MAPPING_KEY)
++		return (NULL);
++	offset &= ~DRM_GEM_MAPPING_KEY;
++	mm = dev->mm_private;
++	if (drm_ht_find_item(&mm->offset_hash, DRM_GEM_MAPPING_IDX(offset),
++	    &map_list) != 0) {
++		DRM_DEBUG("drm_gem_object_from_offset: offset 0x%jx obj not found\n",
++		    (uintmax_t)offset);
++		return (NULL);
++	}
++	obj = member2struct(drm_gem_object, map_list, map_list);
++	return (obj);
++}
++
++int
++drm_gem_create_mmap_offset(struct drm_gem_object *obj)
++{
++	struct drm_device *dev;
++	struct drm_gem_mm *mm;
++	int ret;
++
++	if (obj->on_map)
++		return (0);
++	dev = obj->dev;
++	mm = dev->mm_private;
++	ret = 0;
++
++	obj->map_list.key = alloc_unr(mm->idxunr);
++	ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
++	if (ret != 0) {
++		DRM_ERROR("failed to add to map hash\n");
++		free_unr(mm->idxunr, obj->map_list.key);
++		return (ret);
++	}
++	obj->on_map = true;
++	return (0);
++}
++
++void
++drm_gem_free_mmap_offset(struct drm_gem_object *obj)
++{
++	struct drm_hash_item *list;
++	struct drm_gem_mm *mm;
++
++	if (!obj->on_map)
++		return;
++	mm = obj->dev->mm_private;
++	list = &obj->map_list;
++
++	drm_ht_remove_item(&mm->offset_hash, list);
++	free_unr(mm->idxunr, list->key);
++	obj->on_map = false;
++}
++
++int
++drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
++    struct vm_object **obj_res, int nprot)
++{
++	struct drm_device *dev;
++	struct drm_gem_object *gem_obj;
++	struct vm_object *vm_obj;
++
++	dev = drm_get_device_from_kdev(kdev);
++	if ((dev->driver->driver_features & DRIVER_GEM) == 0)
++		return (ENODEV);
++	DRM_LOCK();
++	gem_obj = drm_gem_object_from_offset(dev, *offset);
++	if (gem_obj == NULL) {
++		DRM_UNLOCK();
++		return (ENODEV);
++	}
++	drm_gem_object_reference(gem_obj);
++	DRM_UNLOCK();
++	vm_obj = cdev_pager_allocate(gem_obj, OBJT_MGTDEVICE,
++	    dev->driver->gem_pager_ops, size, nprot,
++	    DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
++	if (vm_obj == NULL) {
++		drm_gem_object_unreference_unlocked(gem_obj);
++		return (EINVAL);
++	}
++	*offset = DRM_GEM_MAPPING_MAPOFF(*offset);
++	*obj_res = vm_obj;
++	return (0);
++}
++
++void
++drm_gem_pager_dtr(void *handle)
++{
++	struct drm_gem_object *obj;
++	struct drm_device *dev;
++
++	obj = handle;
++	dev = obj->dev;
++
++	DRM_LOCK();
++	drm_gem_free_mmap_offset(obj);
++	drm_gem_object_unreference(obj);
++	DRM_UNLOCK();
++}
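[editor's note] For orientation, the userland side of this path is an ordinary mmap(2) on the DRM character device using the pseudo-offset the driver hands back; drm_gem_mmap_single() above then decodes that offset, finds the object in mm->offset_hash and attaches a cdev pager to it. A rough sketch follows; DRM_IOCTL_MY_MMAP_OFFSET and its argument structure are hypothetical stand-ins for a driver-specific ioctl that calls drm_gem_create_mmap_offset().

/* Userland sketch: map a GEM object through the fake-offset mechanism. */
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <stdint.h>

struct my_mmap_offset_args {	/* hypothetical ioctl argument */
	uint32_t handle;
	uint64_t offset;	/* filled in by the driver */
};

static void *
map_gem_object(int drm_fd, uint32_t handle, size_t size)
{
	struct my_mmap_offset_args args = { .handle = handle };

	/* Hypothetical ioctl returning the encoded pseudo-offset. */
	if (ioctl(drm_fd, DRM_IOCTL_MY_MMAP_OFFSET, &args) != 0)
		return (MAP_FAILED);

	/* The kernel resolves args.offset back to the GEM object. */
	return (mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
	    drm_fd, (off_t)args.offset));
}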
+diff --git a/sys/dev/drm/drm_gem_names.c b/sys/dev/drm/drm_gem_names.c
+new file mode 100644
+index 0000000..b1a904d
+--- /dev/null
++++ sys/dev/drm/drm_gem_names.c
+@@ -0,0 +1,211 @@
++/*-
++ * Copyright (c) 2011 The FreeBSD Foundation
++ * All rights reserved.
 + *
-+ * Devices must have trained within 2 vsyncs of a mode change.
++ * This software was developed by Konstantin Belousov under sponsorship from
++ * the FreeBSD Foundation.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
 + */
-+#define SDVO_CMD_GET_TRAINED_INPUTS			0x03
-+struct intel_sdvo_get_trained_inputs_response {
-+	unsigned int input0_trained:1;
-+	unsigned int input1_trained:1;
-+	unsigned int pad:6;
-+} __attribute__((packed));
 +
-+/** Returns a struct intel_sdvo_output_flags of active outputs. */
-+#define SDVO_CMD_GET_ACTIVE_OUTPUTS			0x04
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
 +
-+/**
-+ * Sets the current set of active outputs.
++#include <sys/param.h>
++#include <sys/systm.h>
++#include <sys/kernel.h>
++#include <sys/limits.h>
++#include <sys/malloc.h>
++
++#include "dev/drm/drm_gem_names.h"
++
++MALLOC_DEFINE(M_GEM_NAMES, "gem_name", "Hash headers for the gem names");
++
++static void drm_gem_names_delete_name(struct drm_gem_names *names,
++    struct drm_gem_name *np);
++
++void
++drm_gem_names_init(struct drm_gem_names *names)
++{
++
++	names->unr = new_unrhdr(1, INT_MAX, NULL); /* XXXKIB */
++	names->names_hash = hashinit(1000 /* XXXKIB */, M_GEM_NAMES,
++	    &names->hash_mask);
++	mtx_init(&names->lock, "drmnames", NULL, MTX_DEF);
++}
++
++void
++drm_gem_names_fini(struct drm_gem_names *names)
++{
++	struct drm_gem_name *np;
++	int i;
++
++	mtx_lock(&names->lock);
++	for (i = 0; i <= names->hash_mask; i++) {
++		while ((np = LIST_FIRST(&names->names_hash[i])) != NULL) {
++			drm_gem_names_delete_name(names, np);
++			mtx_lock(&names->lock);
++		}
++	}
++	mtx_unlock(&names->lock);
++	mtx_destroy(&names->lock);
++	hashdestroy(names->names_hash, M_GEM_NAMES, names->hash_mask);
++	delete_unrhdr(names->unr);
++}
++
++static struct drm_gem_names_head *
++gem_name_hash_index(struct drm_gem_names *names, int name)
++{
++
++	return (&names->names_hash[name & names->hash_mask]);
++}
++
++void *
++drm_gem_name_ref(struct drm_gem_names *names, uint32_t name,
++    void (*ref)(void *))
++{
++	struct drm_gem_name *n;
++
++	mtx_lock(&names->lock);
++	LIST_FOREACH(n, gem_name_hash_index(names, name), link) {
++		if (n->name == name) {
++			if (ref != NULL)
++				ref(n->ptr);
++			mtx_unlock(&names->lock);
++			return (n->ptr);
++		}
++	}
++	mtx_unlock(&names->lock);
++	return (NULL);
++}
++
++struct drm_gem_ptr_match_arg {
++	uint32_t res;
++	void *ptr;
++};
++
++static int
++drm_gem_ptr_match(uint32_t name, void *ptr, void *arg)
++{
++	struct drm_gem_ptr_match_arg *a;
++
++	a = arg;
++	if (ptr == a->ptr) {
++		a->res = name;
++		return (1);
++	} else
++		return (0);
++}
++
++uint32_t
++drm_gem_find_name(struct drm_gem_names *names, void *ptr)
++{
++	struct drm_gem_ptr_match_arg arg;
++
++	arg.res = 0;
++	arg.ptr = ptr;
++	drm_gem_names_foreach(names, drm_gem_ptr_match, &arg);
++	return (arg.res);
++}
++
++int
++drm_gem_name_create(struct drm_gem_names *names, void *p, uint32_t *name)
++{
++	struct drm_gem_name *np;
++
++	np = malloc(sizeof(struct drm_gem_name), M_GEM_NAMES, M_WAITOK);
++	mtx_lock(&names->lock);
++	if (*name != 0) {
++		mtx_unlock(&names->lock);
++		return (EALREADY);
++	}
++	np->name = alloc_unr(names->unr);
++	if (np->name == -1) {
++		mtx_unlock(&names->lock);
++		free(np, M_GEM_NAMES);
++		return (ENOMEM);
++	}
++	*name = np->name;
++	np->ptr = p;
++	LIST_INSERT_HEAD(gem_name_hash_index(names, np->name), np, link);
++	mtx_unlock(&names->lock);
++	return (0);
++}
++
++static void
++drm_gem_names_delete_name(struct drm_gem_names *names, struct drm_gem_name *np)
++{
++
++	mtx_assert(&names->lock, MA_OWNED);
++	LIST_REMOVE(np, link);
++	mtx_unlock(&names->lock);
++	free_unr(names->unr, np->name);
++	free(np, M_GEM_NAMES);
++}
++
++void *
++drm_gem_names_remove(struct drm_gem_names *names, uint32_t name)
++{
++	struct drm_gem_name *n;
++	void *res;
++
++	mtx_lock(&names->lock);
++	LIST_FOREACH(n, gem_name_hash_index(names, name), link) {
++		if (n->name == name) {
++			res = n->ptr;
++			drm_gem_names_delete_name(names, n);
++			return (res);
++		}
++	}
++	mtx_unlock(&names->lock);
++	return (NULL);
++}
++
++void
++drm_gem_names_foreach(struct drm_gem_names *names,
++    int (*f)(uint32_t, void *, void *), void *arg)
++{
++	struct drm_gem_name *np;
++	struct drm_gem_name marker;
++	int i, fres;
++
++	bzero(&marker, sizeof(marker));
++	marker.name = -1;
++	mtx_lock(&names->lock);
++	for (i = 0; i <= names->hash_mask; i++) {
++		for (np = LIST_FIRST(&names->names_hash[i]); np != NULL; ) {
++			if (np->name == -1) {
++				np = LIST_NEXT(np, link);
++				continue;
++			}
++			LIST_INSERT_AFTER(np, &marker, link);
++			mtx_unlock(&names->lock);
++			fres = f(np->name, np->ptr, arg);
++			mtx_lock(&names->lock);
++			np = LIST_NEXT(&marker, link);
++			LIST_REMOVE(&marker, link);
++			if (fres)
++				break;
++		}
++	}
++	mtx_unlock(&names->lock);
++}
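[editor's note] A small self-contained sketch of how the name table above is meant to be used, illustrative only; it exercises only the functions declared in drm_gem_names.h.

/* Register an object under a small-integer name, look it up, drop it. */
static void
names_example(void *obj)
{
	struct drm_gem_names table;
	uint32_t name = 0;	/* must be 0, or name_create returns EALREADY */
	void *p;

	drm_gem_names_init(&table);

	/* Allocates a name from the unr allocator and hashes obj under it. */
	if (drm_gem_name_create(&table, obj, &name) == 0) {
		/* NULL ref callback means "no extra reference taken". */
		p = drm_gem_name_ref(&table, name, NULL);
		KASSERT(p == obj, ("lookup mismatch"));

		/* Remove returns the stored pointer, or NULL if absent. */
		p = drm_gem_names_remove(&table, name);
		KASSERT(p == obj, ("remove mismatch"));
	}

	drm_gem_names_fini(&table);
}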
+diff --git a/sys/dev/drm/drm_gem_names.h b/sys/dev/drm/drm_gem_names.h
+new file mode 100644
+index 0000000..0fe4edd
+--- /dev/null
++++ sys/dev/drm/drm_gem_names.h
+@@ -0,0 +1,64 @@
++/*-
++ * Copyright (c) 2011 The FreeBSD Foundation
++ * All rights reserved.
 + *
-+ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
-+ * on multi-output devices.
++ * This software was developed by Konstantin Belousov under sponsorship from
++ * the FreeBSD Foundation.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * $FreeBSD$
++ *
 + */
-+#define SDVO_CMD_SET_ACTIVE_OUTPUTS			0x05
 +
-+/**
-+ * Returns the current mapping of SDVO inputs to outputs on the device.
-+ *
-+ * Returns two struct intel_sdvo_output_flags structures.
-+ */
-+#define SDVO_CMD_GET_IN_OUT_MAP				0x06
-+struct intel_sdvo_in_out_map {
-+	u16 in0, in1;
++#ifndef DRM_GEM_NAMES_H
++#define	DRM_GEM_NAMES_H
++
++#include <sys/types.h>
++#include <sys/lock.h>
++#include <sys/mutex.h>
++#include <sys/queue.h>
++
++struct drm_gem_name {
++	uint32_t name;
++	void *ptr;
++	LIST_ENTRY(drm_gem_name) link;
 +};
 +
++struct drm_gem_names {
++	struct mtx lock;
++	LIST_HEAD(drm_gem_names_head, drm_gem_name) *names_hash;
++	u_long hash_mask;
++	struct unrhdr *unr;
++};
++
++void drm_gem_names_init(struct drm_gem_names *names);
++void drm_gem_names_fini(struct drm_gem_names *names);
++uint32_t drm_gem_find_name(struct drm_gem_names *names, void *ptr);
++void *drm_gem_name_ref(struct drm_gem_names *names, uint32_t name,
++    void (*ref)(void *));
++int drm_gem_name_create(struct drm_gem_names *names, void *obj, uint32_t *name);
++void drm_gem_names_foreach(struct drm_gem_names *names,
++    int (*f)(uint32_t, void *, void *), void *arg);
++void *drm_gem_names_remove(struct drm_gem_names *names, uint32_t name);
++
++#endif
+diff --git a/sys/dev/drm/drm_hashtab.h b/sys/dev/drm/drm_hashtab.h
+index 967022d..fc200b5 100644
+--- sys/dev/drm/drm_hashtab.h
++++ sys/dev/drm/drm_hashtab.h
+@@ -48,7 +48,7 @@ struct drm_hash_item {
+ 
+ struct drm_open_hash {
+ 	LIST_HEAD(drm_hash_item_list, drm_hash_item) *table;
+-	unsigned int  size;
++	unsigned int size;
+ 	unsigned int order;
+ 	unsigned long mask;
+ };
+diff --git a/sys/dev/drm/drm_ioctl.c b/sys/dev/drm/drm_ioctl.c
+index b23c45a..cb4f271 100644
+--- sys/dev/drm/drm_ioctl.c
++++ sys/dev/drm/drm_ioctl.c
+@@ -249,6 +249,15 @@ int drm_setversion(struct drm_device *dev, void *data,
+ 	sv->drm_dd_major = dev->driver->major;
+ 	sv->drm_dd_minor = dev->driver->minor;
+ 
++	DRM_DEBUG("ver.drm_di_major %d ver.drm_di_minor %d "
++	    "ver.drm_dd_major %d ver.drm_dd_minor %d\n",
++	    ver.drm_di_major, ver.drm_di_minor, ver.drm_dd_major,
++	    ver.drm_dd_minor);
++	DRM_DEBUG("sv->drm_di_major %d sv->drm_di_minor %d "
++	    "sv->drm_dd_major %d sv->drm_dd_minor %d\n",
++	    sv->drm_di_major, sv->drm_di_minor, sv->drm_dd_major,
++	    sv->drm_dd_minor);
++
+ 	if (ver.drm_di_major != -1) {
+ 		if (ver.drm_di_major != DRM_IF_MAJOR ||
+ 		    ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
+diff --git a/sys/dev/drm/drm_irq.c b/sys/dev/drm/drm_irq.c
+index 8977bcf..8eee95f 100644
+--- sys/dev/drm/drm_irq.c
++++ sys/dev/drm/drm_irq.c
+@@ -36,6 +36,23 @@ __FBSDID("$FreeBSD$");
+ #include "dev/drm/drmP.h"
+ #include "dev/drm/drm.h"
+ 
++MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");
++
++/* Access macro for slots in vblank timestamp ringbuffer. */
++#define vblanktimestamp(dev, crtc, count) ( \
++	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
++	((count) % DRM_VBLANKTIME_RBSIZE)])
++
++/* Retry timestamp calculation up to 3 times to satisfy
++ * drm_timestamp_precision before giving up.
++ */
++#define DRM_TIMESTAMP_MAXRETRIES 3
++
++/* Threshold in nanoseconds for detection of redundant
++ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
++ */
++#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
++
+ int drm_irq_by_busid(struct drm_device *dev, void *data,
+ 		     struct drm_file *file_priv)
+ {
+@@ -55,109 +72,23 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
+ 	return 0;
+ }
+ 
+-static irqreturn_t
+-drm_irq_handler_wrap(DRM_IRQ_ARGS)
++static void
++drm_irq_handler_wrap(void *arg)
+ {
+ 	struct drm_device *dev = arg;
+ 
+-	DRM_SPINLOCK(&dev->irq_lock);
++	mtx_lock(&dev->irq_lock);
+ 	dev->driver->irq_handler(arg);
+-	DRM_SPINUNLOCK(&dev->irq_lock);
+-}
+-
+-static void vblank_disable_fn(void *arg)
+-{
+-	struct drm_device *dev = (struct drm_device *)arg;
+-	int i;
+-
+-	/* Make sure that we are called with the lock held */
+-	mtx_assert(&dev->vbl_lock, MA_OWNED);
+-
+-	if (callout_pending(&dev->vblank_disable_timer)) {
+-		/* callout was reset */
+-		return;
+-	}
+-	if (!callout_active(&dev->vblank_disable_timer)) {
+-		/* callout was stopped */
+-		return;
+-	}
+-	callout_deactivate(&dev->vblank_disable_timer);
+-
+-	DRM_DEBUG("vblank_disable: %s\n", dev->vblank_disable_allowed ?
+-		"allowed" : "denied");
+-	if (!dev->vblank_disable_allowed)
+-		return;
+-
+-	for (i = 0; i < dev->num_crtcs; i++) {
+-		if (dev->vblank[i].refcount == 0 &&
+-		    dev->vblank[i].enabled && !dev->vblank[i].inmodeset) {
+-			DRM_DEBUG("disabling vblank on crtc %d\n", i);
+-			dev->vblank[i].last =
+-			    dev->driver->get_vblank_counter(dev, i);
+-			dev->driver->disable_vblank(dev, i);
+-			dev->vblank[i].enabled = 0;
+-		}
+-	}
+-}
+-
+-void drm_vblank_cleanup(struct drm_device *dev)
+-{
+-	/* Bail if the driver didn't call drm_vblank_init() */
+-	if (dev->num_crtcs == 0)
+-		return;
+-
+-	DRM_SPINLOCK(&dev->vbl_lock);
+-	callout_stop(&dev->vblank_disable_timer);
+-	DRM_SPINUNLOCK(&dev->vbl_lock);
+-
+-	callout_drain(&dev->vblank_disable_timer);
+-
+-	DRM_SPINLOCK(&dev->vbl_lock);
+-	vblank_disable_fn((void *)dev);
+-	DRM_SPINUNLOCK(&dev->vbl_lock);
+-
+-	free(dev->vblank, DRM_MEM_DRIVER);
+-
+-	dev->num_crtcs = 0;
+-}
+-
+-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+-{
+-	int i, ret = ENOMEM;
+-
+-	callout_init_mtx(&dev->vblank_disable_timer, &dev->vbl_lock, 0);
+-	dev->num_crtcs = num_crtcs;
+-
+-	dev->vblank = malloc(sizeof(struct drm_vblank_info) * num_crtcs,
+-	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+-	if (!dev->vblank)
+-	    goto err;
+-
+-	DRM_DEBUG("\n");
+-
+-	/* Zero per-crtc vblank stuff */
+-	DRM_SPINLOCK(&dev->vbl_lock);
+-	for (i = 0; i < num_crtcs; i++) {
+-		DRM_INIT_WAITQUEUE(&dev->vblank[i].queue);
+-		dev->vblank[i].refcount = 0;
+-		atomic_store_rel_32(&dev->vblank[i].count, 0);
+-	}
+-	dev->vblank_disable_allowed = 0;
+-	DRM_SPINUNLOCK(&dev->vbl_lock);
+-
+-	return 0;
+-
+-err:
+-	drm_vblank_cleanup(dev);
+-	return ret;
++	mtx_unlock(&dev->irq_lock);
+ }
+ 
+-int drm_irq_install(struct drm_device *dev)
++int
++drm_irq_install(struct drm_device *dev)
+ {
+-	int crtc, retcode;
++	int retcode;
+ 
+ 	if (dev->irq == 0 || dev->dev_private == NULL)
+-		return EINVAL;
++		return (EINVAL);
+ 
+ 	DRM_DEBUG("irq=%d\n", dev->irq);
+ 
+@@ -171,50 +102,36 @@ int drm_irq_install(struct drm_device *dev)
+ 	dev->context_flag = 0;
+ 
+ 	/* Before installing handler */
+-	dev->driver->irq_preinstall(dev);
++	if (dev->driver->irq_preinstall)
++		dev->driver->irq_preinstall(dev);
+ 	DRM_UNLOCK();
+ 
+ 	/* Install handler */
+-#if __FreeBSD_version >= 700031
+ 	retcode = bus_setup_intr(dev->device, dev->irqr,
+-				 INTR_TYPE_TTY | INTR_MPSAFE,
+-				 NULL, drm_irq_handler_wrap, dev, &dev->irqh);
+-#else
+-	retcode = bus_setup_intr(dev->device, dev->irqr,
+-				 INTR_TYPE_TTY | INTR_MPSAFE,
+-				 drm_irq_handler_wrap, dev, &dev->irqh);
+-#endif
++	    INTR_TYPE_TTY | INTR_MPSAFE, NULL,
++	    (dev->driver->driver_features & DRIVER_LOCKLESS_IRQ) != 0 ?
++		drm_irq_handler_wrap : dev->driver->irq_handler,
++	    dev, &dev->irqh);
+ 	if (retcode != 0)
+ 		goto err;
+ 
+ 	/* After installing handler */
+ 	DRM_LOCK();
+-	dev->driver->irq_postinstall(dev);
++	if (dev->driver->irq_postinstall)
++		dev->driver->irq_postinstall(dev);
+ 	DRM_UNLOCK();
+-	if (dev->driver->enable_vblank) {
+-		DRM_SPINLOCK(&dev->vbl_lock);
+-		for( crtc = 0 ; crtc < dev->num_crtcs ; crtc++) {
+-			if (dev->driver->enable_vblank(dev, crtc) == 0) {
+-				dev->vblank[crtc].enabled = 1;
+-			}
+-		}
+-		callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
+-		    (timeout_t *)vblank_disable_fn, (void *)dev);
+-		DRM_SPINUNLOCK(&dev->vbl_lock);
+-	}
+ 
+-	return 0;
++	return (0);
+ err:
+-	DRM_LOCK();
++	device_printf(dev->device, "Error setting interrupt: %d\n", retcode);
+ 	dev->irq_enabled = 0;
+-	DRM_UNLOCK();
+ 
+-	return retcode;
++	return (retcode);
+ }
+ 
+ int drm_irq_uninstall(struct drm_device *dev)
+ {
+-	int crtc;
++	int i;
+ 
+ 	if (!dev->irq_enabled)
+ 		return EINVAL;
+@@ -224,20 +141,21 @@ int drm_irq_uninstall(struct drm_device *dev)
+ 	/*
+ 	* Wake up any waiters so they don't hang.
+ 	*/
+-	DRM_SPINLOCK(&dev->vbl_lock);
+-	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+-		if (dev->vblank[crtc].enabled) {
+-			DRM_WAKEUP(&dev->vblank[crtc].queue);
+-			dev->vblank[crtc].last =
+-			    dev->driver->get_vblank_counter(dev, crtc);
+-			dev->vblank[crtc].enabled = 0;
++	if (dev->num_crtcs) {
++		mtx_lock(&dev->vbl_lock);
++		for (i = 0; i < dev->num_crtcs; i++) {
++			wakeup(&dev->_vblank_count[i]);
++			dev->vblank_enabled[i] = 0;
++			dev->last_vblank[i] =
++				dev->driver->get_vblank_counter(dev, i);
+ 		}
++		mtx_unlock(&dev->vbl_lock);
+ 	}
+-	DRM_SPINUNLOCK(&dev->vbl_lock);
+ 
+ 	DRM_DEBUG("irq=%d\n", dev->irq);
+ 
+-	dev->driver->irq_uninstall(dev);
++	if (dev->driver->irq_uninstall)
++		dev->driver->irq_uninstall(dev);
+ 
+ 	DRM_UNLOCK();
+ 	bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
+@@ -258,6 +176,8 @@ int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 		 */
+ 		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ 			return 0;
++		if (drm_core_check_feature(dev, DRIVER_MODESET))
++			return 0;
+ 		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+ 		    ctl->irq != dev->irq)
+ 			return EINVAL;
+@@ -265,6 +185,8 @@ int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 	case DRM_UNINST_HANDLER:
+ 		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ 			return 0;
++		if (drm_core_check_feature(dev, DRIVER_MODESET))
++			return 0;
+ 		DRM_LOCK();
+ 		err = drm_irq_uninstall(dev);
+ 		DRM_UNLOCK();
+@@ -274,14 +196,537 @@ int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 	}
+ }
+ 
++#define NSEC_PER_USEC	1000L
++#define NSEC_PER_SEC	1000000000L
++
++int64_t
++timeval_to_ns(const struct timeval *tv)
++{
++	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
++		tv->tv_usec * NSEC_PER_USEC;
++}
++
++struct timeval
++ns_to_timeval(const int64_t nsec)
++{
++	struct timeval tv;
++	int64_t rem;
++
++	if (nsec == 0) {
++		tv.tv_sec = 0;
++		tv.tv_usec = 0;
++		return (tv);
++	}
++
++	tv.tv_sec = nsec / NSEC_PER_SEC;
++	rem = nsec % NSEC_PER_SEC;
++	if (rem < 0) {
++		tv.tv_sec--;
++		rem += NSEC_PER_SEC;
++	}
++	tv.tv_usec = rem / 1000;
++	return (tv);
++}
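[editor's note] A quick sanity check of the two conversion helpers above, plain arithmetic and nothing driver-specific:

static void
timeconv_example(void)
{
	int64_t ns = 1500000;			/* 1.5 ms */
	struct timeval tv;

	tv = ns_to_timeval(ns);			/* tv_sec = 0, tv_usec = 1500 */
	KASSERT(timeval_to_ns(&tv) == ns, ("round trip failed"));
}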
++
++/*
++ * Clear vblank timestamp buffer for a crtc.
++ */
++static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
++{
++	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
++		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
++}
++
++static int64_t
++abs64(int64_t x)
++{
++
++	return (x < 0 ? -x : x);
++}
++
++/*
++ * Disable vblank irq's on crtc, make sure that last vblank count
++ * of hardware and corresponding consistent software vblank counter
++ * are preserved, even if there are any spurious vblank irq's after
++ * disable.
++ */
++static void vblank_disable_and_save(struct drm_device *dev, int crtc)
++{
++	u32 vblcount;
++	int64_t diff_ns;
++	int vblrc;
++	struct timeval tvblank;
++
++	/* Prevent vblank irq processing while disabling vblank irqs,
++	 * so no updates of timestamps or count can happen after we've
++	 * disabled. Needed to prevent races in case of delayed irq's.
++	 */
++	mtx_lock(&dev->vblank_time_lock);
++
++	dev->driver->disable_vblank(dev, crtc);
++	dev->vblank_enabled[crtc] = 0;
++
++	/* No further vblank irq's will be processed after
++	 * this point. Get current hardware vblank count and
++	 * vblank timestamp, repeat until they are consistent.
++	 *
++	 * FIXME: There is still a race condition here and in
++	 * drm_update_vblank_count() which can cause off-by-one
++	 * reinitialization of software vblank counter. If gpu
++	 * vblank counter doesn't increment exactly at the leading
++	 * edge of a vblank interval, then we can lose 1 count if
++	 * we happen to execute between start of vblank and the
++	 * delayed gpu counter increment.
++	 */
++	do {
++		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
++		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
++	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
++
++	/* Compute time difference to stored timestamp of last vblank
++	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
++	 */
++	vblcount = atomic_read(&dev->_vblank_count[crtc]);
++	diff_ns = timeval_to_ns(&tvblank) -
++		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
++
++	/* If there is at least 1 msec difference between the last stored
++	 * timestamp and tvblank, then we are currently executing our
++	 * disable inside a new vblank interval, the tvblank timestamp
++	 * corresponds to this new vblank interval and the irq handler
++	 * for this vblank didn't run yet and won't run due to our disable.
++	 * Therefore we need to do the job of drm_handle_vblank() and
++	 * increment the vblank counter by one to account for this vblank.
++	 *
++	 * Skip this step if there isn't any high precision timestamp
++	 * available. In that case we can't account for this and just
++	 * hope for the best.
++	 */
++	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
++		atomic_inc(&dev->_vblank_count[crtc]);
++	}
++
++	/* Invalidate all timestamps while vblank irq's are off. */
++	clear_vblank_timestamps(dev, crtc);
++
++	mtx_unlock(&dev->vblank_time_lock);
++}
++
++static void vblank_disable_fn(void *arg)
++{
++	struct drm_device *dev = (struct drm_device *)arg;
++	int i;
++
++	if (!dev->vblank_disable_allowed)
++		return;
++
++	for (i = 0; i < dev->num_crtcs; i++) {
++		mtx_lock(&dev->vbl_lock);
++		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
++		    dev->vblank_enabled[i]) {
++			DRM_DEBUG("disabling vblank on crtc %d\n", i);
++			vblank_disable_and_save(dev, i);
++		}
++		mtx_unlock(&dev->vbl_lock);
++	}
++}
++
++void drm_vblank_cleanup(struct drm_device *dev)
++{
++	/* Bail if the driver didn't call drm_vblank_init() */
++	if (dev->num_crtcs == 0)
++		return;
++
++	callout_stop(&dev->vblank_disable_callout);
++
++	vblank_disable_fn(dev);
++
++	free(dev->_vblank_count, DRM_MEM_VBLANK);
++	free(dev->vblank_refcount, DRM_MEM_VBLANK);
++	free(dev->vblank_enabled, DRM_MEM_VBLANK);
++	free(dev->last_vblank, DRM_MEM_VBLANK);
++	free(dev->last_vblank_wait, DRM_MEM_VBLANK);
++	free(dev->vblank_inmodeset, DRM_MEM_VBLANK);
++	free(dev->_vblank_time, DRM_MEM_VBLANK);
++
++	dev->num_crtcs = 0;
++}
++
++int drm_vblank_init(struct drm_device *dev, int num_crtcs)
++{
++	int i;
++
++	callout_init(&dev->vblank_disable_callout, CALLOUT_MPSAFE);
++#if 0
++	mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
++#endif
++	mtx_init(&dev->vblank_time_lock, "drmvtl", NULL, MTX_DEF);
++
++	dev->num_crtcs = num_crtcs;
++
++	dev->_vblank_count = malloc(sizeof(atomic_t) * num_crtcs,
++	    DRM_MEM_VBLANK, M_WAITOK);
++	dev->vblank_refcount = malloc(sizeof(atomic_t) * num_crtcs,
++	    DRM_MEM_VBLANK, M_WAITOK);
++	dev->vblank_enabled = malloc(num_crtcs * sizeof(int),
++	    DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
++	dev->last_vblank = malloc(num_crtcs * sizeof(u32),
++	    DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
++	dev->last_vblank_wait = malloc(num_crtcs * sizeof(u32),
++	    DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
++	dev->vblank_inmodeset = malloc(num_crtcs * sizeof(int),
++	    DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
++	dev->_vblank_time = malloc(num_crtcs * DRM_VBLANKTIME_RBSIZE *
++	    sizeof(struct timeval), DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
++	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
++
++	/* Driver specific high-precision vblank timestamping supported? */
++	if (dev->driver->get_vblank_timestamp)
++		DRM_INFO("Driver supports precise vblank timestamp query.\n");
++	else
++		DRM_INFO("No driver support for vblank timestamp query.\n");
++
++	/* Zero per-crtc vblank stuff */
++	for (i = 0; i < num_crtcs; i++) {
++		atomic_set(&dev->_vblank_count[i], 0);
++		atomic_set(&dev->vblank_refcount[i], 0);
++	}
++
++	dev->vblank_disable_allowed = 0;
++	return 0;
++}
++
++void
++drm_calc_timestamping_constants(struct drm_crtc *crtc)
++{
++	int64_t linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
++	uint64_t dotclock;
++
++	/* Dot clock in Hz: */
++	dotclock = (uint64_t) crtc->hwmode.clock * 1000;
++
++	/* Fields of interlaced scanout modes are only half a frame duration.
++	 * Double the dotclock to get half the frame-/line-/pixelduration.
++	 */
++	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
++		dotclock *= 2;
++
++	/* Valid dotclock? */
++	if (dotclock > 0) {
++		/* Convert scanline length in pixels and video dot clock to
++		 * line duration, frame duration and pixel duration in
++		 * nanoseconds:
++		 */
++		pixeldur_ns = (int64_t)1000000000 / dotclock;
++		linedur_ns  = ((uint64_t)crtc->hwmode.crtc_htotal *
++		    1000000000) / dotclock;
++		framedur_ns = (int64_t)crtc->hwmode.crtc_vtotal * linedur_ns;
++	} else
++		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
++			  crtc->base.id);
++
++	crtc->pixeldur_ns = pixeldur_ns;
++	crtc->linedur_ns  = linedur_ns;
++	crtc->framedur_ns = framedur_ns;
++
++	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
++		  crtc->base.id, crtc->hwmode.crtc_htotal,
++		  crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
++	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
++		  crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
++		  (int) linedur_ns, (int) pixeldur_ns);
++}
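[editor's note] As a worked example of the constants computed above, take a common 1920x1080@60 mode with a 148.5 MHz dot clock and 2200x1125 total timing (numbers are illustrative, integer division as in the code):

/*
 * 1920x1080@60: clock = 148500 kHz, crtc_htotal = 2200, crtc_vtotal = 1125.
 *
 *   dotclock    = 148500 * 1000             = 148,500,000 Hz
 *   pixeldur_ns = 1e9 / dotclock            = 6 ns
 *   linedur_ns  = 2200 * 1e9 / dotclock     = 14,814 ns  (~14.8 us per line)
 *   framedur_ns = 1125 * linedur_ns         = 16,665,750 ns (~16.7 ms, i.e. ~60 Hz)
 */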
++
 +/**
-+ * Sets the current mapping of SDVO inputs to outputs on the device.
++ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
++ * drivers. Implements calculation of exact vblank timestamps from
++ * given drm_display_mode timings and current video scanout position
++ * of a crtc. This can be called from within get_vblank_timestamp()
++ * implementation of a kms driver to implement the actual timestamping.
 + *
-+ * Takes two struct i380_sdvo_output_flags structures.
++ * Should return timestamps conforming to the OML_sync_control OpenML
++ * extension specification. The timestamp corresponds to the end of
++ * the vblank interval, aka start of scanout of topmost-leftmost display
++ * pixel in the following video frame.
++ *
++ * Requires support for optional dev->driver->get_scanout_position()
++ * in kms driver, plus a bit of setup code to provide a drm_display_mode
++ * that corresponds to the true scanout timing.
++ *
++ * The current implementation only handles standard video modes. It
++ * returns as no operation if a doublescan or interlaced video mode is
++ * active. Higher level code is expected to handle this.
++ *
++ * @dev: DRM device.
++ * @crtc: Which crtc's vblank timestamp to retrieve.
++ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
++ *             On return contains true maximum error of timestamp.
++ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
++ * @flags: Flags to pass to driver:
++ *         0 = Default.
++ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
++ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
++ *
++ * Returns negative value on error, failure or if not supported in current
++ * video mode:
++ *
++ * -EINVAL   - Invalid crtc.
++ * -EAGAIN   - Temporarily unavailable, e.g., called before initial modeset.
++ * -ENOTSUPP - Function not supported in current display mode.
++ * -EIO      - Failed, e.g., due to failed scanout position query.
++ *
++ * Returns or'ed positive status flags on success:
++ *
++ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
++ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
++ *
 + */
-+#define SDVO_CMD_SET_IN_OUT_MAP				0x07
++int
++drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
++    int *max_error, struct timeval *vblank_time, unsigned flags,
++    struct drm_crtc *refcrtc)
++{
++	struct timeval stime, raw_time;
++	struct drm_display_mode *mode;
++	int vbl_status, vtotal, vdisplay;
++	int vpos, hpos, i;
++	int64_t framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
++	bool invbl;
 +
++	if (crtc < 0 || crtc >= dev->num_crtcs) {
++		DRM_ERROR("Invalid crtc %d\n", crtc);
++		return -EINVAL;
++	}
++
++	/* Scanout position query not supported? Should not happen. */
++	if (!dev->driver->get_scanout_position) {
++		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
++		return -EIO;
++	}
++
++	mode = &refcrtc->hwmode;
++	vtotal = mode->crtc_vtotal;
++	vdisplay = mode->crtc_vdisplay;
++
++	/* Durations of frames, lines, pixels in nanoseconds. */
++	framedur_ns = refcrtc->framedur_ns;
++	linedur_ns  = refcrtc->linedur_ns;
++	pixeldur_ns = refcrtc->pixeldur_ns;
++
++	/* If mode timing undefined, just return as no-op:
++	 * Happens during initial modesetting of a crtc.
++	 */
++	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
++		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
++		return -EAGAIN;
++	}
++
++	/* Get current scanout position with system timestamp.
++	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
++	 * if single query takes longer than max_error nanoseconds.
++	 *
++	 * This guarantees a tight bound on maximum error if
++	 * code gets preempted or delayed for some reason.
++	 */
++	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
++		/* Disable preemption to make it very likely to
++		 * succeed in the first iteration.
++		 */
++		critical_enter();
++
++		/* Get system timestamp before query. */
++		getmicrouptime(&stime);
++
++		/* Get vertical and horizontal scanout pos. vpos, hpos. */
++		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
++
++		/* Get system timestamp after query. */
++		getmicrouptime(&raw_time);
++
++		critical_exit();
++
++		/* Return as no-op if scanout query unsupported or failed. */
++		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
++			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
++				  crtc, vbl_status);
++			return -EIO;
++		}
++
++		duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
++
++		/* Accept result with <  max_error nsecs timing uncertainty. */
++		if (duration_ns <= (int64_t) *max_error)
++			break;
++	}
++
++	/* Noisy system timing? */
++	if (i == DRM_TIMESTAMP_MAXRETRIES) {
++		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
++			  crtc, (int) duration_ns/1000, *max_error/1000, i);
++	}
++
++	/* Return upper bound of timestamp precision error. */
++	*max_error = (int) duration_ns;
++
++	/* Check if in vblank area:
++	 * vpos is >=0 in video scanout area, but negative
++	 * within vblank area, counting down the number of lines until
++	 * start of scanout.
++	 */
++	invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
++
++	/* Convert scanout position into elapsed time at raw_time query
++	 * since start of scanout at first display scanline. delta_ns
++	 * can be negative if start of scanout hasn't happened yet.
++	 */
++	delta_ns = (int64_t)vpos * linedur_ns + (int64_t)hpos * pixeldur_ns;
++
++	/* Is vpos outside nominal vblank area, but less than
++	 * 1/100 of a frame height away from start of vblank?
++	 * If so, assume this isn't a massively delayed vblank
++	 * interrupt, but a vblank interrupt that fired a few
++	 * microseconds before true start of vblank. Compensate
++	 * by adding a full frame duration to the final timestamp.
++	 * Happens, e.g., on ATI R500, R600.
++	 *
++	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
++	 */
++	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
++	    ((vdisplay - vpos) < vtotal / 100)) {
++		delta_ns = delta_ns - framedur_ns;
++
++		/* Signal this correction as "applied". */
++		vbl_status |= 0x8;
++	}
++
++	/* Subtract time delta from raw timestamp to get final
++	 * vblank_time timestamp for end of vblank.
++	 */
++	*vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
++
++	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %jd.%jd -> %jd.%jd [e %d us, %d rep]\n",
++		  crtc, (int)vbl_status, hpos, vpos, (uintmax_t)raw_time.tv_sec,
++		  (uintmax_t)raw_time.tv_usec, (uintmax_t)vblank_time->tv_sec,
++		  (uintmax_t)vblank_time->tv_usec, (int)duration_ns/1000, i);
++
++	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
++	if (invbl)
++		vbl_status |= DRM_VBLANKTIME_INVBL;
++
++	return vbl_status;
++}
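[editor's note] A sketch of how a KMS driver would plug this helper in as its get_vblank_timestamp() callback. The my_crtc_for_pipe() lookup is hypothetical; the i915 code in this patch does the equivalent with its own crtc bookkeeping.

/* Hypothetical driver glue: resolve the crtc, then defer to the helper. */
static int
my_get_vblank_timestamp(struct drm_device *dev, int crtc, int *max_error,
    struct timeval *vblank_time, unsigned flags)
{
	struct drm_crtc *refcrtc;

	refcrtc = my_crtc_for_pipe(dev, crtc);	/* hypothetical lookup */
	if (refcrtc == NULL)
		return -EINVAL;

	/*
	 * Requires drm_calc_timestamping_constants() to have been run for
	 * refcrtc and a working get_scanout_position() in the driver.
	 */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
	    vblank_time, flags, refcrtc);
}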
++
 +/**
-+ * Returns a struct intel_sdvo_output_flags of attached displays.
++ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
++ * vblank interval.
++ *
++ * @dev: DRM device
++ * @crtc: which crtc's vblank timestamp to retrieve
++ * @tvblank: Pointer to target struct timeval which should receive the timestamp
++ * @flags: Flags to pass to driver:
++ *         0 = Default.
++ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
++ *
++ * Fetches the system timestamp corresponding to the time of the most recent
++ * vblank interval on specified crtc. May call into kms-driver to
++ * compute the timestamp with a high-precision GPU specific method.
++ *
++ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
++ * call, i.e., it isn't very precisely locked to the true vblank.
++ *
++ * Returns non-zero if timestamp is considered to be very precise.
 + */
-+#define SDVO_CMD_GET_ATTACHED_DISPLAYS			0x0b
++u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
++			      struct timeval *tvblank, unsigned flags)
++{
++	int ret = 0;
 +
++	/* Define requested maximum error on timestamps (nanoseconds). */
++	int max_error = (int) drm_timestamp_precision * 1000;
++
++	/* Query driver if possible and precision timestamping enabled. */
++	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
++		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
++							tvblank, flags);
++		if (ret > 0)
++			return (u32) ret;
++	}
++
++	/* GPU high precision timestamp query unsupported or failed.
++	 * Return gettimeofday timestamp as best estimate.
++	 */
++	microtime(tvblank);
++
++	return 0;
++}
++
 +/**
-+ * Returns a struct intel_sdvo_ouptut_flags of displays supporting hot plugging.
++ * drm_vblank_count - retrieve "cooked" vblank counter value
++ * @dev: DRM device
++ * @crtc: which counter to retrieve
++ *
++ * Fetches the "cooked" vblank count value that represents the number of
++ * vblank events since the system was booted, including lost events due to
++ * modesetting activity.
 + */
-+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT			0x0c
+ u32 drm_vblank_count(struct drm_device *dev, int crtc)
+ {
+-	return atomic_load_acq_32(&dev->vblank[crtc].count);
++	return atomic_read(&dev->_vblank_count[crtc]);
++}
 +
 +/**
-+ * Takes a struct intel_sdvo_output_flags.
++ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
++ * and the system timestamp corresponding to that vblank counter value.
++ *
++ * @dev: DRM device
++ * @crtc: which counter to retrieve
++ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
++ *
++ * Fetches the "cooked" vblank count value that represents the number of
++ * vblank events since the system was booted, including lost events due to
++ * modesetting activity. Returns corresponding system timestamp of the time
++ * of the vblank interval that corresponds to the current value vblank counter
++ * value.
 + */
-+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG			0x0d
++u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
++			      struct timeval *vblanktime)
++{
++	u32 cur_vblank;
 +
++	/* Read timestamp from slot of _vblank_time ringbuffer
++	 * that corresponds to current vblank count. Retry if
++	 * count has incremented during readout. This works like
++	 * a seqlock.
++	 */
++	do {
++		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
++		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
++		rmb();
++	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
++
++	return cur_vblank;
+ }
+ 
 +/**
-+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
-+ * interrupts enabled.
++ * drm_update_vblank_count - update the master vblank counter
++ * @dev: DRM device
++ * @crtc: counter to update
++ *
++ * Call back into the driver to update the appropriate vblank counter
++ * (specified by @crtc).  Deal with wraparound, if it occurred, and
++ * update the last read value so we can deal with wraparound on the next
++ * call if necessary.
++ *
++ * Only necessary when going from off->on, to account for frames we
++ * didn't get an interrupt for.
++ *
++ * Note: caller must hold dev->vbl_lock since this reads & writes
++ * device vblank fields.
 + */
-+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG			0x0e
+ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+ {
+-	u32 cur_vblank, diff;
++	u32 cur_vblank, diff, tslot, rc;
++	struct timeval t_vblank;
+ 
+ 	/*
+ 	 * Interrupts were disabled prior to this call, so deal with counter
+@@ -289,68 +734,204 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+ 	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
+ 	 * here if the register is small or we had vblank interrupts off for
+ 	 * a long time.
++	 *
++	 * We repeat the hardware vblank counter & timestamp query until
++	 * we get consistent results. This to prevent races between gpu
++	 * updating its hardware counter while we are retrieving the
++	 * corresponding vblank timestamp.
+ 	 */
+-	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+-	diff = cur_vblank - dev->vblank[crtc].last;
+-	if (cur_vblank < dev->vblank[crtc].last) {
++	do {
++		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
++		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
++	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
 +
-+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE		0x0f
-+struct intel_sdvo_get_interrupt_event_source_response {
-+	u16 interrupt_status;
-+	unsigned int ambient_light_interrupt:1;
-+	unsigned int hdmi_audio_encrypt_change:1;
-+	unsigned int pad:6;
-+} __attribute__((packed));
++	/* Deal with counter wrap */
++	diff = cur_vblank - dev->last_vblank[crtc];
++	if (cur_vblank < dev->last_vblank[crtc]) {
+ 		diff += dev->max_vblank_count;
+ 
+-		DRM_DEBUG("vblank[%d].last=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+-		    crtc, dev->vblank[crtc].last, cur_vblank, diff);
++		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
++			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ 	}
+ 
+ 	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+-	    crtc, diff);
++		  crtc, diff);
+ 
+-	atomic_add_rel_32(&dev->vblank[crtc].count, diff);
++	/* Reinitialize corresponding vblank timestamp if high-precision query
++	 * available. Skip this step if query unsupported or failed. Will
++	 * reinitialize delayed at next vblank interrupt in that case.
++	 */
++	if (rc) {
++		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
++		vblanktimestamp(dev, crtc, tslot) = t_vblank;
++	}
 +
++	atomic_add(diff, &dev->_vblank_count[crtc]);
+ }
+ 
 +/**
-+ * Selects which input is affected by future input commands.
++ * drm_vblank_get - get a reference count on vblank events
++ * @dev: DRM device
++ * @crtc: which CRTC to own
 + *
-+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
-+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
-+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
++ * Acquire a reference count on vblank events to avoid having them disabled
++ * while in use.
++ *
++ * RETURNS
++ * Zero on success, nonzero on failure.
 + */
-+#define SDVO_CMD_SET_TARGET_INPUT			0x10
-+struct intel_sdvo_set_target_input_args {
-+	unsigned int target_1:1;
-+	unsigned int pad:7;
-+} __attribute__((packed));
+ int drm_vblank_get(struct drm_device *dev, int crtc)
+ {
+ 	int ret = 0;
+ 
+-	/* Make sure that we are called with the lock held */
+-	mtx_assert(&dev->vbl_lock, MA_OWNED);
+-
++	mtx_lock(&dev->vbl_lock);
+ 	/* Going from 0->1 means we have to enable interrupts again */
+-	if (++dev->vblank[crtc].refcount == 1 &&
+-	    !dev->vblank[crtc].enabled) {
+-		ret = dev->driver->enable_vblank(dev, crtc);
+-		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+-		if (ret)
+-			--dev->vblank[crtc].refcount;
+-		else {
+-			dev->vblank[crtc].enabled = 1;
+-			drm_update_vblank_count(dev, crtc);
++	if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], 1) == 0) {
++		mtx_lock(&dev->vblank_time_lock);
++		if (!dev->vblank_enabled[crtc]) {
++			/* Enable vblank irqs under vblank_time_lock protection.
++			 * All vblank count & timestamp updates are held off
++			 * until we are done reinitializing master counter and
++			 * timestamps. Filtercode in drm_handle_vblank() will
++			 * prevent double-accounting of same vblank interval.
++			 */
++			ret = -dev->driver->enable_vblank(dev, crtc);
++			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
++				  crtc, ret);
++			if (ret)
++				atomic_dec(&dev->vblank_refcount[crtc]);
++			else {
++				dev->vblank_enabled[crtc] = 1;
++				drm_update_vblank_count(dev, crtc);
++			}
++		}
++		mtx_unlock(&dev->vblank_time_lock);
++	} else {
++		if (!dev->vblank_enabled[crtc]) {
++			atomic_dec(&dev->vblank_refcount[crtc]);
++			ret = EINVAL;
+ 		}
+ 	}
+-
+-	if (dev->vblank[crtc].enabled)
+-		dev->vblank[crtc].last =
+-		    dev->driver->get_vblank_counter(dev, crtc);
++	mtx_unlock(&dev->vbl_lock);
+ 
+ 	return ret;
+ }
+ 
++/**
++ * drm_vblank_put - give up ownership of vblank events
++ * @dev: DRM device
++ * @crtc: which counter to give up
++ *
++ * Release ownership of a given vblank counter, turning off interrupts
++ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
++ */
+ void drm_vblank_put(struct drm_device *dev, int crtc)
+ {
+-	/* Make sure that we are called with the lock held */
+-	mtx_assert(&dev->vbl_lock, MA_OWNED);
+-
+-	KASSERT(dev->vblank[crtc].refcount > 0,
+-	    ("invalid refcount"));
++	KASSERT(atomic_read(&dev->vblank_refcount[crtc]) != 0,
++	    ("Too many drm_vblank_put for crtc %d", crtc));
+ 
+ 	/* Last user schedules interrupt disable */
+-	if (--dev->vblank[crtc].refcount == 0)
+-	    callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
+-		(timeout_t *)vblank_disable_fn, (void *)dev);
++	if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], -1) == 1 &&
++	    (drm_vblank_offdelay > 0))
++		callout_reset(&dev->vblank_disable_callout,
++		    (drm_vblank_offdelay * DRM_HZ) / 1000,
++		    vblank_disable_fn, dev);
+ }
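[editor's note] Typical usage of the reference counting above, e.g. while waiting for the counter to advance. This is only a sketch with crude polling; the real waiting in this patch is done by drm_wait_vblank() with proper sleeps.

/* Keep vblank interrupts alive while we need the counter to advance. */
static int
wait_one_vblank(struct drm_device *dev, int crtc)
{
	u32 start;
	int ret;

	ret = drm_vblank_get(dev, crtc);	/* 0 -> 1 enables the irq */
	if (ret != 0)
		return (ret);

	start = drm_vblank_count(dev, crtc);
	while (drm_vblank_count(dev, crtc) == start)
		pause("vblwt", 1);		/* crude polling, for illustration */

	/* Last put schedules the delayed disable via the callout. */
	drm_vblank_put(dev, crtc);
	return (0);
}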
+ 
++void drm_vblank_off(struct drm_device *dev, int crtc)
++{
++	struct drm_pending_vblank_event *e, *t;
++	struct timeval now;
++	unsigned int seq;
 +
++	mtx_lock(&dev->vbl_lock);
++	vblank_disable_and_save(dev, crtc);
++	mtx_lock(&dev->event_lock);
++	wakeup(&dev->_vblank_count[crtc]);
++
++	/* Send any queued vblank events, lest the natives grow disquiet */
++	seq = drm_vblank_count_and_time(dev, crtc, &now);
++	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
++		if (e->pipe != crtc)
++			continue;
++		DRM_DEBUG("Sending premature vblank event on disable: "
++		    "wanted %d, current %d\n",
++			  e->event.sequence, seq);
++
++		e->event.sequence = seq;
++		e->event.tv_sec = now.tv_sec;
++		e->event.tv_usec = now.tv_usec;
++		drm_vblank_put(dev, e->pipe);
++		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
++		drm_event_wakeup(&e->base);
++		CTR3(KTR_DRM, "vblank_event_delivered %d %d %d",
++		    e->base.pid, e->pipe, e->event.sequence);
++	}
++
++	mtx_unlock(&dev->event_lock);
++	mtx_unlock(&dev->vbl_lock);
++}
++
 +/**
-+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
-+ * future output commands.
++ * drm_vblank_pre_modeset - account for vblanks across mode sets
++ * @dev: DRM device
++ * @crtc: CRTC in question
++ * @post: post or pre mode set?
 + *
-+ * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12],
-+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
++ * Account for vblank events across mode setting events, which will likely
++ * reset the hardware frame counter.
 + */
-+#define SDVO_CMD_SET_TARGET_OUTPUT			0x11
++void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
++{
++	/* vblank is not initialized (IRQ not installed ?) */
++	if (!dev->num_crtcs)
++		return;
++	/*
++	 * To avoid all the problems that might happen if interrupts
++	 * were enabled/disabled around or between these calls, we just
++	 * have the kernel take a reference on the CRTC (just once though
++	 * to avoid corrupting the count if multiple, mismatched calls occur),
++	 * so that interrupts remain enabled in the interim.
++	 */
++	if (!dev->vblank_inmodeset[crtc]) {
++		dev->vblank_inmodeset[crtc] = 0x1;
++		if (drm_vblank_get(dev, crtc) == 0)
++			dev->vblank_inmodeset[crtc] |= 0x2;
++	}
++}
 +
-+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1		0x12
-+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2		0x13
-+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1		0x14
-+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2		0x15
-+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1		0x16
-+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2		0x17
-+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1		0x18
-+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2		0x19
-+/* Part 1 */
-+# define SDVO_DTD_CLOCK_LOW				SDVO_I2C_ARG_0
-+# define SDVO_DTD_CLOCK_HIGH				SDVO_I2C_ARG_1
-+# define SDVO_DTD_H_ACTIVE				SDVO_I2C_ARG_2
-+# define SDVO_DTD_H_BLANK				SDVO_I2C_ARG_3
-+# define SDVO_DTD_H_HIGH				SDVO_I2C_ARG_4
-+# define SDVO_DTD_V_ACTIVE				SDVO_I2C_ARG_5
-+# define SDVO_DTD_V_BLANK				SDVO_I2C_ARG_6
-+# define SDVO_DTD_V_HIGH				SDVO_I2C_ARG_7
-+/* Part 2 */
-+# define SDVO_DTD_HSYNC_OFF				SDVO_I2C_ARG_0
-+# define SDVO_DTD_HSYNC_WIDTH				SDVO_I2C_ARG_1
-+# define SDVO_DTD_VSYNC_OFF_WIDTH			SDVO_I2C_ARG_2
-+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH			SDVO_I2C_ARG_3
-+# define SDVO_DTD_DTD_FLAGS				SDVO_I2C_ARG_4
-+# define SDVO_DTD_DTD_FLAG_INTERLACED				(1 << 7)
-+# define SDVO_DTD_DTD_FLAG_STEREO_MASK				(3 << 5)
-+# define SDVO_DTD_DTD_FLAG_INPUT_MASK				(3 << 3)
-+# define SDVO_DTD_DTD_FLAG_SYNC_MASK				(3 << 1)
-+# define SDVO_DTD_SDVO_FLAS				SDVO_I2C_ARG_5
-+# define SDVO_DTD_SDVO_FLAG_STALL				(1 << 7)
-+# define SDVO_DTD_SDVO_FLAG_CENTERED				(0 << 6)
-+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT				(1 << 6)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK			(3 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE			(0 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP			(1 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH			(2 << 4)
-+# define SDVO_DTD_VSYNC_OFF_HIGH			SDVO_I2C_ARG_6
++void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
++{
 +
++	if (dev->vblank_inmodeset[crtc]) {
++		mtx_lock(&dev->vbl_lock);
++		dev->vblank_disable_allowed = 1;
++		mtx_unlock(&dev->vbl_lock);
++
++		if (dev->vblank_inmodeset[crtc] & 0x2)
++			drm_vblank_put(dev, crtc);
++
++		dev->vblank_inmodeset[crtc] = 0;
++	}
++}
++
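For reference, a minimal sketch of how a driver's mode-setting path can bracket the hardware reprogramming with the two helpers above; everything except drm_vblank_pre_modeset()/drm_vblank_post_modeset() is hypothetical:

static void
example_crtc_mode_set(struct drm_device *dev, int pipe)
{
	/* Hold a vblank reference across the mode set, since the
	 * hardware frame counter may reset while the pipe is off. */
	drm_vblank_pre_modeset(dev, pipe);

	/* ... reprogram PLLs, timings and planes for this pipe ... */

	/* Drop the temporary reference; vblank disable is allowed again. */
	drm_vblank_post_modeset(dev, pipe);
}
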
 +/**
-+ * Generates a DTD based on the given width, height, and flags.
++ * drm_modeset_ctl - handle vblank event counter changes across mode switch
++ * @DRM_IOCTL_ARGS: standard ioctl arguments
 + *
-+ * This will be supported by any device supporting scaling or interlaced
-+ * modes.
++ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
++ * ioctls around modesetting so that any lost vblank events are accounted for.
++ *
++ * Generally the counter will reset across mode sets.  If interrupts are
++ * enabled around this call, we don't have to do anything since the counter
++ * will have already been incremented.
 + */
-+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING		0x1a
-+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW		SDVO_I2C_ARG_0
-+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH		SDVO_I2C_ARG_1
-+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW		SDVO_I2C_ARG_2
-+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH		SDVO_I2C_ARG_3
-+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW		SDVO_I2C_ARG_4
-+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH	SDVO_I2C_ARG_5
-+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS		SDVO_I2C_ARG_6
-+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED		(1 << 0)
-+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED		(1 << 1)
+ int drm_modeset_ctl(struct drm_device *dev, void *data,
+ 		    struct drm_file *file_priv)
+ {
+ 	struct drm_modeset_ctl *modeset = data;
+-	int crtc, ret = 0;
++	int ret = 0;
++	unsigned int crtc;
+ 
+ 	/* If drm_vblank_init() hasn't been called yet, just no-op */
+ 	if (!dev->num_crtcs)
+@@ -358,41 +939,19 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
+ 
+ 	crtc = modeset->crtc;
+ 	if (crtc >= dev->num_crtcs) {
+-		ret = EINVAL;
++		ret = -EINVAL;
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * To avoid all the problems that might happen if interrupts
+-	 * were enabled/disabled around or between these calls, we just
+-	 * have the kernel take a reference on the CRTC (just once though
+-	 * to avoid corrupting the count if multiple, mismatch calls occur),
+-	 * so that interrupts remain enabled in the interim.
+-	 */
+ 	switch (modeset->cmd) {
+ 	case _DRM_PRE_MODESET:
+-		DRM_DEBUG("pre-modeset, crtc %d\n", crtc);
+-		DRM_SPINLOCK(&dev->vbl_lock);
+-		if (!dev->vblank[crtc].inmodeset) {
+-			dev->vblank[crtc].inmodeset = 0x1;
+-			if (drm_vblank_get(dev, crtc) == 0)
+-				dev->vblank[crtc].inmodeset |= 0x2;
+-		}
+-		DRM_SPINUNLOCK(&dev->vbl_lock);
++		drm_vblank_pre_modeset(dev, crtc);
+ 		break;
+ 	case _DRM_POST_MODESET:
+-		DRM_DEBUG("post-modeset, crtc %d\n", crtc);
+-		DRM_SPINLOCK(&dev->vbl_lock);
+-		if (dev->vblank[crtc].inmodeset) {
+-			if (dev->vblank[crtc].inmodeset & 0x2)
+-				drm_vblank_put(dev, crtc);
+-			dev->vblank[crtc].inmodeset = 0;
+-		}
+-		dev->vblank_disable_allowed = 1;
+-		DRM_SPINUNLOCK(&dev->vbl_lock);
++		drm_vblank_post_modeset(dev, crtc);
+ 		break;
+ 	default:
+-		ret = EINVAL;
++		ret = -EINVAL;
+ 		break;
+ 	}
+ 
+@@ -400,35 +959,133 @@ out:
+ 	return ret;
+ }
+ 
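From userspace the same bookkeeping is reached through the modeset-ctl ioctl; a rough, illustrative fragment (fd is an already-open DRM device descriptor, error handling omitted):

	struct drm_modeset_ctl ms;

	memset(&ms, 0, sizeof(ms));
	ms.crtc = 0;			/* CRTC about to be reprogrammed */
	ms.cmd = _DRM_PRE_MODESET;
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ms);

	/* ... switch the mode ... */

	ms.cmd = _DRM_POST_MODESET;
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ms);
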
+-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
++static void
++drm_vblank_event_destroy(struct drm_pending_event *e)
++{
 +
-+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1	0x1b
-+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2	0x1c
++	free(e, DRM_MEM_VBLANK);
++}
 +
-+/** Returns a struct intel_sdvo_pixel_clock_range */
-+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE		0x1d
-+/** Returns a struct intel_sdvo_pixel_clock_range */
-+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE		0x1e
++static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
++				  union drm_wait_vblank *vblwait,
++				  struct drm_file *file_priv)
++{
++	struct drm_pending_vblank_event *e;
++	struct timeval now;
++	unsigned int seq;
++	int ret;
 +
-+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
-+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS		0x1f
++	e = malloc(sizeof *e, DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
 +
-+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
-+#define SDVO_CMD_GET_CLOCK_RATE_MULT			0x20
-+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
-+#define SDVO_CMD_SET_CLOCK_RATE_MULT			0x21
-+# define SDVO_CLOCK_RATE_MULT_1X				(1 << 0)
-+# define SDVO_CLOCK_RATE_MULT_2X				(1 << 1)
-+# define SDVO_CLOCK_RATE_MULT_4X				(1 << 3)
++	e->pipe = pipe;
++	e->base.pid = curproc->p_pid;
++	e->event.base.type = DRM_EVENT_VBLANK;
++	e->event.base.length = sizeof e->event;
++	e->event.user_data = vblwait->request.signal;
++	e->base.event = &e->event.base;
++	e->base.file_priv = file_priv;
++	e->base.destroy = drm_vblank_event_destroy;
 +
-+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
-+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
-+struct intel_sdvo_tv_format {
-+	unsigned int ntsc_m:1;
-+	unsigned int ntsc_j:1;
-+	unsigned int ntsc_443:1;
-+	unsigned int pal_b:1;
-+	unsigned int pal_d:1;
-+	unsigned int pal_g:1;
-+	unsigned int pal_h:1;
-+	unsigned int pal_i:1;
++	mtx_lock(&dev->event_lock);
 +
-+	unsigned int pal_m:1;
-+	unsigned int pal_n:1;
-+	unsigned int pal_nc:1;
-+	unsigned int pal_60:1;
-+	unsigned int secam_b:1;
-+	unsigned int secam_d:1;
-+	unsigned int secam_g:1;
-+	unsigned int secam_k:1;
++	if (file_priv->event_space < sizeof e->event) {
++		ret = EBUSY;
++		goto err_unlock;
++	}
 +
-+	unsigned int secam_k1:1;
-+	unsigned int secam_l:1;
-+	unsigned int secam_60:1;
-+	unsigned int hdtv_std_smpte_240m_1080i_59:1;
-+	unsigned int hdtv_std_smpte_240m_1080i_60:1;
-+	unsigned int hdtv_std_smpte_260m_1080i_59:1;
-+	unsigned int hdtv_std_smpte_260m_1080i_60:1;
-+	unsigned int hdtv_std_smpte_274m_1080i_50:1;
++	file_priv->event_space -= sizeof e->event;
++	seq = drm_vblank_count_and_time(dev, pipe, &now);
 +
-+	unsigned int hdtv_std_smpte_274m_1080i_59:1;
-+	unsigned int hdtv_std_smpte_274m_1080i_60:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_23:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_24:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_25:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_29:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_30:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_50:1;
++	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
++	    (seq - vblwait->request.sequence) <= (1 << 23)) {
++		vblwait->request.sequence = seq + 1;
++		vblwait->reply.sequence = vblwait->request.sequence;
++	}
 +
-+	unsigned int hdtv_std_smpte_274m_1080p_59:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_60:1;
-+	unsigned int hdtv_std_smpte_295m_1080i_50:1;
-+	unsigned int hdtv_std_smpte_295m_1080p_50:1;
-+	unsigned int hdtv_std_smpte_296m_720p_59:1;
-+	unsigned int hdtv_std_smpte_296m_720p_60:1;
-+	unsigned int hdtv_std_smpte_296m_720p_50:1;
-+	unsigned int hdtv_std_smpte_293m_480p_59:1;
++	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
++		  vblwait->request.sequence, seq, pipe);
 +
-+	unsigned int hdtv_std_smpte_170m_480i_59:1;
-+	unsigned int hdtv_std_iturbt601_576i_50:1;
-+	unsigned int hdtv_std_iturbt601_576p_50:1;
-+	unsigned int hdtv_std_eia_7702a_480i_60:1;
-+	unsigned int hdtv_std_eia_7702a_480p_60:1;
-+	unsigned int pad:3;
-+} __attribute__((packed));
++	CTR4(KTR_DRM, "vblank_event_queued %d %d rt %x %d", curproc->p_pid, pipe,
++	    vblwait->request.type, vblwait->request.sequence);
 +
-+#define SDVO_CMD_GET_TV_FORMAT				0x28
++	e->event.sequence = vblwait->request.sequence;
++	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
++		e->event.sequence = seq;
++		e->event.tv_sec = now.tv_sec;
++		e->event.tv_usec = now.tv_usec;
++		drm_vblank_put(dev, pipe);
++		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
++		drm_event_wakeup(&e->base);
++		vblwait->reply.sequence = seq;
++		CTR3(KTR_DRM, "vblank_event_wakeup p1 %d %d %d", curproc->p_pid,
++		    pipe, vblwait->request.sequence);
++	} else {
++		/* drm_handle_vblank_events will call drm_vblank_put */
++		list_add_tail(&e->base.link, &dev->vblank_event_list);
++		vblwait->reply.sequence = vblwait->request.sequence;
++	}
 +
-+#define SDVO_CMD_SET_TV_FORMAT				0x29
++	mtx_unlock(&dev->event_lock);
 +
-+/** Returns the resolutiosn that can be used with the given TV format */
-+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT		0x83
-+struct intel_sdvo_sdtv_resolution_request {
-+	unsigned int ntsc_m:1;
-+	unsigned int ntsc_j:1;
-+	unsigned int ntsc_443:1;
-+	unsigned int pal_b:1;
-+	unsigned int pal_d:1;
-+	unsigned int pal_g:1;
-+	unsigned int pal_h:1;
-+	unsigned int pal_i:1;
++	return 0;
 +
-+	unsigned int pal_m:1;
-+	unsigned int pal_n:1;
-+	unsigned int pal_nc:1;
-+	unsigned int pal_60:1;
-+	unsigned int secam_b:1;
-+	unsigned int secam_d:1;
-+	unsigned int secam_g:1;
-+	unsigned int secam_k:1;
++err_unlock:
++	mtx_unlock(&dev->event_lock);
++	free(e, DRM_MEM_VBLANK);
++	drm_vblank_put(dev, pipe);
++	return ret;
++}
 +
-+	unsigned int secam_k1:1;
-+	unsigned int secam_l:1;
-+	unsigned int secam_60:1;
-+	unsigned int pad:5;
-+} __attribute__((packed));
++/**
++ * Wait for VBLANK.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param data user argument, pointing to a drm_wait_vblank structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * This function enables the vblank interrupt on the pipe requested, then
++ * sleeps waiting for the requested sequence number to occur, and drops
++ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
++ * after a timeout with no further vblank waits scheduled).
++ */
++int drm_wait_vblank(struct drm_device *dev, void *data,
++		    struct drm_file *file_priv)
+ {
+ 	union drm_wait_vblank *vblwait = data;
+-	unsigned int flags, seq, crtc;
+ 	int ret = 0;
++	unsigned int flags, seq, crtc, high_crtc;
+ 
+-	if (!dev->irq_enabled)
+-		return EINVAL;
++	if (/*(!drm_dev_to_irq(dev)) || */(!dev->irq_enabled))
++		return (EINVAL);
 +
-+struct intel_sdvo_sdtv_resolution_reply {
-+	unsigned int res_320x200:1;
-+	unsigned int res_320x240:1;
-+	unsigned int res_400x300:1;
-+	unsigned int res_640x350:1;
-+	unsigned int res_640x400:1;
-+	unsigned int res_640x480:1;
-+	unsigned int res_704x480:1;
-+	unsigned int res_704x576:1;
++	if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
++		return (EINVAL);
+ 
+ 	if (vblwait->request.type &
+-	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
++	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
++	      _DRM_VBLANK_HIGH_CRTC_MASK)) {
+ 		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+-		    vblwait->request.type,
+-		    (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+-		return EINVAL;
++			  vblwait->request.type,
++			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
++			   _DRM_VBLANK_HIGH_CRTC_MASK));
++		return (EINVAL);
+ 	}
+ 
+ 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+-	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+-
++	high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
++	if (high_crtc)
++		crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
++	else
++		crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ 	if (crtc >= dev->num_crtcs)
+-		return EINVAL;
++		return (EINVAL);
+ 
+-	DRM_SPINLOCK(&dev->vbl_lock);
+ 	ret = drm_vblank_get(dev, crtc);
+-	DRM_SPINUNLOCK(&dev->vbl_lock);
+ 	if (ret) {
+-		DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
+-		return ret;
++		DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
++		return (ret);
+ 	}
+ 	seq = drm_vblank_count(dev, crtc);
+ 
+@@ -439,59 +1096,158 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
+ 	case _DRM_VBLANK_ABSOLUTE:
+ 		break;
+ 	default:
+-		ret = EINVAL;
++		ret = (EINVAL);
+ 		goto done;
+ 	}
+ 
++	if (flags & _DRM_VBLANK_EVENT) {
++		/* must hold on to the vblank ref until the event fires
++		 * drm_vblank_put will be called asynchronously
++		 */
++		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
++	}
 +
-+	unsigned int res_720x350:1;
-+	unsigned int res_720x400:1;
-+	unsigned int res_720x480:1;
-+	unsigned int res_720x540:1;
-+	unsigned int res_720x576:1;
-+	unsigned int res_768x576:1;
-+	unsigned int res_800x600:1;
-+	unsigned int res_832x624:1;
+ 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ 	    (seq - vblwait->request.sequence) <= (1<<23)) {
+ 		vblwait->request.sequence = seq + 1;
+ 	}
+ 
+-	if (flags & _DRM_VBLANK_SIGNAL) {
+-		/* There have never been any consumers */
+-		ret = EINVAL;
++	dev->last_vblank_wait[crtc] = vblwait->request.sequence;
++	mtx_lock(&dev->vblank_time_lock);
++	while (((drm_vblank_count(dev, crtc) - vblwait->request.sequence) >
++	    (1 << 23)) && dev->irq_enabled) {
++		/*
++		 * The wakeups from the drm_irq_uninstall() and
++		 * drm_vblank_off() may be lost there since vbl_lock
++		 * is not held.  Then, the timeout will wake us; the 3
++		 * seconds delay should not be a problem for
++		 * application when crtc is disabled or irq
++		 * uninstalled anyway.
++		 */
++		ret = msleep(&dev->_vblank_count[crtc], &dev->vblank_time_lock,
++		    PCATCH, "drmvbl", 3 * hz);
++		if (ret != 0)
++			break;
++	}
++	mtx_unlock(&dev->vblank_time_lock);
++	if (ret != EINTR) {
++		struct timeval now;
++		long reply_seq;
 +
-+	unsigned int res_920x766:1;
-+	unsigned int res_1024x768:1;
-+	unsigned int res_1280x1024:1;
-+	unsigned int pad:5;
-+} __attribute__((packed));
++		reply_seq = drm_vblank_count_and_time(dev, crtc, &now);
++		CTR5(KTR_DRM, "wait_vblank %d %d rt %x success %d %d",
++		    curproc->p_pid, crtc, vblwait->request.type,
++		    vblwait->request.sequence, reply_seq);
++		vblwait->reply.sequence = reply_seq;
++		vblwait->reply.tval_sec = now.tv_sec;
++		vblwait->reply.tval_usec = now.tv_usec;
+ 	} else {
+-		DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+-		    vblwait->request.sequence, crtc);
+-		for ( ret = 0 ; !ret && !(((drm_vblank_count(dev, crtc) -
+-		    vblwait->request.sequence) <= (1 << 23)) ||
+-		    !dev->irq_enabled) ; ) {
+-			mtx_lock(&dev->irq_lock);
+-			if (!(((drm_vblank_count(dev, crtc) -
+-			    vblwait->request.sequence) <= (1 << 23)) ||
+-			    !dev->irq_enabled))
+-				ret = mtx_sleep(&dev->vblank[crtc].queue,
+-				    &dev->irq_lock, PCATCH, "vblwtq",
+-				    DRM_HZ);
+-			mtx_unlock(&dev->irq_lock);
+-		}
+-
+-		if (ret != EINTR && ret != ERESTART) {
+-			struct timeval now;
+-
+-			microtime(&now);
+-			vblwait->reply.tval_sec = now.tv_sec;
+-			vblwait->reply.tval_usec = now.tv_usec;
+-			vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+-			DRM_DEBUG("returning %d to client, irq_enabled %d\n",
+-			    vblwait->reply.sequence, dev->irq_enabled);
+-		} else {
+-			DRM_DEBUG("vblank wait interrupted by signal\n");
+-		}
++		CTR5(KTR_DRM, "wait_vblank %d %d rt %x error %d %d",
++		    curproc->p_pid, crtc, vblwait->request.type, ret,
++		    vblwait->request.sequence);
+ 	}
+ 
+ done:
+-	DRM_SPINLOCK(&dev->vbl_lock);
+ 	drm_vblank_put(dev, crtc);
+-	DRM_SPINUNLOCK(&dev->vbl_lock);
+-
+ 	return ret;
+ }
+ 
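For comparison, the plain blocking path through DRM_IOCTL_WAIT_VBLANK looks roughly like this from userspace (a sketch; _DRM_VBLANK_RELATIVE is the usual request type and is assumed here):

	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE;	/* relative to "now" */
	vbl.request.sequence = 1;			/* the next vblank on crtc 0 */

	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
		    (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);
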
+-void drm_handle_vblank(struct drm_device *dev, int crtc)
++void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+ {
+-	atomic_add_rel_32(&dev->vblank[crtc].count, 1);
+-	DRM_WAKEUP(&dev->vblank[crtc].queue);
++	struct drm_pending_vblank_event *e, *t;
++	struct timeval now;
++	unsigned int seq;
 +
-+/* Get supported resolution with squire pixel aspect ratio that can be
-+   scaled for the requested HDTV format */
-+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT		0x85
++	seq = drm_vblank_count_and_time(dev, crtc, &now);
++	CTR2(KTR_DRM, "drm_handle_vblank_events %d %d", seq, crtc);
 +
-+struct intel_sdvo_hdtv_resolution_request {
-+	unsigned int hdtv_std_smpte_240m_1080i_59:1;
-+	unsigned int hdtv_std_smpte_240m_1080i_60:1;
-+	unsigned int hdtv_std_smpte_260m_1080i_59:1;
-+	unsigned int hdtv_std_smpte_260m_1080i_60:1;
-+	unsigned int hdtv_std_smpte_274m_1080i_50:1;
-+	unsigned int hdtv_std_smpte_274m_1080i_59:1;
-+	unsigned int hdtv_std_smpte_274m_1080i_60:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_23:1;
++	mtx_lock(&dev->event_lock);
 +
-+	unsigned int hdtv_std_smpte_274m_1080p_24:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_25:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_29:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_30:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_50:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_59:1;
-+	unsigned int hdtv_std_smpte_274m_1080p_60:1;
-+	unsigned int hdtv_std_smpte_295m_1080i_50:1;
++	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
++		if (e->pipe != crtc)
++			continue;
++		if ((seq - e->event.sequence) > (1<<23))
++			continue;
 +
-+	unsigned int hdtv_std_smpte_295m_1080p_50:1;
-+	unsigned int hdtv_std_smpte_296m_720p_59:1;
-+	unsigned int hdtv_std_smpte_296m_720p_60:1;
-+	unsigned int hdtv_std_smpte_296m_720p_50:1;
-+	unsigned int hdtv_std_smpte_293m_480p_59:1;
-+	unsigned int hdtv_std_smpte_170m_480i_59:1;
-+	unsigned int hdtv_std_iturbt601_576i_50:1;
-+	unsigned int hdtv_std_iturbt601_576p_50:1;
++		e->event.sequence = seq;
++		e->event.tv_sec = now.tv_sec;
++		e->event.tv_usec = now.tv_usec;
++		drm_vblank_put(dev, e->pipe);
++		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
++		drm_event_wakeup(&e->base);
++		CTR3(KTR_DRM, "vblank_event_wakeup p2 %d %d %d", e->base.pid,
++		    e->pipe, e->event.sequence);
++	}
 +
-+	unsigned int hdtv_std_eia_7702a_480i_60:1;
-+	unsigned int hdtv_std_eia_7702a_480p_60:1;
-+	unsigned int pad:6;
-+} __attribute__((packed));
++	mtx_unlock(&dev->event_lock);
+ }
+ 
++/**
++ * drm_handle_vblank - handle a vblank event
++ * @dev: DRM device
++ * @crtc: where this event occurred
++ *
++ * Drivers should call this routine in their vblank interrupt handlers to
++ * update the vblank counter and send any signals that may be pending.
++ */
++bool drm_handle_vblank(struct drm_device *dev, int crtc)
++{
++	u32 vblcount;
++	int64_t diff_ns;
++	struct timeval tvblank;
 +
-+struct intel_sdvo_hdtv_resolution_reply {
-+	unsigned int res_640x480:1;
-+	unsigned int res_800x600:1;
-+	unsigned int res_1024x768:1;
-+	unsigned int res_1280x960:1;
-+	unsigned int res_1400x1050:1;
-+	unsigned int res_1600x1200:1;
-+	unsigned int res_1920x1440:1;
-+	unsigned int res_2048x1536:1;
++	if (!dev->num_crtcs)
++		return false;
 +
-+	unsigned int res_2560x1920:1;
-+	unsigned int res_3200x2400:1;
-+	unsigned int res_3840x2880:1;
-+	unsigned int pad1:5;
++	/* Need timestamp lock to prevent concurrent execution with
++	 * vblank enable/disable, as this would cause inconsistent
++	 * or corrupted timestamps and vblank counts.
++	 */
++	mtx_lock(&dev->vblank_time_lock);
 +
-+	unsigned int res_848x480:1;
-+	unsigned int res_1064x600:1;
-+	unsigned int res_1280x720:1;
-+	unsigned int res_1360x768:1;
-+	unsigned int res_1704x960:1;
-+	unsigned int res_1864x1050:1;
-+	unsigned int res_1920x1080:1;
-+	unsigned int res_2128x1200:1;
++	/* Vblank irq handling disabled. Nothing to do. */
++	if (!dev->vblank_enabled[crtc]) {
++		mtx_unlock(&dev->vblank_time_lock);
++		return false;
++	}
 +
-+	unsigned int res_2560x1400:1;
-+	unsigned int res_2728x1536:1;
-+	unsigned int res_3408x1920:1;
-+	unsigned int res_4264x2400:1;
-+	unsigned int res_5120x2880:1;
-+	unsigned int pad2:3;
++	/* Fetch corresponding timestamp for this vblank interval from
++	 * driver and store it in proper slot of timestamp ringbuffer.
++	 */
 +
-+	unsigned int res_768x480:1;
-+	unsigned int res_960x600:1;
-+	unsigned int res_1152x720:1;
-+	unsigned int res_1124x768:1;
-+	unsigned int res_1536x960:1;
-+	unsigned int res_1680x1050:1;
-+	unsigned int res_1728x1080:1;
-+	unsigned int res_1920x1200:1;
++	/* Get current timestamp and count. */
++	vblcount = atomic_read(&dev->_vblank_count[crtc]);
++	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
 +
-+	unsigned int res_2304x1440:1;
-+	unsigned int res_2456x1536:1;
-+	unsigned int res_3072x1920:1;
-+	unsigned int res_3840x2400:1;
-+	unsigned int res_4608x2880:1;
-+	unsigned int pad3:3;
++	/* Compute time difference to timestamp of last vblank */
++	diff_ns = timeval_to_ns(&tvblank) -
++		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
 +
-+	unsigned int res_1280x1024:1;
-+	unsigned int pad4:7;
++	/* Update vblank timestamp and count if at least
++	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
++	 * difference between last stored timestamp and current
++	 * timestamp. A smaller difference means basically
++	 * identical timestamps. Happens if this vblank has
++	 * been already processed and this is a redundant call,
++	 * e.g., due to spurious vblank interrupts. We need to
++	 * ignore those for accounting.
++	 */
++	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
++		/* Store new timestamp in ringbuffer. */
++		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
 +
-+	unsigned int res_1280x768:1;
-+	unsigned int pad5:7;
-+} __attribute__((packed));
++		/* Increment cooked vblank count. This also atomically commits
++		 * the timestamp computed above.
++		 */
++		atomic_inc(&dev->_vblank_count[crtc]);
++	} else {
++		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
++			  crtc, (int) diff_ns);
++	}
 +
-+/* Get supported power state returns info for encoder and monitor, rely on
-+   last SetTargetInput and SetTargetOutput calls */
-+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES		0x2a
-+/* Get power state returns info for encoder and monitor, rely on last
-+   SetTargetInput and SetTargetOutput calls */
-+#define SDVO_CMD_GET_POWER_STATE			0x2b
-+#define SDVO_CMD_GET_ENCODER_POWER_STATE		0x2b
-+#define SDVO_CMD_SET_ENCODER_POWER_STATE		0x2c
-+# define SDVO_ENCODER_STATE_ON					(1 << 0)
-+# define SDVO_ENCODER_STATE_STANDBY				(1 << 1)
-+# define SDVO_ENCODER_STATE_SUSPEND				(1 << 2)
-+# define SDVO_ENCODER_STATE_OFF					(1 << 3)
-+# define SDVO_MONITOR_STATE_ON					(1 << 4)
-+# define SDVO_MONITOR_STATE_STANDBY				(1 << 5)
-+# define SDVO_MONITOR_STATE_SUSPEND				(1 << 6)
-+# define SDVO_MONITOR_STATE_OFF					(1 << 7)
++	wakeup(&dev->_vblank_count[crtc]);
++	drm_handle_vblank_events(dev, crtc);
 +
-+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING		0x2d
-+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING		0x2e
-+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING		0x2f
-+/**
-+ * The panel power sequencing parameters are in units of milliseconds.
-+ * The high fields are bits 8:9 of the 10-bit values.
-+ */
-+struct sdvo_panel_power_sequencing {
-+	u8 t0;
-+	u8 t1;
-+	u8 t2;
-+	u8 t3;
-+	u8 t4;
++	mtx_unlock(&dev->vblank_time_lock);
++	return true;
++}
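A driver's vblank interrupt handler is expected to feed this path once per pipe whose vblank fired; schematically (pipe_vblank_fired() stands in for the hardware-specific status check and is hypothetical, only drm_handle_vblank() comes from this patch):

static void
example_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	int pipe;

	for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
		if (pipe_vblank_fired(dev, pipe))
			drm_handle_vblank(dev, pipe);
	}
}
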
+diff --git a/sys/dev/drm/drm_linux_list.h b/sys/dev/drm/drm_linux_list.h
+index d412258..3b23a30 100644
+--- sys/dev/drm/drm_linux_list.h
++++ sys/dev/drm/drm_linux_list.h
+@@ -48,8 +48,13 @@ INIT_LIST_HEAD(struct list_head *head) {
+ 	(head)->prev = head;
+ }
+ 
++#define LIST_HEAD_INIT(name) { &(name), &(name) }
 +
-+	unsigned int t0_high:2;
-+	unsigned int t1_high:2;
-+	unsigned int t2_high:2;
-+	unsigned int t3_high:2;
++#define DRM_LIST_HEAD(name) \
++	struct list_head name = LIST_HEAD_INIT(name)
 +
-+	unsigned int t4_high:2;
-+	unsigned int pad:6;
-+} __attribute__((packed));
+ static __inline__ int
+-list_empty(struct list_head *head) {
++list_empty(const struct list_head *head) {
+ 	return (head)->next == head;
+ }
+ 
+@@ -75,6 +80,28 @@ list_del(struct list_head *entry) {
+ 	(entry)->prev->next = (entry)->next;
+ }
+ 
++static inline void list_replace(struct list_head *old,
++				struct list_head *new)
++{
++	new->next = old->next;
++	new->next->prev = new;
++	new->prev = old->prev;
++	new->prev->next = new;
++}
 +
-+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL		0x30
-+struct sdvo_max_backlight_reply {
-+	u8 max_value;
-+	u8 default_value;
-+} __attribute__((packed));
++static inline void list_move(struct list_head *list, struct list_head *head)
++{
++	list_del(list);
++	list_add(list, head);
++}
 +
-+#define SDVO_CMD_GET_BACKLIGHT_LEVEL			0x31
-+#define SDVO_CMD_SET_BACKLIGHT_LEVEL			0x32
++static inline void list_move_tail(struct list_head *list,
++    struct list_head *head)
++{
++	list_del(list);
++	list_add_tail(list, head);
++}
 +
-+#define SDVO_CMD_GET_AMBIENT_LIGHT			0x33
-+struct sdvo_get_ambient_light_reply {
-+	u16 trip_low;
-+	u16 trip_high;
-+	u16 value;
-+} __attribute__((packed));
-+#define SDVO_CMD_SET_AMBIENT_LIGHT			0x34
-+struct sdvo_set_ambient_light_reply {
-+	u16 trip_low;
-+	u16 trip_high;
-+	unsigned int enable:1;
-+	unsigned int pad:7;
-+} __attribute__((packed));
+ static __inline__ void
+ list_del_init(struct list_head *entry) {
+ 	(entry)->next->prev = (entry)->prev;
+@@ -94,6 +121,16 @@ list_del_init(struct list_head *entry) {
+ 	entry != head; 						\
+ 	entry = temp, temp = entry->next)
+ 
++#define list_for_each_entry(pos, head, member)				\
++    for (pos = list_entry((head)->next, __typeof(*pos), member);	\
++	&pos->member != (head);					 	\
++	pos = list_entry(pos->member.next, __typeof(*pos), member))
 +
-+/* Set display power state */
-+#define SDVO_CMD_SET_DISPLAY_POWER_STATE		0x7d
-+# define SDVO_DISPLAY_STATE_ON				(1 << 0)
-+# define SDVO_DISPLAY_STATE_STANDBY			(1 << 1)
-+# define SDVO_DISPLAY_STATE_SUSPEND			(1 << 2)
-+# define SDVO_DISPLAY_STATE_OFF				(1 << 3)
++#define list_for_each_entry_continue_reverse(pos, head, member)         \
++        for (pos = list_entry(pos->member.prev, __typeof(*pos), member);  \
++             &pos->member != (head);    				\
++             pos = list_entry(pos->member.prev, __typeof(*pos), member))
 +
-+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS		0x84
-+struct intel_sdvo_enhancements_reply {
-+	unsigned int flicker_filter:1;
-+	unsigned int flicker_filter_adaptive:1;
-+	unsigned int flicker_filter_2d:1;
-+	unsigned int saturation:1;
-+	unsigned int hue:1;
-+	unsigned int brightness:1;
-+	unsigned int contrast:1;
-+	unsigned int overscan_h:1;
+ /**
+  * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+  * @pos:        the type * to use as a loop cursor.
+@@ -107,4 +144,34 @@ list_del_init(struct list_head *entry) {
+ 	    &pos->member != (head);					\
+ 	    pos = n, n = list_entry(n->member.next, __typeof(*n), member))
+ 
++#define list_first_entry(ptr, type, member) \
++	list_entry((ptr)->next, type, member)
 +
-+	unsigned int overscan_v:1;
-+	unsigned int hpos:1;
-+	unsigned int vpos:1;
-+	unsigned int sharpness:1;
-+	unsigned int dot_crawl:1;
-+	unsigned int dither:1;
-+	unsigned int tv_chroma_filter:1;
-+	unsigned int tv_luma_filter:1;
-+} __attribute__((packed));
 +
-+/* Picture enhancement limits below are dependent on the current TV format,
-+ * and thus need to be queried and set after it.
-+ */
-+#define SDVO_CMD_GET_MAX_FLICKER_FILTER			0x4d
-+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE	0x7b
-+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D		0x52
-+#define SDVO_CMD_GET_MAX_SATURATION			0x55
-+#define SDVO_CMD_GET_MAX_HUE				0x58
-+#define SDVO_CMD_GET_MAX_BRIGHTNESS			0x5b
-+#define SDVO_CMD_GET_MAX_CONTRAST			0x5e
-+#define SDVO_CMD_GET_MAX_OVERSCAN_H			0x61
-+#define SDVO_CMD_GET_MAX_OVERSCAN_V			0x64
-+#define SDVO_CMD_GET_MAX_HPOS				0x67
-+#define SDVO_CMD_GET_MAX_VPOS				0x6a
-+#define SDVO_CMD_GET_MAX_SHARPNESS			0x6d
-+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER		0x74
-+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER			0x77
-+struct intel_sdvo_enhancement_limits_reply {
-+	u16 max_value;
-+	u16 default_value;
-+} __attribute__((packed));
++static inline void
++__list_splice(const struct list_head *list, struct list_head *prev,
++    struct list_head *next)
++{
++	struct list_head *first = list->next;
++	struct list_head *last = list->prev;
 +
-+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION		0x7f
-+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION		0x80
-+# define SDVO_LVDS_COLOR_DEPTH_18			(0 << 0)
-+# define SDVO_LVDS_COLOR_DEPTH_24			(1 << 0)
-+# define SDVO_LVDS_CONNECTOR_SPWG			(0 << 2)
-+# define SDVO_LVDS_CONNECTOR_OPENLDI			(1 << 2)
-+# define SDVO_LVDS_SINGLE_CHANNEL			(0 << 4)
-+# define SDVO_LVDS_DUAL_CHANNEL				(1 << 4)
++	first->prev = prev;
++	prev->next = first;
 +
-+#define SDVO_CMD_GET_FLICKER_FILTER			0x4e
-+#define SDVO_CMD_SET_FLICKER_FILTER			0x4f
-+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE		0x50
-+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE		0x51
-+#define SDVO_CMD_GET_FLICKER_FILTER_2D			0x53
-+#define SDVO_CMD_SET_FLICKER_FILTER_2D			0x54
-+#define SDVO_CMD_GET_SATURATION				0x56
-+#define SDVO_CMD_SET_SATURATION				0x57
-+#define SDVO_CMD_GET_HUE				0x59
-+#define SDVO_CMD_SET_HUE				0x5a
-+#define SDVO_CMD_GET_BRIGHTNESS				0x5c
-+#define SDVO_CMD_SET_BRIGHTNESS				0x5d
-+#define SDVO_CMD_GET_CONTRAST				0x5f
-+#define SDVO_CMD_SET_CONTRAST				0x60
-+#define SDVO_CMD_GET_OVERSCAN_H				0x62
-+#define SDVO_CMD_SET_OVERSCAN_H				0x63
-+#define SDVO_CMD_GET_OVERSCAN_V				0x65
-+#define SDVO_CMD_SET_OVERSCAN_V				0x66
-+#define SDVO_CMD_GET_HPOS				0x68
-+#define SDVO_CMD_SET_HPOS				0x69
-+#define SDVO_CMD_GET_VPOS				0x6b
-+#define SDVO_CMD_SET_VPOS				0x6c
-+#define SDVO_CMD_GET_SHARPNESS				0x6e
-+#define SDVO_CMD_SET_SHARPNESS				0x6f
-+#define SDVO_CMD_GET_TV_CHROMA_FILTER			0x75
-+#define SDVO_CMD_SET_TV_CHROMA_FILTER			0x76
-+#define SDVO_CMD_GET_TV_LUMA_FILTER			0x78
-+#define SDVO_CMD_SET_TV_LUMA_FILTER			0x79
-+struct intel_sdvo_enhancements_arg {
-+	u16 value;
-+} __attribute__((packed));
++	last->next = next;
++	next->prev = last;
++}
 +
-+#define SDVO_CMD_GET_DOT_CRAWL				0x70
-+#define SDVO_CMD_SET_DOT_CRAWL				0x71
-+# define SDVO_DOT_CRAWL_ON					(1 << 0)
-+# define SDVO_DOT_CRAWL_DEFAULT_ON				(1 << 1)
++static inline void
++list_splice(const struct list_head *list, struct list_head *head)
++{
++	if (list_empty(list))
++		return;
 +
-+#define SDVO_CMD_GET_DITHER				0x72
-+#define SDVO_CMD_SET_DITHER				0x73
-+# define SDVO_DITHER_ON						(1 << 0)
-+# define SDVO_DITHER_DEFAULT_ON					(1 << 1)
++	__list_splice(list, head, head->next);
++}
 +
-+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH			0x7a
-+# define SDVO_CONTROL_BUS_PROM				(1 << 0)
-+# define SDVO_CONTROL_BUS_DDC1				(1 << 1)
-+# define SDVO_CONTROL_BUS_DDC2				(1 << 2)
-+# define SDVO_CONTROL_BUS_DDC3				(1 << 3)
++void drm_list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
++    struct list_head *a, struct list_head *b));
 +
-+/* HDMI op codes */
-+#define SDVO_CMD_GET_SUPP_ENCODE	0x9d
-+#define SDVO_CMD_GET_ENCODE		0x9e
-+#define SDVO_CMD_SET_ENCODE		0x9f
-+  #define SDVO_ENCODE_DVI	0x0
-+  #define SDVO_ENCODE_HDMI	0x1
-+#define SDVO_CMD_SET_PIXEL_REPLI	0x8b
-+#define SDVO_CMD_GET_PIXEL_REPLI	0x8c
-+#define SDVO_CMD_GET_COLORIMETRY_CAP	0x8d
-+#define SDVO_CMD_SET_COLORIMETRY	0x8e
-+  #define SDVO_COLORIMETRY_RGB256   0x0
-+  #define SDVO_COLORIMETRY_RGB220   0x1
-+  #define SDVO_COLORIMETRY_YCrCb422 0x3
-+  #define SDVO_COLORIMETRY_YCrCb444 0x4
-+#define SDVO_CMD_GET_COLORIMETRY	0x8f
-+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
-+#define SDVO_CMD_SET_AUDIO_STAT		0x91
-+#define SDVO_CMD_GET_AUDIO_STAT		0x92
-+#define SDVO_CMD_SET_HBUF_INDEX		0x93
-+#define SDVO_CMD_GET_HBUF_INDEX		0x94
-+#define SDVO_CMD_GET_HBUF_INFO		0x95
-+#define SDVO_CMD_SET_HBUF_AV_SPLIT	0x96
-+#define SDVO_CMD_GET_HBUF_AV_SPLIT	0x97
-+#define SDVO_CMD_SET_HBUF_DATA		0x98
-+#define SDVO_CMD_GET_HBUF_DATA		0x99
-+#define SDVO_CMD_SET_HBUF_TXRATE	0x9a
-+#define SDVO_CMD_GET_HBUF_TXRATE	0x9b
-+  #define SDVO_HBUF_TX_DISABLED	(0 << 6)
-+  #define SDVO_HBUF_TX_ONCE	(2 << 6)
-+  #define SDVO_HBUF_TX_VSYNC	(3 << 6)
-+#define SDVO_CMD_GET_AUDIO_TX_INFO	0x9c
-+#define SDVO_NEED_TO_STALL  (1 << 7)
+ #endif /* _DRM_LINUX_LIST_H_ */
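The helpers added above mirror the Linux list API; a minimal usage sketch (the pending_job struct and queue names are invented for illustration):

struct pending_job {
	int		 done;
	struct list_head link;
};

static DRM_LIST_HEAD(job_queue);
static DRM_LIST_HEAD(done_queue);

static void
example_collect_done(void)
{
	struct pending_job *job, *tmp;

	/* The _safe form allows the current entry to be moved off the list. */
	list_for_each_entry_safe(job, tmp, &job_queue, link) {
		if (job->done)
			list_move_tail(&job->link, &done_queue);
	}
}
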
+diff --git a/sys/dev/drm/drm_linux_list_sort.c b/sys/dev/drm/drm_linux_list_sort.c
+new file mode 100644
+index 0000000..e1f2128
+--- /dev/null
++++ sys/dev/drm/drm_linux_list_sort.c
+@@ -0,0 +1,75 @@
++/*
++ * Copyright (c) 2011 The FreeBSD Foundation
++ * All rights reserved.
++ *
++ * This software was developed by Konstantin Belousov under sponsorship from
++ * the FreeBSD Foundation.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
 +
-+struct intel_sdvo_encode {
-+	u8 dvi_rev;
-+	u8 hdmi_rev;
-+} __attribute__ ((packed));
-
-Property changes on: stable/9/sys/dev/drm/intel_sdvo_regs.h
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/drm_drv.c
-===================================================================
-diff --git sys/dev/drm/drm_drv.c sys/dev/drm/drm_drv.c
---- sys/dev/drm/drm_drv.c	(revision 230124)
-+++ sys/dev/drm/drm_drv.c	(working copy)
-@@ -41,22 +41,47 @@
- #include "dev/drm/drmP.h"
- #include "dev/drm/drm.h"
- #include "dev/drm/drm_sarea.h"
-+#include "dev/drm/drm_mode.h"
- 
- #ifdef DRM_DEBUG_DEFAULT_ON
- int drm_debug_flag = 1;
- #else
--int drm_debug_flag = 0;
-+int drm_debug_flag = 2;
- #endif
-+int drm_notyet_flag = 0;
- 
-+unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
-+unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
++#include "dev/drm/drmP.h"
++__FBSDID();
 +
- static int drm_load(struct drm_device *dev);
- static void drm_unload(struct drm_device *dev);
- static drm_pci_id_list_t *drm_find_description(int vendor, int device,
-     drm_pci_id_list_t *idlist);
- 
++struct drm_list_sort_thunk {
++	int (*cmp)(void *, struct list_head *, struct list_head *);
++	void *priv;
++};
++
 +static int
-+drm_modevent(module_t mod, int type, void *data)
++drm_le_cmp(void *priv, const void *d1, const void *d2)
 +{
++	struct list_head *le1, *le2;
++	struct drm_list_sort_thunk *thunk;
 +
-+	switch (type) {
-+	case MOD_LOAD:
-+		TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
-+		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
-+		break;
-+	}
-+	return (0);
++	thunk = priv;
++	le1 = __DECONST(struct list_head *, d1);
++	le2 = __DECONST(struct list_head *, d2);
++	return ((thunk->cmp)(thunk->priv, le1, le2));
 +}
 +
-+static moduledata_t drm_mod = {
-+	"drm",
-+	drm_modevent,
-+	0
-+}; 
-+DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
- MODULE_VERSION(drm, 1);
- MODULE_DEPEND(drm, agp, 1, 1, 1);
- MODULE_DEPEND(drm, pci, 1, 1, 1);
- MODULE_DEPEND(drm, mem, 1, 1, 1);
-+MODULE_DEPEND(drm, iicbus, 1, 1, 1);
++/*
++ * Punt and use array sort.
++ */
++void
++drm_list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
++    struct list_head *a, struct list_head *b))
++{
++	struct drm_list_sort_thunk thunk;
++	struct list_head **ar, *le;
++	int count, i;
++
++	count = 0;
++	list_for_each(le, head)
++		count++;
++	ar = malloc(sizeof(struct list_head *) * count, M_TEMP, M_WAITOK);
++	i = 0;
++	list_for_each(le, head)
++		ar[i++] = le;
++	thunk.cmp = cmp;
++	thunk.priv = priv;
++	qsort_r(ar, count, sizeof(struct list_head *), &thunk, drm_le_cmp);
++	INIT_LIST_HEAD(head);
++	for (i = 0; i < count; i++)
++		list_add_tail(ar[i], head);
++	free(ar, M_TEMP);
++}
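drm_list_sort() follows the Linux list_sort() calling convention on top of qsort_r(); a usage sketch (struct and field names are invented for illustration):

struct span {
	unsigned long	 start;
	struct list_head link;
};

static int
span_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct span *sa = list_entry(a, struct span, link);
	struct span *sb = list_entry(b, struct span, link);

	/* Sort ascending by start offset. */
	return (sa->start < sb->start ? -1 : sa->start > sb->start ? 1 : 0);
}

static void
example_sort_spans(struct list_head *span_list)
{
	drm_list_sort(NULL, span_list, span_cmp);
}
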
+diff --git a/sys/dev/drm/drm_lock.c b/sys/dev/drm/drm_lock.c
+index 28573c8..1b89bf5 100644
+--- sys/dev/drm/drm_lock.c
++++ sys/dev/drm/drm_lock.c
+@@ -81,7 +81,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 		}
  
- static drm_ioctl_desc_t		  drm_ioctls[256] = {
- 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
-@@ -79,6 +104,9 @@
- 	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- 	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+ 		/* Contention */
+-		ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
++		ret = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
+ 		    PCATCH, "drmlk2", 0);
+ 		if (ret != 0)
+ 			break;
+diff --git a/sys/dev/drm/drm_memory.c b/sys/dev/drm/drm_memory.c
+index 409ea7d..992aeda 100644
+--- sys/dev/drm/drm_memory.c
++++ sys/dev/drm/drm_memory.c
+@@ -1,7 +1,11 @@
+ /*-
+  *Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+- * All Rights Reserved.
++ * Copyright (c) 2011 The FreeBSD Foundation
++ * All rights reserved.
++ *
++ * Portions of this software were developed by Konstantin Belousov
++ * under sponsorship from the FreeBSD Foundation.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the "Software"),
+@@ -62,6 +66,7 @@ MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
+ MALLOC_DEFINE(DRM_MEM_DRAWABLE, "drm_drawable", "DRM DRAWABLE Data Structures");
+ MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
+ MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
++MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
  
-+	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
-+	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+ void drm_mem_init(void)
+ {
+@@ -113,3 +118,10 @@ drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags)
+ 	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
+ 	return mem_range_attr_set(&mrdesc, &act);
+ }
 +
- 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- 	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- 	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-@@ -115,9 +143,39 @@
- 
- 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
--	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
-+	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
- 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
- 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++void
++drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
++{
 +
-+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
-+
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- };
++	pmap_invalidate_cache_pages(pages, num_pages);
++}
+diff --git a/sys/dev/drm/drm_mm.c b/sys/dev/drm/drm_mm.c
+index bab36c1..2ef19d8 100644
+--- sys/dev/drm/drm_mm.c
++++ sys/dev/drm/drm_mm.c
+@@ -49,44 +49,12 @@ __FBSDID("$FreeBSD$");
  
- static struct cdevsw drm_cdevsw = {
-@@ -127,6 +185,7 @@
- 	.d_ioctl =	drm_ioctl,
- 	.d_poll =	drm_poll,
- 	.d_mmap =	drm_mmap,
-+	.d_mmap_single = drm_gem_mmap_single,
- 	.d_name =	"drm",
- 	.d_flags =	D_TRACKCLOSE
- };
-@@ -162,19 +221,9 @@
+ #define MM_UNUSED_TARGET 4
+ 
+-unsigned long drm_mm_tail_space(struct drm_mm *mm)
+-{
+-	struct list_head *tail_node;
+-	struct drm_mm_node *entry;
+-
+-	tail_node = mm->ml_entry.prev;
+-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+-	if (!entry->free)
+-		return 0;
+-
+-	return entry->size;
+-}
+-
+-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+-{
+-	struct list_head *tail_node;
+-	struct drm_mm_node *entry;
+-
+-	tail_node = mm->ml_entry.prev;
+-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+-	if (!entry->free)
+-		return -ENOMEM;
+-
+-	if (entry->size <= size)
+-		return -ENOMEM;
+-
+-	entry->size -= size;
+-	return 0;
+-}
+-
+ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
  {
- 	drm_pci_id_list_t *id_entry;
- 	int vendor, device;
--#if __FreeBSD_version < 700010
--	device_t realdev;
+ 	struct drm_mm_node *child;
  
--	if (!strcmp(device_get_name(kdev), "drmsub"))
--		realdev = device_get_parent(kdev);
+-	if (atomic)
+-		child = malloc(sizeof(*child), DRM_MEM_MM, M_NOWAIT);
 -	else
--		realdev = kdev;
--	vendor = pci_get_vendor(realdev);
--	device = pci_get_device(realdev);
--#else
- 	vendor = pci_get_vendor(kdev);
- 	device = pci_get_device(kdev);
--#endif
+-		child = malloc(sizeof(*child), DRM_MEM_MM, M_WAITOK);
++	child = malloc(sizeof(*child), DRM_MEM_MM, M_ZERO |
++	    (atomic ? M_NOWAIT : M_WAITOK));
  
- 	if (pci_get_class(kdev) != PCIC_DISPLAY
- 	    || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
-@@ -201,14 +250,7 @@
- 	unit = device_get_unit(kdev);
- 	dev = device_get_softc(kdev);
+ 	if (unlikely(child == NULL)) {
+ 		mtx_lock(&mm->unused_lock);
+@@ -95,8 +63,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+ 		else {
+ 			child =
+ 			    list_entry(mm->unused_nodes.next,
+-				       struct drm_mm_node, fl_entry);
+-			list_del(&child->fl_entry);
++				       struct drm_mm_node, node_list);
++			list_del(&child->node_list);
+ 			--mm->num_unused;
+ 		}
+ 		mtx_unlock(&mm->unused_lock);
+@@ -120,244 +88,470 @@ int drm_mm_pre_get(struct drm_mm *mm)
+ 			return ret;
+ 		}
+ 		++mm->num_unused;
+-		list_add_tail(&node->fl_entry, &mm->unused_nodes);
++		list_add_tail(&node->node_list, &mm->unused_nodes);
+ 	}
+ 	mtx_unlock(&mm->unused_lock);
+ 	return 0;
+ }
  
--#if __FreeBSD_version < 700010
--	if (!strcmp(device_get_name(kdev), "drmsub"))
--		dev->device = device_get_parent(kdev);
--	else
--		dev->device = kdev;
--#else
- 	dev->device = kdev;
--#endif
- 	dev->devnode = make_dev(&drm_cdevsw,
- 			0,
- 			DRM_DEV_UID,
-@@ -217,11 +259,7 @@
- 			"dri/card%d", unit);
- 	dev->devnode->si_drv1 = dev;
+-static int drm_mm_create_tail_node(struct drm_mm *mm,
+-				   unsigned long start,
+-				   unsigned long size, int atomic)
++static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+ {
+-	struct drm_mm_node *child;
+-
+-	child = drm_mm_kmalloc(mm, atomic);
+-	if (unlikely(child == NULL))
+-		return -ENOMEM;
+-
+-	child->free = 1;
+-	child->size = size;
+-	child->start = start;
+-	child->mm = mm;
++	return hole_node->start + hole_node->size;
++}
  
--#if __FreeBSD_version >= 700053
- 	dev->pci_domain = pci_get_domain(dev->device);
--#else
--	dev->pci_domain = 0;
--#endif
- 	dev->pci_bus = pci_get_bus(dev->device);
- 	dev->pci_slot = pci_get_slot(dev->device);
- 	dev->pci_func = pci_get_function(dev->device);
-@@ -258,6 +296,8 @@
- 	mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
- 	mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
- 	mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
-+	mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
-+	sx_init(&dev->dev_struct_lock, "drmslk");
+-	list_add_tail(&child->ml_entry, &mm->ml_entry);
+-	list_add_tail(&child->fl_entry, &mm->fl_entry);
++static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
++{
++	struct drm_mm_node *next_node =
++		list_entry(hole_node->node_list.next, struct drm_mm_node,
++			   node_list);
  
- 	id_entry = drm_find_description(dev->pci_vendor,
- 	    dev->pci_device, idlist);
-@@ -313,7 +353,7 @@
- 	drm_local_map_t *map;
- 	int i;
+-	return 0;
++	return next_node->start;
+ }
  
--	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
-+	DRM_LOCK_ASSERT(dev);
+-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
++static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
++				 struct drm_mm_node *node,
++				 unsigned long size, unsigned alignment)
+ {
+-	struct list_head *tail_node;
+-	struct drm_mm_node *entry;
++	struct drm_mm *mm = hole_node->mm;
++	unsigned long tmp = 0, wasted = 0;
++	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
++	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
++
++	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));
++
++	if (alignment)
++		tmp = hole_start % alignment;
++
++	if (!tmp) {
++		hole_node->hole_follows = 0;
++		list_del_init(&hole_node->hole_stack);
++	} else
++		wasted = alignment - tmp;
++
++	node->start = hole_start + wasted;
++	node->size = size;
++	node->mm = mm;
++	node->allocated = 1;
  
- 	/* prebuild the SAREA */
- 	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
-@@ -338,7 +378,8 @@
+-	tail_node = mm->ml_entry.prev;
+-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+-	if (!entry->free) {
+-		return drm_mm_create_tail_node(mm, entry->start + entry->size,
+-					       size, atomic);
++	INIT_LIST_HEAD(&node->hole_stack);
++	list_add(&node->node_list, &hole_node->node_list);
++
++	KASSERT(node->start + node->size <= hole_end, ("hole pos"));
++
++	if (node->start + node->size < hole_end) {
++		list_add(&node->hole_stack, &mm->hole_stack);
++		node->hole_follows = 1;
++	} else {
++		node->hole_follows = 0;
  	}
+-	entry->size += size;
+-	return 0;
+ }
  
- 	dev->lock.lock_queue = 0;
--	dev->irq_enabled = 0;
-+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-+		dev->irq_enabled = 0;
- 	dev->context_flag = 0;
- 	dev->last_context = 0;
- 	dev->if_version = 0;
-@@ -356,14 +397,14 @@
- 	drm_local_map_t *map, *mapsave;
- 	int i;
+-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+-						 unsigned long size,
+-						 int atomic)
++struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
++					     unsigned long size,
++					     unsigned alignment,
++					     int atomic)
+ {
+-	struct drm_mm_node *child;
++	struct drm_mm_node *node;
  
--	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
-+	DRM_LOCK_ASSERT(dev);
+-	child = drm_mm_kmalloc(parent->mm, atomic);
+-	if (unlikely(child == NULL))
++	node = drm_mm_kmalloc(hole_node->mm, atomic);
++	if (unlikely(node == NULL))
+ 		return NULL;
  
- 	DRM_DEBUG("\n");
+-	INIT_LIST_HEAD(&child->fl_entry);
++	drm_mm_insert_helper(hole_node, node, size, alignment);
  
- 	if (dev->driver->lastclose != NULL)
- 		dev->driver->lastclose(dev);
+-	child->free = 0;
+-	child->size = size;
+-	child->start = parent->start;
+-	child->mm = parent->mm;
++	return node;
++}
  
--	if (dev->irq_enabled)
-+	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
- 		drm_irq_uninstall(dev);
+-	list_add_tail(&child->ml_entry, &parent->ml_entry);
+-	INIT_LIST_HEAD(&child->fl_entry);
++int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
++		       unsigned long size, unsigned alignment)
++{
++	struct drm_mm_node *hole_node;
  
- 	if (dev->unique) {
-@@ -456,17 +497,7 @@
- 	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
- 		atomic_set(&dev->counts[i], 0);
+-	parent->size -= size;
+-	parent->start += size;
+-	return child;
+-}
++	hole_node = drm_mm_search_free(mm, size, alignment, 0);
++	if (!hole_node)
++		return -ENOSPC;
  
--	if (dev->driver->load != NULL) {
--		DRM_LOCK();
--		/* Shared code returns -errno. */
--		retcode = -dev->driver->load(dev,
--		    dev->id_entry->driver_private);
--		if (pci_enable_busmaster(dev->device))
--			DRM_ERROR("Request to enable bus-master failed.\n");
--		DRM_UNLOCK();
--		if (retcode != 0)
--			goto error;
--	}
-+	INIT_LIST_HEAD(&dev->vblank_event_list);
++	drm_mm_insert_helper(hole_node, node, size, alignment);
  
- 	if (drm_core_has_AGP(dev)) {
- 		if (drm_device_is_agp(dev))
-@@ -494,9 +525,31 @@
- 	dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
- 	if (dev->drw_unrhdr == NULL) {
- 		DRM_ERROR("Couldn't allocate drawable number allocator\n");
-+		retcode = ENOMEM;
- 		goto error;
+-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+-					     unsigned long size,
+-					     unsigned alignment,
+-					     int atomic)
++	return 0;
++}
++
++static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
++				       struct drm_mm_node *node,
++				       unsigned long size, unsigned alignment,
++				       unsigned long start, unsigned long end)
+ {
++	struct drm_mm *mm = hole_node->mm;
++	unsigned long tmp = 0, wasted = 0;
++	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
++	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ 
+-	struct drm_mm_node *align_splitoff = NULL;
+-	unsigned tmp = 0;
++	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));
+ 
++	if (hole_start < start)
++		wasted += start - hole_start;
+ 	if (alignment)
+-		tmp = node->start % alignment;
++		tmp = (hole_start + wasted) % alignment;
++
++	if (tmp)
++		wasted += alignment - tmp;
+ 
+-	if (tmp) {
+-		align_splitoff =
+-		    drm_mm_split_at_start(node, alignment - tmp, atomic);
+-		if (unlikely(align_splitoff == NULL))
+-			return NULL;
++	if (!wasted) {
++		hole_node->hole_follows = 0;
++		list_del_init(&hole_node->hole_stack);
  	}
  
-+	if (dev->driver->driver_features & DRIVER_GEM) {
-+		retcode = drm_gem_init(dev);
-+		if (retcode != 0) {
-+			DRM_ERROR("Cannot initialize graphics execution "
-+				  "manager (GEM)\n");
-+			goto error1;
-+		}
-+	}
+-	if (node->size == size) {
+-		list_del_init(&node->fl_entry);
+-		node->free = 0;
++	node->start = hole_start + wasted;
++	node->size = size;
++	node->mm = mm;
++	node->allocated = 1;
 +
-+	if (dev->driver->load != NULL) {
-+		DRM_LOCK();
-+		/* Shared code returns -errno. */
-+		retcode = -dev->driver->load(dev,
-+		    dev->id_entry->driver_private);
-+		if (pci_enable_busmaster(dev->device))
-+			DRM_ERROR("Request to enable bus-master failed.\n");
-+		DRM_UNLOCK();
-+		if (retcode != 0)
-+			goto error;
-+	}
++	INIT_LIST_HEAD(&node->hole_stack);
++	list_add(&node->node_list, &hole_node->node_list);
 +
- 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
- 	    dev->driver->name,
- 	    dev->driver->major,
-@@ -506,6 +559,8 @@
++	KASSERT(node->start + node->size <= hole_end, ("hole_end"));
++	KASSERT(node->start + node->size <= end, ("end"));
++
++	if (node->start + node->size < hole_end) {
++		list_add(&node->hole_stack, &mm->hole_stack);
++		node->hole_follows = 1;
+ 	} else {
+-		node = drm_mm_split_at_start(node, size, atomic);
++		node->hole_follows = 0;
+ 	}
++}
  
- 	return 0;
+-	if (align_splitoff)
+-		drm_mm_put_block(align_splitoff);
++struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end,
++						int atomic)
++{
++	struct drm_mm_node *node;
++
++	node = drm_mm_kmalloc(hole_node->mm, atomic);
++	if (unlikely(node == NULL))
++		return NULL;
++
++	drm_mm_insert_helper_range(hole_node, node, size, alignment,
++				   start, end);
  
-+error1:
-+	delete_unrhdr(dev->drw_unrhdr);
- error:
- 	drm_sysctl_cleanup(dev);
- 	DRM_LOCK();
-@@ -517,6 +572,8 @@
- 	mtx_destroy(&dev->vbl_lock);
- 	mtx_destroy(&dev->irq_lock);
- 	mtx_destroy(&dev->dev_lock);
-+	mtx_destroy(&dev->event_lock);
-+	sx_destroy(&dev->dev_struct_lock);
- 
- 	return retcode;
+ 	return node;
  }
-@@ -532,6 +589,9 @@
  
- 	drm_ctxbitmap_cleanup(dev);
++int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
++				unsigned long size, unsigned alignment,
++				unsigned long start, unsigned long end)
++{
++	struct drm_mm_node *hole_node;
++
++	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
++						start, end, 0);
++	if (!hole_node)
++		return -ENOSPC;
++
++	drm_mm_insert_helper_range(hole_node, node, size, alignment,
++				   start, end);
++
++	return 0;
++}
++
++void drm_mm_remove_node(struct drm_mm_node *node)
++{
++	struct drm_mm *mm = node->mm;
++	struct drm_mm_node *prev_node;
++
++	KASSERT(!node->scanned_block && !node->scanned_prev_free
++	    && !node->scanned_next_free, ("node"));
++
++	prev_node =
++	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
++
++	if (node->hole_follows) {
++		KASSERT(drm_mm_hole_node_start(node)
++			!= drm_mm_hole_node_end(node), ("hole_follows"));
++		list_del(&node->hole_stack);
++	} else
++		KASSERT(drm_mm_hole_node_start(node)
++		       == drm_mm_hole_node_end(node), ("!hole_follows"));
++
++	if (!prev_node->hole_follows) {
++		prev_node->hole_follows = 1;
++		list_add(&prev_node->hole_stack, &mm->hole_stack);
++	} else
++		list_move(&prev_node->hole_stack, &mm->hole_stack);
++
++	list_del(&node->node_list);
++	node->allocated = 0;
++}
++
+ /*
+  * Put a block. Merge with the previous and / or next block if they are free.
+  * Otherwise add to the free stack.
+  */
  
-+	if (dev->driver->driver_features & DRIVER_GEM)
-+		drm_gem_destroy(dev);
+-void drm_mm_put_block(struct drm_mm_node *cur)
++void drm_mm_put_block(struct drm_mm_node *node)
+ {
++	struct drm_mm *mm = node->mm;
+ 
+-	struct drm_mm *mm = cur->mm;
+-	struct list_head *cur_head = &cur->ml_entry;
+-	struct list_head *root_head = &mm->ml_entry;
+-	struct drm_mm_node *prev_node = NULL;
+-	struct drm_mm_node *next_node;
++	drm_mm_remove_node(node);
+ 
+-	int merged = 0;
++	mtx_lock(&mm->unused_lock);
++	if (mm->num_unused < MM_UNUSED_TARGET) {
++		list_add(&node->node_list, &mm->unused_nodes);
++		++mm->num_unused;
++	} else
++		free(node, DRM_MEM_MM);
++	mtx_unlock(&mm->unused_lock);
++}
+ 
+-	if (cur_head->prev != root_head) {
+-		prev_node =
+-		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+-		if (prev_node->free) {
+-			prev_node->size += cur->size;
+-			merged = 1;
+-		}
+-	}
+-	if (cur_head->next != root_head) {
+-		next_node =
+-		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+-		if (next_node->free) {
+-			if (merged) {
+-				prev_node->size += next_node->size;
+-				list_del(&next_node->ml_entry);
+-				list_del(&next_node->fl_entry);
+-				if (mm->num_unused < MM_UNUSED_TARGET) {
+-					list_add(&next_node->fl_entry,
+-						 &mm->unused_nodes);
+-					++mm->num_unused;
+-				} else
+-					free(next_node, DRM_MEM_MM);
+-			} else {
+-				next_node->size += cur->size;
+-				next_node->start = cur->start;
+-				merged = 1;
+-			}
+-		}
++static int check_free_hole(unsigned long start, unsigned long end,
++			   unsigned long size, unsigned alignment)
++{
++	unsigned wasted = 0;
 +
- 	if (dev->agp && dev->agp->mtrr) {
- 		int __unused retcode;
- 
-@@ -582,6 +642,8 @@
- 	mtx_destroy(&dev->vbl_lock);
- 	mtx_destroy(&dev->irq_lock);
- 	mtx_destroy(&dev->dev_lock);
-+	mtx_destroy(&dev->event_lock);
-+	sx_destroy(&dev->dev_struct_lock);
++	if (end - start < size)
++		return 0;
++
++	if (alignment) {
++		unsigned tmp = start % alignment;
++		if (tmp)
++			wasted = alignment - tmp;
+ 	}
+-	if (!merged) {
+-		cur->free = 1;
+-		list_add(&cur->fl_entry, &mm->fl_entry);
+-	} else {
+-		list_del(&cur->ml_entry);
+-		if (mm->num_unused < MM_UNUSED_TARGET) {
+-			list_add(&cur->fl_entry, &mm->unused_nodes);
+-			++mm->num_unused;
+-		} else
+-			free(cur, DRM_MEM_MM);
++
++	if (end >= start + size + wasted) {
++		return 1;
+ 	}
++
++	return 0;
  }
  
- int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
-@@ -652,6 +714,9 @@
- 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- 	    DRM_CURRENTPID, (long)dev->device, dev->open_count);
++
+ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ 				       unsigned long size,
+ 				       unsigned alignment, int best_match)
+ {
+-	struct list_head *list;
+-	const struct list_head *free_stack = &mm->fl_entry;
+ 	struct drm_mm_node *entry;
+ 	struct drm_mm_node *best;
+ 	unsigned long best_size;
+-	unsigned wasted;
  
-+	if (dev->driver->driver_features & DRIVER_GEM)
-+		drm_gem_release(dev, file_priv);
+ 	best = NULL;
+ 	best_size = ~0UL;
+ 
+-	list_for_each(list, free_stack) {
+-		entry = list_entry(list, struct drm_mm_node, fl_entry);
+-		wasted = 0;
+-
+-		if (entry->size < size)
++	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
++		KASSERT(entry->hole_follows, ("hole_follows"));
++		if (!check_free_hole(drm_mm_hole_node_start(entry),
++				     drm_mm_hole_node_end(entry),
++				     size, alignment))
+ 			continue;
+ 
+-		if (alignment) {
+-			register unsigned tmp = entry->start % alignment;
+-			if (tmp)
+-				wasted += alignment - tmp;
++		if (!best_match)
++			return entry;
 +
- 	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
- 	    && dev->lock.file_priv == file_priv) {
- 		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
-@@ -683,8 +748,8 @@
- 				break;	/* Got lock */
- 			}
- 			/* Contention */
--			retcode = mtx_sleep((void *)&dev->lock.lock_queue,
--			    &dev->dev_lock, PCATCH, "drmlk2", 0);
-+			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
-+			    PCATCH, "drmlk2", 0);
- 			if (retcode)
- 				break;
++		if (entry->size < best_size) {
++			best = entry;
++			best_size = entry->size;
  		}
-@@ -699,6 +764,7 @@
- 		drm_reclaim_buffers(dev, file_priv);
++	}
  
- 	funsetown(&dev->buf_sigio);
-+	seldrain(&file_priv->event_poll);
+-		if (entry->size >= size + wasted) {
+-			if (!best_match)
+-				return entry;
+-			if (size < best_size) {
+-				best = entry;
+-				best_size = entry->size;
+-			}
++	return best;
++}
++
++struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end,
++						int best_match)
++{
++	struct drm_mm_node *entry;
++	struct drm_mm_node *best;
++	unsigned long best_size;
++
++	KASSERT(!mm->scanned_blocks, ("scanned"));
++
++	best = NULL;
++	best_size = ~0UL;
++
++	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
++		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
++			start : drm_mm_hole_node_start(entry);
++		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
++			end : drm_mm_hole_node_end(entry);
++
++		KASSERT(entry->hole_follows, ("hole_follows"));
++		if (!check_free_hole(adj_start, adj_end, size, alignment))
++			continue;
++
++		if (!best_match)
++			return entry;
++
++		if (entry->size < best_size) {
++			best = entry;
++			best_size = entry->size;
+ 		}
+ 	}
  
- 	if (dev->driver->postclose != NULL)
- 		dev->driver->postclose(dev, file_priv);
-@@ -788,16 +854,25 @@
- 		return EACCES;
+ 	return best;
+ }
  
- 	if (is_driver_ioctl) {
--		DRM_LOCK();
-+		if ((ioctl->flags & DRM_UNLOCKED) == 0)
-+			DRM_LOCK();
- 		/* shared code returns -errno */
- 		retcode = -func(dev, data, file_priv);
--		DRM_UNLOCK();
-+		if ((ioctl->flags & DRM_UNLOCKED) == 0)
-+			DRM_UNLOCK();
- 	} else {
- 		retcode = func(dev, data, file_priv);
- 	}
- 
- 	if (retcode != 0)
- 		DRM_DEBUG("    returning %d\n", retcode);
-+	if (retcode != 0 &&
-+	    (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
-+		printf(
-+"pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
-+		    DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
-+		    file_priv->authenticated, retcode);
++void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
++{
++	list_replace(&old->node_list, &new->node_list);
++	list_replace(&old->hole_stack, &new->hole_stack);
++	new->hole_follows = old->hole_follows;
++	new->mm = old->mm;
++	new->start = old->start;
++	new->size = old->size;
++
++	old->allocated = 0;
++	new->allocated = 1;
++}
++
++void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
++		      unsigned alignment)
++{
++	mm->scan_alignment = alignment;
++	mm->scan_size = size;
++	mm->scanned_blocks = 0;
++	mm->scan_hit_start = 0;
++	mm->scan_hit_size = 0;
++	mm->scan_check_range = 0;
++	mm->prev_scanned_node = NULL;
++}
++
++void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
++				 unsigned alignment,
++				 unsigned long start,
++				 unsigned long end)
++{
++	mm->scan_alignment = alignment;
++	mm->scan_size = size;
++	mm->scanned_blocks = 0;
++	mm->scan_hit_start = 0;
++	mm->scan_hit_size = 0;
++	mm->scan_start = start;
++	mm->scan_end = end;
++	mm->scan_check_range = 1;
++	mm->prev_scanned_node = NULL;
++}
++
++int drm_mm_scan_add_block(struct drm_mm_node *node)
++{
++	struct drm_mm *mm = node->mm;
++	struct drm_mm_node *prev_node;
++	unsigned long hole_start, hole_end;
++	unsigned long adj_start;
++	unsigned long adj_end;
++
++	mm->scanned_blocks++;
++
++	KASSERT(!node->scanned_block, ("node->scanned_block"));
++	node->scanned_block = 1;
++
++	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
++			       node_list);
++
++	node->scanned_preceeds_hole = prev_node->hole_follows;
++	prev_node->hole_follows = 1;
++	list_del(&node->node_list);
++	node->node_list.prev = &prev_node->node_list;
++	node->node_list.next = &mm->prev_scanned_node->node_list;
++	mm->prev_scanned_node = node;
++
++	hole_start = drm_mm_hole_node_start(prev_node);
++	hole_end = drm_mm_hole_node_end(prev_node);
++	if (mm->scan_check_range) {
++		adj_start = hole_start < mm->scan_start ?
++			mm->scan_start : hole_start;
++		adj_end = hole_end > mm->scan_end ?
++			mm->scan_end : hole_end;
++	} else {
++		adj_start = hole_start;
++		adj_end = hole_end;
 +	}
++
++	if (check_free_hole(adj_start , adj_end,
++			    mm->scan_size, mm->scan_alignment)) {
++		mm->scan_hit_start = hole_start;
++		mm->scan_hit_size = hole_end;
++
++		return 1;
++	}
++
++	return 0;
++}
++
++int drm_mm_scan_remove_block(struct drm_mm_node *node)
++{
++	struct drm_mm *mm = node->mm;
++	struct drm_mm_node *prev_node;
++
++	mm->scanned_blocks--;
++
++	KASSERT(node->scanned_block, ("scanned_block"));
++	node->scanned_block = 0;
++
++	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
++			       node_list);
++
++	prev_node->hole_follows = node->scanned_preceeds_hole;
++	INIT_LIST_HEAD(&node->node_list);
++	list_add(&node->node_list, &prev_node->node_list);
++
++	/* Only need to check for containment because start&size for the
++	 * complete resulting free block (not just the desired part) is
++	 * stored. */
++	if (node->start >= mm->scan_hit_start &&
++	    node->start + node->size
++	    		<= mm->scan_hit_start + mm->scan_hit_size) {
++		return 1;
++	}
++
++	return 0;
++}
++
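
The scan helpers above are meant to be driven in matched pairs: every node passed to drm_mm_scan_add_block() must later be passed to drm_mm_scan_remove_block() in reverse order, and only the nodes for which the remove call returns 1 actually sit inside the hole that was found. A rough sketch of that driving loop, where candidates[] and ncand stand for hypothetical caller state describing evictable nodes:

    /*
     * Sketch of an eviction scan; candidates[]/ncand are hypothetical
     * caller-maintained state, not part of the drm_mm API.
     */
    static int
    example_scan_for_hole(struct drm_mm *mm, struct drm_mm_node **candidates,
        int ncand, unsigned long size, unsigned alignment)
    {
        int i, found = -1;

        drm_mm_init_scan(mm, size, alignment);

        for (i = 0; i < ncand; i++) {
            if (drm_mm_scan_add_block(candidates[i])) {
                found = i;  /* evicting blocks 0..i opens a large enough hole */
                break;
            }
        }

        /* Every block that was added must be removed again, last one first. */
        for (i = (found >= 0 ? found : ncand - 1); i >= 0; i--) {
            if (drm_mm_scan_remove_block(candidates[i])) {
                /*
                 * This node overlaps the hole that was found; the caller
                 * would evict its backing object and then free the range
                 * with drm_mm_remove_node().
                 */
            }
        }

        return found >= 0 ? 0 : -ENOSPC;
    }
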
+ int drm_mm_clean(struct drm_mm * mm)
+ {
+-	struct list_head *head = &mm->ml_entry;
++	struct list_head *head = &mm->head_node.node_list;
  
- 	return retcode;
+ 	return (head->next->next == head);
  }
-@@ -806,7 +881,7 @@
+ 
+ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
  {
- 	drm_local_map_t *map;
+-	INIT_LIST_HEAD(&mm->ml_entry);
+-	INIT_LIST_HEAD(&mm->fl_entry);
++	INIT_LIST_HEAD(&mm->hole_stack);
+ 	INIT_LIST_HEAD(&mm->unused_nodes);
+ 	mm->num_unused = 0;
++	mm->scanned_blocks = 0;
+ 	mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);
  
--	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
-+	DRM_LOCK_ASSERT(dev);
- 	TAILQ_FOREACH(map, &dev->maplist, link) {
- 		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
- 			return map;
-@@ -815,6 +890,131 @@
- 	return NULL;
+-	/* XXX This could be non-atomic but gets called from a locked path */
+-	return drm_mm_create_tail_node(mm, start, size, 1);
++	INIT_LIST_HEAD(&mm->head_node.node_list);
++	INIT_LIST_HEAD(&mm->head_node.hole_stack);
++	mm->head_node.hole_follows = 1;
++	mm->head_node.scanned_block = 0;
++	mm->head_node.scanned_prev_free = 0;
++	mm->head_node.scanned_next_free = 0;
++	mm->head_node.mm = mm;
++	mm->head_node.start = start + size;
++	mm->head_node.size = start - mm->head_node.start;
++	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
++
++	return 0;
  }
  
-+void
-+drm_device_lock_mtx(struct drm_device *dev)
+ void drm_mm_takedown(struct drm_mm * mm)
+ {
+-	struct list_head *bnode = mm->fl_entry.next;
+-	struct drm_mm_node *entry;
+-	struct drm_mm_node *next;
++	struct drm_mm_node *entry, *next;
+ 
+-	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+-
+-	if (entry->ml_entry.next != &mm->ml_entry ||
+-	    entry->fl_entry.next != &mm->fl_entry) {
++	if (!list_empty(&mm->head_node.node_list)) {
+ 		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+ 		return;
+ 	}
+ 
+-	list_del(&entry->fl_entry);
+-	list_del(&entry->ml_entry);
+-	free(entry, DRM_MEM_MM);
+-
+ 	mtx_lock(&mm->unused_lock);
+-	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
+-		list_del(&entry->fl_entry);
++	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
++		list_del(&entry->node_list);
+ 		free(entry, DRM_MEM_MM);
+ 		--mm->num_unused;
+ 	}
+diff --git a/sys/dev/drm/drm_mm.h b/sys/dev/drm/drm_mm.h
+index f7479cd..9e1869d 100644
+--- sys/dev/drm/drm_mm.h
++++ sys/dev/drm/drm_mm.h
+@@ -40,9 +40,14 @@ __FBSDID("$FreeBSD$");
+ #include "dev/drm/drm_linux_list.h"
+ 
+ struct drm_mm_node {
+-	struct list_head fl_entry;
+-	struct list_head ml_entry;
+-	int free;
++	struct list_head node_list;
++	struct list_head hole_stack;
++	unsigned hole_follows : 1;
++	unsigned scanned_block : 1;
++	unsigned scanned_prev_free : 1;
++	unsigned scanned_next_free : 1;
++	unsigned scanned_preceeds_hole : 1;
++	unsigned allocated : 1;
+ 	unsigned long start;
+ 	unsigned long size;
+ 	struct drm_mm *mm;
+@@ -50,13 +55,42 @@ struct drm_mm_node {
+ };
+ 
+ struct drm_mm {
+-	struct list_head fl_entry;
+-	struct list_head ml_entry;
++	struct list_head hole_stack;
++	struct drm_mm_node head_node;
+ 	struct list_head unused_nodes;
+ 	int num_unused;
+ 	struct mtx unused_lock;
++	unsigned int scan_check_range : 1;
++	unsigned scan_alignment;
++	unsigned long scan_size;
++	unsigned long scan_hit_start;
++	unsigned scan_hit_size;
++	unsigned scanned_blocks;
++	unsigned long scan_start;
++	unsigned long scan_end;
++	struct drm_mm_node *prev_scanned_node;
+ };
+ 
++static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
 +{
++	return node->allocated;
++}
 +
-+	mtx_lock(&dev->dev_lock);
++static inline bool drm_mm_initialized(struct drm_mm *mm)
++{
++	return (mm->hole_stack.next != NULL);
 +}
++#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
++						&(mm)->head_node.node_list, \
++						node_list)
++#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
++	for (entry = (mm)->prev_scanned_node, \
++		next = entry ? list_entry(entry->node_list.next, \
++			struct drm_mm_node, node_list) : NULL; \
++	     entry != NULL; entry = next, \
++		next = entry ? list_entry(entry->node_list.next, \
++			struct drm_mm_node, node_list) : NULL)
 +
-+void
-+drm_device_unlock_mtx(struct drm_device *dev)
+ /*
+  * Basic range manager support (drm_mm.c)
+  */
+@@ -64,6 +98,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+ 						    unsigned long size,
+ 						    unsigned alignment,
+ 						    int atomic);
++extern struct drm_mm_node *drm_mm_get_block_range_generic(
++						struct drm_mm_node *node,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end,
++						int atomic);
+ static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ 						   unsigned long size,
+ 						   unsigned alignment)
+@@ -76,11 +117,46 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
+ {
+ 	return drm_mm_get_block_generic(parent, size, alignment, 1);
+ }
++static inline struct drm_mm_node *drm_mm_get_block_range(
++						struct drm_mm_node *parent,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end)
 +{
++	return drm_mm_get_block_range_generic(parent, size, alignment,
++						start, end, 0);
++}
++static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
++						struct drm_mm_node *parent,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end)
++{
++	return drm_mm_get_block_range_generic(parent, size, alignment,
++						start, end, 1);
++}
++extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
++			      unsigned long size, unsigned alignment);
++extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
++				       struct drm_mm_node *node,
++				       unsigned long size, unsigned alignment,
++				       unsigned long start, unsigned long end);
+ extern void drm_mm_put_block(struct drm_mm_node *cur);
++extern void drm_mm_remove_node(struct drm_mm_node *node);
++extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+ extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ 					      unsigned long size,
+ 					      unsigned alignment,
+ 					      int best_match);
++extern struct drm_mm_node *drm_mm_search_free_in_range(
++						const struct drm_mm *mm,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end,
++						int best_match);
+ extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+ 		       unsigned long size);
+ extern void drm_mm_takedown(struct drm_mm *mm);
+@@ -97,4 +173,13 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+ 	return block->mm;
+ }
+ 
++void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
++		      unsigned alignment);
++void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
++				 unsigned alignment,
++				 unsigned long start,
++				 unsigned long end);
++int drm_mm_scan_add_block(struct drm_mm_node *node);
++int drm_mm_scan_remove_block(struct drm_mm_node *node);
 +
-+	mtx_unlock(&dev->dev_lock);
+ #endif
+diff --git a/sys/dev/drm/drm_mode.h b/sys/dev/drm/drm_mode.h
+new file mode 100644
+index 0000000..bc28240
+--- /dev/null
++++ sys/dev/drm/drm_mode.h
+@@ -0,0 +1,444 @@
++/*
++ * Copyright (c) 2007 Dave Airlie <airlied at linux.ie>
++ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker at gmail.com>
++ * Copyright (c) 2008 Red Hat Inc.
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * Copyright (c) 2007-2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * $FreeBSD$
++ */
++
++#ifndef _DRM_MODE_H
++#define _DRM_MODE_H
++
++#define DRM_DISPLAY_INFO_LEN	32
++#define DRM_CONNECTOR_NAME_LEN	32
++#define DRM_DISPLAY_MODE_LEN	32
++#define DRM_PROP_NAME_LEN	32
++
++#define DRM_MODE_TYPE_BUILTIN	(1<<0)
++#define DRM_MODE_TYPE_CLOCK_C	((1<<1) | DRM_MODE_TYPE_BUILTIN)
++#define DRM_MODE_TYPE_CRTC_C	((1<<2) | DRM_MODE_TYPE_BUILTIN)
++#define DRM_MODE_TYPE_PREFERRED	(1<<3)
++#define DRM_MODE_TYPE_DEFAULT	(1<<4)
++#define DRM_MODE_TYPE_USERDEF	(1<<5)
++#define DRM_MODE_TYPE_DRIVER	(1<<6)
++
++/* Video mode flags */
++/* bit compatible with the xorg definitions. */
++#define DRM_MODE_FLAG_PHSYNC	(1<<0)
++#define DRM_MODE_FLAG_NHSYNC	(1<<1)
++#define DRM_MODE_FLAG_PVSYNC	(1<<2)
++#define DRM_MODE_FLAG_NVSYNC	(1<<3)
++#define DRM_MODE_FLAG_INTERLACE	(1<<4)
++#define DRM_MODE_FLAG_DBLSCAN	(1<<5)
++#define DRM_MODE_FLAG_CSYNC	(1<<6)
++#define DRM_MODE_FLAG_PCSYNC	(1<<7)
++#define DRM_MODE_FLAG_NCSYNC	(1<<8)
++#define DRM_MODE_FLAG_HSKEW	(1<<9) /* hskew provided */
++#define DRM_MODE_FLAG_BCAST	(1<<10)
++#define DRM_MODE_FLAG_PIXMUX	(1<<11)
++#define DRM_MODE_FLAG_DBLCLK	(1<<12)
++#define DRM_MODE_FLAG_CLKDIV2	(1<<13)
++
++/* DPMS flags */
++/* bit compatible with the xorg definitions. */
++#define DRM_MODE_DPMS_ON	0
++#define DRM_MODE_DPMS_STANDBY	1
++#define DRM_MODE_DPMS_SUSPEND	2
++#define DRM_MODE_DPMS_OFF	3
++
++/* Scaling mode options */
++#define DRM_MODE_SCALE_NONE		0 /* Unmodified timing (display or
++					     software can still scale) */
++#define DRM_MODE_SCALE_FULLSCREEN	1 /* Full screen, ignore aspect */
++#define DRM_MODE_SCALE_CENTER		2 /* Centered, no scaling */
++#define DRM_MODE_SCALE_ASPECT		3 /* Full screen, preserve aspect */
++
++/* Dithering mode options */
++#define DRM_MODE_DITHERING_OFF	0
++#define DRM_MODE_DITHERING_ON	1
++#define DRM_MODE_DITHERING_AUTO 2
++
++/* Dirty info options */
++#define DRM_MODE_DIRTY_OFF      0
++#define DRM_MODE_DIRTY_ON       1
++#define DRM_MODE_DIRTY_ANNOTATE 2
++
++struct drm_mode_modeinfo {
++	uint32_t clock;
++	uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
++	uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
++
++	uint32_t vrefresh;
++
++	uint32_t flags;
++	uint32_t type;
++	char name[DRM_DISPLAY_MODE_LEN];
++};
++
++struct drm_mode_card_res {
++	uint64_t fb_id_ptr;
++	uint64_t crtc_id_ptr;
++	uint64_t connector_id_ptr;
++	uint64_t encoder_id_ptr;
++	uint32_t count_fbs;
++	uint32_t count_crtcs;
++	uint32_t count_connectors;
++	uint32_t count_encoders;
++	uint32_t min_width, max_width;
++	uint32_t min_height, max_height;
++};
++
++struct drm_mode_crtc {
++	uint64_t set_connectors_ptr;
++	uint32_t count_connectors;
++
++	uint32_t crtc_id; /**< Id */
++	uint32_t fb_id; /**< Id of framebuffer */
++
++	uint32_t x, y; /**< Position on the framebuffer */
++
++	uint32_t gamma_size;
++	uint32_t mode_valid;
++	struct drm_mode_modeinfo mode;
++};
++
++#define DRM_MODE_PRESENT_TOP_FIELD	(1<<0)
++#define DRM_MODE_PRESENT_BOTTOM_FIELD	(1<<1)
++
++/* Planes blend with or override other bits on the CRTC */
++struct drm_mode_set_plane {
++	uint32_t plane_id;
++	uint32_t crtc_id;
++	uint32_t fb_id; /* fb object contains surface format type */
++	uint32_t flags; /* see above flags */
++
++	/* Signed dest location allows it to be partially off screen */
++	int32_t crtc_x, crtc_y;
++	uint32_t crtc_w, crtc_h;
++
++	/* Source values are 16.16 fixed point */
++	uint32_t src_x, src_y;
++	uint32_t src_h, src_w;
++};
++
++struct drm_mode_get_plane {
++	uint32_t plane_id;
++
++	uint32_t crtc_id;
++	uint32_t fb_id;
++
++	uint32_t possible_crtcs;
++	uint32_t gamma_size;
++
++	uint32_t count_format_types;
++	uint64_t format_type_ptr;
++};
++
++struct drm_mode_get_plane_res {
++	uint64_t plane_id_ptr;
++	uint32_t count_planes;
++};
++
++#define DRM_MODE_ENCODER_NONE	0
++#define DRM_MODE_ENCODER_DAC	1
++#define DRM_MODE_ENCODER_TMDS	2
++#define DRM_MODE_ENCODER_LVDS	3
++#define DRM_MODE_ENCODER_TVDAC	4
++
++struct drm_mode_get_encoder {
++	uint32_t encoder_id;
++	uint32_t encoder_type;
++
++	uint32_t crtc_id; /**< Id of crtc */
++
++	uint32_t possible_crtcs;
++	uint32_t possible_clones;
++};
++
++/* This is for connectors with multiple signal types. */
++/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
++#define DRM_MODE_SUBCONNECTOR_Automatic	0
++#define DRM_MODE_SUBCONNECTOR_Unknown	0
++#define DRM_MODE_SUBCONNECTOR_DVID	3
++#define DRM_MODE_SUBCONNECTOR_DVIA	4
++#define DRM_MODE_SUBCONNECTOR_Composite	5
++#define DRM_MODE_SUBCONNECTOR_SVIDEO	6
++#define DRM_MODE_SUBCONNECTOR_Component	8
++#define DRM_MODE_SUBCONNECTOR_SCART	9
++
++#define DRM_MODE_CONNECTOR_Unknown	0
++#define DRM_MODE_CONNECTOR_VGA		1
++#define DRM_MODE_CONNECTOR_DVII		2
++#define DRM_MODE_CONNECTOR_DVID		3
++#define DRM_MODE_CONNECTOR_DVIA		4
++#define DRM_MODE_CONNECTOR_Composite	5
++#define DRM_MODE_CONNECTOR_SVIDEO	6
++#define DRM_MODE_CONNECTOR_LVDS		7
++#define DRM_MODE_CONNECTOR_Component	8
++#define DRM_MODE_CONNECTOR_9PinDIN	9
++#define DRM_MODE_CONNECTOR_DisplayPort	10
++#define DRM_MODE_CONNECTOR_HDMIA	11
++#define DRM_MODE_CONNECTOR_HDMIB	12
++#define DRM_MODE_CONNECTOR_TV		13
++#define DRM_MODE_CONNECTOR_eDP		14
++
++struct drm_mode_get_connector {
++
++	uint64_t encoders_ptr;
++	uint64_t modes_ptr;
++	uint64_t props_ptr;
++	uint64_t prop_values_ptr;
++
++	uint32_t count_modes;
++	uint32_t count_props;
++	uint32_t count_encoders;
++
++	uint32_t encoder_id; /**< Current Encoder */
++	uint32_t connector_id; /**< Id */
++	uint32_t connector_type;
++	uint32_t connector_type_id;
++
++	uint32_t connection;
++	uint32_t mm_width, mm_height; /**< HxW in millimeters */
++	uint32_t subpixel;
++};
++
++#define DRM_MODE_PROP_PENDING	(1<<0)
++#define DRM_MODE_PROP_RANGE	(1<<1)
++#define DRM_MODE_PROP_IMMUTABLE	(1<<2)
++#define DRM_MODE_PROP_ENUM	(1<<3) /* enumerated type with text strings */
++#define DRM_MODE_PROP_BLOB	(1<<4)
++
++struct drm_mode_property_enum {
++	uint64_t value;
++	char name[DRM_PROP_NAME_LEN];
++};
++
++struct drm_mode_get_property {
++	uint64_t values_ptr; /* values and blob lengths */
++	uint64_t enum_blob_ptr; /* enum and blob id ptrs */
++
++	uint32_t prop_id;
++	uint32_t flags;
++	char name[DRM_PROP_NAME_LEN];
++
++	uint32_t count_values;
++	uint32_t count_enum_blobs;
++};
++
++struct drm_mode_connector_set_property {
++	uint64_t value;
++	uint32_t prop_id;
++	uint32_t connector_id;
++};
++
++struct drm_mode_get_blob {
++	uint32_t blob_id;
++	uint32_t length;
++	uint64_t data;
++};
++
++struct drm_mode_fb_cmd {
++	uint32_t fb_id;
++	uint32_t width, height;
++	uint32_t pitch;
++	uint32_t bpp;
++	uint32_t depth;
++	/* driver specific handle */
++	uint32_t handle;
++};
++
++#define DRM_MODE_FB_INTERLACED	(1<<0) /* for interlaced framebuffers */
++
++struct drm_mode_fb_cmd2 {
++	uint32_t fb_id;
++	uint32_t width, height;
++	uint32_t pixel_format; /* fourcc code from drm_fourcc.h */
++	uint32_t flags; /* see above flags */
++
++	/*
++	 * In case of planar formats, this ioctl allows up to 4
++	 * buffer objects with offsets and pitches per plane.
++	 * The pitch and offset order is dictated by the fourcc,
++	 * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
++	 *
++	 *   YUV 4:2:0 image with a plane of 8 bit Y samples
++	 *   followed by an interleaved U/V plane containing
++	 *   8 bit 2x2 subsampled colour difference samples.
++	 *
++	 * So it would consist of Y as offset[0] and UV as
++	 * offset[1].  Note that offset[0] will generally
++	 * be 0.
++	 */
++	uint32_t handles[4];
++	uint32_t pitches[4]; /* pitch for each plane */
++	uint32_t offsets[4]; /* offset of each plane */
++};
++
++#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
++#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
++#define DRM_MODE_FB_DIRTY_FLAGS         0x03
++
++#define DRM_MODE_FB_DIRTY_MAX_CLIPS     256
++
++/*
++ * Mark a region of a framebuffer as dirty.
++ *
++ * Some hardware does not automatically update display contents
++ * as a hardware or software draw to a framebuffer. This ioctl
++ * allows userspace to tell the kernel and the hardware what
++ * regions of the framebuffer have changed.
++ *
++ * The kernel or hardware is free to update more than just the
++ * region specified by the clip rects. The kernel or hardware
++ * may also delay and/or coalesce several calls to dirty into a
++ * single update.
++ *
++ * Userspace may annotate the updates; the annotations are a
++ * promise made by the caller that the change is either a copy
++ * of pixels or a fill of a single color in the region specified.
++ *
++ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
++ * the number of updated regions is half of num_clips given,
++ * where the clip rects are paired in src and dst. The width and
++ * height of each one of the pairs must match.
++ *
++ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
++ * promises that the region specified of the clip rects is filled
++ * completely with a single color as given in the color argument.
++ */
++
++struct drm_mode_fb_dirty_cmd {
++	uint32_t fb_id;
++	uint32_t flags;
++	uint32_t color;
++	uint32_t num_clips;
++	uint64_t clips_ptr;
++};
++
++struct drm_mode_mode_cmd {
++	uint32_t connector_id;
++	struct drm_mode_modeinfo mode;
++};
++
++#define DRM_MODE_CURSOR_BO	(1<<0)
++#define DRM_MODE_CURSOR_MOVE	(1<<1)
++
++/*
++ * Depending on the value in flags, different members are used.
++ *
++ * CURSOR_BO uses
++ *    crtc
++ *    width
++ *    height
++ *    handle - if 0 turns the cursor off
++ *
++ * CURSOR_MOVE uses
++ *    crtc
++ *    x
++ *    y
++ */
++struct drm_mode_cursor {
++	uint32_t flags;
++	uint32_t crtc_id;
++	int32_t x;
++	int32_t y;
++	uint32_t width;
++	uint32_t height;
++	/* driver specific handle */
++	uint32_t handle;
++};
++
++struct drm_mode_crtc_lut {
++	uint32_t crtc_id;
++	uint32_t gamma_size;
++
++	/* pointers to arrays */
++	uint64_t red;
++	uint64_t green;
++	uint64_t blue;
++};
++
++#define DRM_MODE_PAGE_FLIP_EVENT 0x01
++#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
++
++/*
++ * Request a page flip on the specified crtc.
++ *
++ * This ioctl will ask KMS to schedule a page flip for the specified
++ * crtc.  Once any pending rendering targeting the specified fb (as of
++ * ioctl time) has completed, the crtc will be reprogrammed to display
++ * that fb after the next vertical refresh.  The ioctl returns
++ * immediately, but subsequent rendering to the current fb will block
++ * in the execbuffer ioctl until the page flip happens.  If a page
++ * flip is already pending as the ioctl is called, EBUSY will be
++ * returned.
++ *
++ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
++ * request that drm sends back a vblank event (see drm.h: struct
++ * drm_event_vblank) when the page flip is done.  The user_data field
++ * passed in with this ioctl will be returned as the user_data field
++ * in the vblank event struct.
++ *
++ * The reserved field must be zero until we figure out something
++ * clever to use it for.
++ */
++
++struct drm_mode_crtc_page_flip {
++	uint32_t crtc_id;
++	uint32_t fb_id;
++	uint32_t flags;
++	uint32_t reserved;
++	uint64_t user_data;
++};
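
From userspace this request is a plain ioctl on the DRM device node. A small sketch, assuming the usual DRM_IOCTL_MODE_PAGE_FLIP request code from the accompanying drm.h and an illustrative helper name:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Sketch: schedule a flip of crtc_id to fb_id and ask for a vblank
     * event that returns `cookie' in user_data.  The struct and flag are
     * the ones defined in drm_mode.h above; the ioctl request code is
     * assumed to come from drm.h. */
    static int
    request_page_flip(int drm_fd, uint32_t crtc_id, uint32_t fb_id, void *cookie)
    {
        struct drm_mode_crtc_page_flip flip;

        memset(&flip, 0, sizeof(flip));     /* reserved must be zero */
        flip.crtc_id = crtc_id;
        flip.fb_id = fb_id;
        flip.flags = DRM_MODE_PAGE_FLIP_EVENT;
        flip.user_data = (uint64_t)(uintptr_t)cookie;

        return ioctl(drm_fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
    }
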
++
++/* create a dumb scanout buffer */
++struct drm_mode_create_dumb {
++	uint32_t height;
++	uint32_t width;
++	uint32_t bpp;
++	uint32_t flags;
++	/* handle, pitch, size will be returned */
++	uint32_t handle;
++	uint32_t pitch;
++	uint64_t size;
++};
++
++/* set up for mmap of a dumb scanout buffer */
++struct drm_mode_map_dumb {
++	/** Handle for the object being mapped. */
++	uint32_t handle;
++	uint32_t pad;
++	/**
++	 * Fake offset to use for subsequent mmap call
++	 *
++	 * This is a fixed-size type for 32/64 compatibility.
++	 */
++	uint64_t offset;
++};
++
++struct drm_mode_destroy_dumb {
++	uint32_t handle;
++};
++
++#endif
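
The dumb-buffer structs at the end of the header follow the same pattern from userspace: create a buffer, ask for its fake mmap offset, then mmap it through the DRM fd. A sketch, assuming DRM_IOCTL_MODE_CREATE_DUMB and DRM_IOCTL_MODE_MAP_DUMB are defined alongside the other mode ioctls:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    /* Sketch: allocate a 32 bpp dumb scanout buffer and map it.  The
     * ioctl request codes are assumed; the structs are the ones defined
     * in drm_mode.h above. */
    static void *
    map_dumb_buffer(int drm_fd, uint32_t width, uint32_t height,
        uint32_t *handle, uint32_t *pitch)
    {
        struct drm_mode_create_dumb creq;
        struct drm_mode_map_dumb mreq;
        void *map;

        memset(&creq, 0, sizeof(creq));
        creq.width = width;
        creq.height = height;
        creq.bpp = 32;
        if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) != 0)
            return NULL;

        memset(&mreq, 0, sizeof(mreq));
        mreq.handle = creq.handle;
        if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) != 0)
            return NULL;

        map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
            drm_fd, mreq.offset);
        if (map == MAP_FAILED)
            return NULL;

        *handle = creq.handle;
        *pitch = creq.pitch;
        return map;
    }
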
+diff --git a/sys/dev/drm/drm_modes.c b/sys/dev/drm/drm_modes.c
+new file mode 100644
+index 0000000..97023bd
+--- /dev/null
++++ sys/dev/drm/drm_modes.c
+@@ -0,0 +1,1144 @@
++/*
++ * Copyright © 1997-2003 by The XFree86 Project, Inc.
++ * Copyright © 2007 Dave Airlie
++ * Copyright © 2007-2008 Intel Corporation
++ *   Jesse Barnes <jesse.barnes at intel.com>
++ * Copyright 2005-2006 Luc Verhaegen
++ * Copyright (c) 2001, Andy Ritger  aritger at nvidia.com
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Except as contained in this notice, the name of the copyright holder(s)
++ * and author(s) shall not be used in advertising or otherwise to promote
++ * the sale, use or other dealings in this Software without prior written
++ * authorization from the copyright holder(s) and author(s).
++ */
++
++#include "dev/drm/drmP.h"
++#include "dev/drm/drm.h"
++#include "dev/drm/drm_crtc.h"
++
++#define	KHZ2PICOS(a)	(1000000000UL/(a))
++
++/**
++ * drm_mode_debug_printmodeline - debug print a mode
++ * @dev: DRM device
++ * @mode: mode to print
++ *
++ * LOCKING:
++ * None.
++ *
++ * Describe @mode using DRM_DEBUG.
++ */
++void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
++{
++	DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
++			"0x%x 0x%x\n",
++		mode->base.id, mode->name, mode->vrefresh, mode->clock,
++		mode->hdisplay, mode->hsync_start,
++		mode->hsync_end, mode->htotal,
++		mode->vdisplay, mode->vsync_start,
++		mode->vsync_end, mode->vtotal, mode->type, mode->flags);
 +}
 +
-+int
-+drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
-+    const char *msg, int timeout)
++/**
++ * drm_cvt_mode -create a modeline based on CVT algorithm
++ * @dev: DRM device
++ * @hdisplay: hdisplay size
++ * @vdisplay: vdisplay size
++ * @vrefresh  : vrefresh rate
++ * @reduced : Whether the GTF calculation is simplified
++ * @interlaced:Whether the interlace is supported
++ *
++ * LOCKING:
++ * none.
++ *
++ * return the modeline based on CVT algorithm
++ *
++ * This function is called to generate the modeline based on CVT algorithm
++ * according to the hdisplay, vdisplay, vrefresh.
++ * It is based from the VESA(TM) Coordinated Video Timing Generator by
++ * Graham Loveridge April 9, 2003 available at
++ * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls 
++ *
++ * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
++ * What I have done is to translate it by using integer calculation.
++ */
++#define HV_FACTOR			1000
++struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
++				      int vdisplay, int vrefresh,
++				      bool reduced, bool interlaced, bool margins)
 +{
++	/* 1) top/bottom margin size (% of height) - default: 1.8, */
++#define	CVT_MARGIN_PERCENTAGE		18
++	/* 2) character cell horizontal granularity (pixels) - default 8 */
++#define	CVT_H_GRANULARITY		8
++	/* 3) Minimum vertical porch (lines) - default 3 */
++#define	CVT_MIN_V_PORCH			3
++	/* 4) Minimum number of vertical back porch lines - default 6 */
++#define	CVT_MIN_V_BPORCH		6
++	/* Pixel Clock step (kHz) */
++#define CVT_CLOCK_STEP			250
++	struct drm_display_mode *drm_mode;
++	unsigned int vfieldrate, hperiod;
++	int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
++	int interlace;
 +
-+	return (msleep(chan, &dev->dev_lock, flags, msg, timeout));
++	/* allocate the drm_display_mode structure. If failure, we will
++	 * return directly
++	 */
++	drm_mode = drm_mode_create(dev);
++	if (!drm_mode)
++		return NULL;
++
++	/* the CVT default refresh rate is 60Hz */
++	if (!vrefresh)
++		vrefresh = 60;
++
++	/* the required field fresh rate */
++	if (interlaced)
++		vfieldrate = vrefresh * 2;
++	else
++		vfieldrate = vrefresh;
++
++	/* horizontal pixels */
++	hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
++
++	/* determine the left&right borders */
++	hmargin = 0;
++	if (margins) {
++		hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
++		hmargin -= hmargin % CVT_H_GRANULARITY;
++	}
++	/* find the total active pixels */
++	drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
++
++	/* find the number of lines per field */
++	if (interlaced)
++		vdisplay_rnd = vdisplay / 2;
++	else
++		vdisplay_rnd = vdisplay;
++
++	/* find the top & bottom borders */
++	vmargin = 0;
++	if (margins)
++		vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
++
++	drm_mode->vdisplay = vdisplay + 2 * vmargin;
++
++	/* Interlaced */
++	if (interlaced)
++		interlace = 1;
++	else
++		interlace = 0;
++
++	/* Determine VSync Width from aspect ratio */
++	if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
++		vsync = 4;
++	else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
++		vsync = 5;
++	else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
++		vsync = 6;
++	else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
++		vsync = 7;
++	else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
++		vsync = 7;
++	else /* custom */
++		vsync = 10;
++
++	if (!reduced) {
++		/* simplify the GTF calculation */
++		/* 4) Minimum time of vertical sync + back porch interval (µs)
++		 * default 550.0
++		 */
++		int tmp1, tmp2;
++#define CVT_MIN_VSYNC_BP	550
++		/* 3) Nominal HSync width (% of line period) - default 8 */
++#define CVT_HSYNC_PERCENTAGE	8
++		unsigned int hblank_percentage;
++		int vsyncandback_porch, vback_porch, hblank;
++
++		/* estimated the horizontal period */
++		tmp1 = HV_FACTOR * 1000000  -
++				CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
++		tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
++				interlace;
++		hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
++
++		tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
++		/* 9. Find number of lines in sync + backporch */
++		if (tmp1 < (vsync + CVT_MIN_V_PORCH))
++			vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
++		else
++			vsyncandback_porch = tmp1;
++		/* 10. Find number of lines in back porch */
++		vback_porch = vsyncandback_porch - vsync;
++		drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
++				vsyncandback_porch + CVT_MIN_V_PORCH;
++		/* 5) Definition of Horizontal blanking time limitation */
++		/* Gradient (%/kHz) - default 600 */
++#define CVT_M_FACTOR	600
++		/* Offset (%) - default 40 */
++#define CVT_C_FACTOR	40
++		/* Blanking time scaling factor - default 128 */
++#define CVT_K_FACTOR	128
++		/* Scaling factor weighting - default 20 */
++#define CVT_J_FACTOR	20
++#define CVT_M_PRIME	(CVT_M_FACTOR * CVT_K_FACTOR / 256)
++#define CVT_C_PRIME	((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
++			 CVT_J_FACTOR)
++		/* 12. Find ideal blanking duty cycle from formula */
++		hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
++					hperiod / 1000;
++		/* 13. Blanking time */
++		if (hblank_percentage < 20 * HV_FACTOR)
++			hblank_percentage = 20 * HV_FACTOR;
++		hblank = drm_mode->hdisplay * hblank_percentage /
++			 (100 * HV_FACTOR - hblank_percentage);
++		hblank -= hblank % (2 * CVT_H_GRANULARITY);
++		/* 14. find the total pixels per line */
++		drm_mode->htotal = drm_mode->hdisplay + hblank;
++		drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
++		drm_mode->hsync_start = drm_mode->hsync_end -
++			(drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
++		drm_mode->hsync_start += CVT_H_GRANULARITY -
++			drm_mode->hsync_start % CVT_H_GRANULARITY;
++		/* fill the Vsync values */
++		drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
++		drm_mode->vsync_end = drm_mode->vsync_start + vsync;
++	} else {
++		/* Reduced blanking */
++		/* Minimum vertical blanking interval time (µs)- default 460 */
++#define CVT_RB_MIN_VBLANK	460
++		/* Fixed number of clocks for horizontal sync */
++#define CVT_RB_H_SYNC		32
++		/* Fixed number of clocks for horizontal blanking */
++#define CVT_RB_H_BLANK		160
++		/* Fixed number of lines for vertical front porch - default 3*/
++#define CVT_RB_VFPORCH		3
++		int vbilines;
++		int tmp1, tmp2;
++		/* 8. Estimate Horizontal period. */
++		tmp1 = HV_FACTOR * 1000000 -
++			CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
++		tmp2 = vdisplay_rnd + 2 * vmargin;
++		hperiod = tmp1 / (tmp2 * vfieldrate);
++		/* 9. Find number of lines in vertical blanking */
++		vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
++		/* 10. Check if vertical blanking is sufficient */
++		if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
++			vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
++		/* 11. Find total number of lines in vertical field */
++		drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
++		/* 12. Find total number of pixels in a line */
++		drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
++		/* Fill in HSync values */
++		drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
++		drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
++		/* Fill in VSync values */
++		drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
++		drm_mode->vsync_end = drm_mode->vsync_start + vsync;
++	}
++	/* 15/13. Find pixel clock frequency (kHz for xf86) */
++	drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
++	drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
++	/* 18/16. Find actual vertical frame frequency */
++	/* ignore - just set the mode flag for interlaced */
++	if (interlaced) {
++		drm_mode->vtotal *= 2;
++		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
++	}
++	/* Fill the mode line name */
++	drm_mode_set_name(drm_mode);
++	if (reduced)
++		drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
++					DRM_MODE_FLAG_NVSYNC);
++	else
++		drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
++					DRM_MODE_FLAG_NHSYNC);
++
++	return drm_mode;
 +}
 +
-+void
-+drm_device_assert_mtx_locked(struct drm_device *dev)
++/**
++ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
++ *
++ * @dev		:drm device
++ * @hdisplay	:hdisplay size
++ * @vdisplay	:vdisplay size
++ * @vrefresh	:vrefresh rate.
++ * @interlaced	:whether the interlace is supported
++ * @margins	:desired margin size
++ * @GTF_[MCKJ]  :extended GTF formula parameters
++ *
++ * LOCKING.
++ * none.
++ *
++ * return the modeline based on full GTF algorithm.
++ *
++ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
++ * in here multiplied by two.  For a C of 40, pass in 80.
++ */
++struct drm_display_mode *
++drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
++		     int vrefresh, bool interlaced, int margins,
++		     int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
++{	/* 1) top/bottom margin size (% of height) - default: 1.8, */
++#define	GTF_MARGIN_PERCENTAGE		18
++	/* 2) character cell horizontal granularity (pixels) - default 8 */
++#define	GTF_CELL_GRAN			8
++	/* 3) Minimum vertical porch (lines) - default 3 */
++#define	GTF_MIN_V_PORCH			1
++	/* width of vsync in lines */
++#define V_SYNC_RQD			3
++	/* width of hsync as % of total line */
++#define H_SYNC_PERCENT			8
++	/* min time of vsync + back porch (microsec) */
++#define MIN_VSYNC_PLUS_BP		550
++	/* C' and M' are part of the Blanking Duty Cycle computation */
++#define GTF_C_PRIME	((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
++#define GTF_M_PRIME	(GTF_K * GTF_M / 256)
++	struct drm_display_mode *drm_mode;
++	unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
++	int top_margin, bottom_margin;
++	int interlace;
++	unsigned int hfreq_est;
++	int vsync_plus_bp, vback_porch;
++	unsigned int vtotal_lines, vfieldrate_est, hperiod;
++	unsigned int vfield_rate, vframe_rate;
++	int left_margin, right_margin;
++	unsigned int total_active_pixels, ideal_duty_cycle;
++	unsigned int hblank, total_pixels, pixel_freq;
++	int hsync, hfront_porch, vodd_front_porch_lines;
++	unsigned int tmp1, tmp2;
++
++	drm_mode = drm_mode_create(dev);
++	if (!drm_mode)
++		return NULL;
++
++	/* 1. In order to give correct results, the number of horizontal
++	 * pixels requested is first processed to ensure that it is divisible
++	 * by the character size, by rounding it to the nearest character
++	 * cell boundary:
++	 */
++	hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
++	hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
++
++	/* 2. If interlace is requested, the number of vertical lines assumed
++	 * by the calculation must be halved, as the computation calculates
++	 * the number of vertical lines per field.
++	 */
++	if (interlaced)
++		vdisplay_rnd = vdisplay / 2;
++	else
++		vdisplay_rnd = vdisplay;
++
++	/* 3. Find the frame rate required: */
++	if (interlaced)
++		vfieldrate_rqd = vrefresh * 2;
++	else
++		vfieldrate_rqd = vrefresh;
++
++	/* 4. Find number of lines in Top margin: */
++	top_margin = 0;
++	if (margins)
++		top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
++				1000;
++	/* 5. Find number of lines in bottom margin: */
++	bottom_margin = top_margin;
++
++	/* 6. If interlace is required, then set variable interlace: */
++	if (interlaced)
++		interlace = 1;
++	else
++		interlace = 0;
++
++	/* 7. Estimate the Horizontal frequency */
++	{
++		tmp1 = (1000000  - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
++		tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
++				2 + interlace;
++		hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
++	}
++
++	/* 8. Find the number of lines in V sync + back porch */
++	/* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
++	vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
++	vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
++	/*  9. Find the number of lines in V back porch alone: */
++	vback_porch = vsync_plus_bp - V_SYNC_RQD;
++	/*  10. Find the total number of lines in Vertical field period: */
++	vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
++			vsync_plus_bp + GTF_MIN_V_PORCH;
++	/*  11. Estimate the Vertical field frequency: */
++	vfieldrate_est = hfreq_est / vtotal_lines;
++	/*  12. Find the actual horizontal period: */
++	hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
++
++	/*  13. Find the actual Vertical field frequency: */
++	vfield_rate = hfreq_est / vtotal_lines;
++	/*  14. Find the Vertical frame frequency: */
++	if (interlaced)
++		vframe_rate = vfield_rate / 2;
++	else
++		vframe_rate = vfield_rate;
++	/*  15. Find number of pixels in left margin: */
++	if (margins)
++		left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
++				1000;
++	else
++		left_margin = 0;
++
++	/* 16.Find number of pixels in right margin: */
++	right_margin = left_margin;
++	/* 17.Find total number of active pixels in image and left and right */
++	total_active_pixels = hdisplay_rnd + left_margin + right_margin;
++	/* 18.Find the ideal blanking duty cycle from blanking duty cycle */
++	ideal_duty_cycle = GTF_C_PRIME * 1000 -
++				(GTF_M_PRIME * 1000000 / hfreq_est);
++	/* 19.Find the number of pixels in the blanking time to the nearest
++	 * double character cell: */
++	hblank = total_active_pixels * ideal_duty_cycle /
++			(100000 - ideal_duty_cycle);
++	hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
++	hblank = hblank * 2 * GTF_CELL_GRAN;
++	/* 20.Find total number of pixels: */
++	total_pixels = total_active_pixels + hblank;
++	/* 21.Find pixel clock frequency: */
++	pixel_freq = total_pixels * hfreq_est / 1000;
++	/* Stage 1 computations are now complete; I should really pass
++	 * the results to another function and do the Stage 2 computations,
++	 * but I only need a few more values so I'll just append the
++	 * computations here for now */
++	/* 17. Find the number of pixels in the horizontal sync period: */
++	hsync = H_SYNC_PERCENT * total_pixels / 100;
++	hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
++	hsync = hsync * GTF_CELL_GRAN;
++	/* 18. Find the number of pixels in horizontal front porch period */
++	hfront_porch = hblank / 2 - hsync;
++	/*  36. Find the number of lines in the odd front porch period: */
++	vodd_front_porch_lines = GTF_MIN_V_PORCH ;
++
++	/* finally, pack the results in the mode struct */
++	drm_mode->hdisplay = hdisplay_rnd;
++	drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
++	drm_mode->hsync_end = drm_mode->hsync_start + hsync;
++	drm_mode->htotal = total_pixels;
++	drm_mode->vdisplay = vdisplay_rnd;
++	drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
++	drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
++	drm_mode->vtotal = vtotal_lines;
++
++	drm_mode->clock = pixel_freq;
++
++	if (interlaced) {
++		drm_mode->vtotal *= 2;
++		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
++	}
++
++	drm_mode_set_name(drm_mode);
++	if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
++		drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
++	else
++		drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
++
++	return drm_mode;
++}
++
++/**
++ * drm_gtf_mode - create the modeline based on GTF algorithm
++ *
++ * @dev		:drm device
++ * @hdisplay	:hdisplay size
++ * @vdisplay	:vdisplay size
++ * @vrefresh	:vrefresh rate.
++ * @interlaced	:whether the interlace is supported
++ * @margins	:whether the margin is supported
++ *
++ * LOCKING.
++ * none.
++ *
++ * return the modeline based on GTF algorithm
++ *
++ * This function is to create the modeline based on the GTF algorithm.
++ * Generalized Timing Formula is derived from:
++ *	GTF Spreadsheet by Andy Morrish (1/5/97)
++ *	available at http://www.vesa.org
++ *
++ * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
++ * What I have done is to translate it by using integer calculation.
++ * I also refer to the function of fb_get_mode in the file of
++ * drivers/video/fbmon.c
++ *
++ * Standard GTF parameters:
++ * M = 600
++ * C = 40
++ * K = 128
++ * J = 20
++ */
++struct drm_display_mode *
++drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
++	     bool lace, int margins)
 +{
++	return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
++				    margins, 600, 40 * 2, 128, 20 * 2);
++}
 +
-+	mtx_assert(&dev->dev_lock, MA_OWNED);
++/**
++ * drm_mode_set_name - set the name on a mode
++ * @mode: name will be set in this mode
++ *
++ * LOCKING:
++ * None.
++ *
++ * Set the name of @mode to a standard format.
++ */
++void drm_mode_set_name(struct drm_display_mode *mode)
++{
++	bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
++
++	snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
++		 mode->hdisplay, mode->vdisplay,
++		 interlaced ? "i" : "");
 +}
 +
-+void
-+drm_device_assert_mtx_unlocked(struct drm_device *dev)
++/**
++ * drm_mode_list_concat - move modes from one list to another
++ * @head: source list
++ * @new: dst list
++ *
++ * LOCKING:
++ * Caller must ensure both lists are locked.
++ *
++ * Move all the modes from @head to @new.
++ */
++void drm_mode_list_concat(struct list_head *head, struct list_head *new)
 +{
 +
-+	mtx_assert(&dev->dev_lock, MA_NOTOWNED);
++	struct list_head *entry, *tmp;
++
++	list_for_each_safe(entry, tmp, head) {
++		list_move_tail(entry, new);
++	}
 +}
 +
-+void
-+drm_device_lock_struct(struct drm_device *dev)
++/**
++ * drm_mode_width - get the width of a mode
++ * @mode: mode
++ *
++ * LOCKING:
++ * None.
++ *
++ * Return @mode's width (hdisplay) value.
++ *
++ * FIXME: is this needed?
++ *
++ * RETURNS:
++ * @mode->hdisplay
++ */
++int drm_mode_width(struct drm_display_mode *mode)
 +{
++	return mode->hdisplay;
 +
-+	sx_xlock(&dev->dev_struct_lock);
 +}
 +
-+void
-+drm_device_unlock_struct(struct drm_device *dev)
++/**
++ * drm_mode_height - get the height of a mode
++ * @mode: mode
++ *
++ * LOCKING:
++ * None.
++ *
++ * Return @mode's height (vdisplay) value.
++ *
++ * FIXME: is this needed?
++ *
++ * RETURNS:
++ * @mode->vdisplay
++ */
++int drm_mode_height(struct drm_display_mode *mode)
 +{
++	return mode->vdisplay;
++}
 +
-+	sx_xunlock(&dev->dev_struct_lock);
++/** drm_mode_hsync - get the hsync of a mode
++ * @mode: mode
++ *
++ * LOCKING:
++ * None.
++ *
++ * Return @mode's hsync rate in kHz, rounded to the nearest int.
++ */
++int drm_mode_hsync(const struct drm_display_mode *mode)
++{
++	unsigned int calc_val;
++
++	if (mode->hsync)
++		return mode->hsync;
++
++	if (mode->htotal < 0)
++		return 0;
++
++	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
++	calc_val += 500;				/* round to 1000Hz */
++	calc_val /= 1000;				/* truncate to kHz */
++
++	return calc_val;
 +}
 +
-+int
-+drm_device_sleep_struct(struct drm_device *dev, void *chan, int flags,
-+    const char *msg, int timeout)
++/**
++ * drm_mode_vrefresh - get the vrefresh of a mode
++ * @mode: mode
++ *
++ * LOCKING:
++ * None.
++ *
++ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
++ *
++ * FIXME: why is this needed?  shouldn't vrefresh be set already?
++ *
++ * RETURNS:
++ * Vertical refresh rate. It will be the result of actual value plus 0.5.
++ * If it is 70.288, it will return 70Hz.
++ * If it is 59.6, it will return 60Hz.
++ */
++int drm_mode_vrefresh(const struct drm_display_mode *mode)
 +{
++	int refresh = 0;
++	unsigned int calc_val;
 +
-+	return (sx_sleep(chan, &dev->dev_struct_lock, flags, msg, timeout));
++	if (mode->vrefresh > 0)
++		refresh = mode->vrefresh;
++	else if (mode->htotal > 0 && mode->vtotal > 0) {
++		int vtotal;
++		vtotal = mode->vtotal;
++		/* work out vrefresh the value will be x1000 */
++		calc_val = (mode->clock * 1000);
++		calc_val /= mode->htotal;
++		refresh = (calc_val + vtotal / 2) / vtotal;
++
++		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++			refresh *= 2;
++		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++			refresh /= 2;
++		if (mode->vscan > 1)
++			refresh /= mode->vscan;
++	}
++	return refresh;
 +}
 +
-+void
-+drm_device_assert_struct_locked(struct drm_device *dev)
++/**
++ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
++ * @p: mode
++ * @adjust_flags: unused? (FIXME)
++ *
++ * LOCKING:
++ * None.
++ *
++ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
++ */
++void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 +{
++	if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
++		return;
 +
-+	sx_assert(&dev->dev_struct_lock, SA_XLOCKED);
++	p->crtc_hdisplay = p->hdisplay;
++	p->crtc_hsync_start = p->hsync_start;
++	p->crtc_hsync_end = p->hsync_end;
++	p->crtc_htotal = p->htotal;
++	p->crtc_hskew = p->hskew;
++	p->crtc_vdisplay = p->vdisplay;
++	p->crtc_vsync_start = p->vsync_start;
++	p->crtc_vsync_end = p->vsync_end;
++	p->crtc_vtotal = p->vtotal;
++
++	if (p->flags & DRM_MODE_FLAG_INTERLACE) {
++		if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
++			p->crtc_vdisplay /= 2;
++			p->crtc_vsync_start /= 2;
++			p->crtc_vsync_end /= 2;
++			p->crtc_vtotal /= 2;
++		}
++
++		p->crtc_vtotal |= 1;
++	}
++
++	if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
++		p->crtc_vdisplay *= 2;
++		p->crtc_vsync_start *= 2;
++		p->crtc_vsync_end *= 2;
++		p->crtc_vtotal *= 2;
++	}
++
++	if (p->vscan > 1) {
++		p->crtc_vdisplay *= p->vscan;
++		p->crtc_vsync_start *= p->vscan;
++		p->crtc_vsync_end *= p->vscan;
++		p->crtc_vtotal *= p->vscan;
++	}
++
++	p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
++	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
++	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
++	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
++
++	p->crtc_hadjusted = false;
++	p->crtc_vadjusted = false;
 +}
 +
-+void
-+drm_device_assert_struct_unlocked(struct drm_device *dev)
++
++/**
++ * drm_mode_duplicate - allocate and duplicate an existing mode
++ * @m: mode to duplicate
++ *
++ * LOCKING:
++ * None.
++ *
++ * Just allocate a new mode, copy the existing mode into it, and return
++ * a pointer to it.  Used to create new instances of established modes.
++ */
++struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
++					    const struct drm_display_mode *mode)
 +{
++	struct drm_display_mode *nmode;
++	int new_id;
 +
-+	sx_assert(&dev->dev_struct_lock, SA_UNLOCKED);
++	nmode = drm_mode_create(dev);
++	if (!nmode)
++		return NULL;
++
++	new_id = nmode->base.id;
++	*nmode = *mode;
++	nmode->base.id = new_id;
++	INIT_LIST_HEAD(&nmode->head);
++	return nmode;
 +}
 +
-+static void
-+drm_device_assert_struct_nonsleepable_unlocked(struct drm_device *dev)
++/**
++ * drm_mode_equal - test modes for equality
++ * @mode1: first mode
++ * @mode2: second mode
++ *
++ * LOCKING:
++ * None.
++ *
++ * Check to see if @mode1 and @mode2 are equivalent.
++ *
++ * RETURNS:
++ * true if the modes are equal, false otherwise.
++ */
++bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
 +{
++	/* do clock check convert to PICOS so fb modes get matched
++	 * the same */
++	if (mode1->clock && mode2->clock) {
++		if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
++			return false;
++	} else if (mode1->clock != mode2->clock)
++		return false;
++
++	if (mode1->hdisplay == mode2->hdisplay &&
++	    mode1->hsync_start == mode2->hsync_start &&
++	    mode1->hsync_end == mode2->hsync_end &&
++	    mode1->htotal == mode2->htotal &&
++	    mode1->hskew == mode2->hskew &&
++	    mode1->vdisplay == mode2->vdisplay &&
++	    mode1->vsync_start == mode2->vsync_start &&
++	    mode1->vsync_end == mode2->vsync_end &&
++	    mode1->vtotal == mode2->vtotal &&
++	    mode1->vscan == mode2->vscan &&
++	    mode1->flags == mode2->flags)
++		return true;
++
++	return false;
 +}
 +
-+void
-+drm_compat_locking_init(struct drm_device *dev)
++/**
++ * drm_mode_validate_size - make sure modes adhere to size constraints
++ * @dev: DRM device
++ * @mode_list: list of modes to check
++ * @maxX: maximum width
++ * @maxY: maximum height
++ * @maxPitch: max pitch
++ *
++ * LOCKING:
++ * Caller must hold a lock protecting @mode_list.
++ *
++ * The DRM device (@dev) has size and pitch limits.  Here we validate the
++ * modes we probed for @dev against those limits and set their status as
++ * necessary.
++ */
++void drm_mode_validate_size(struct drm_device *dev,
++			    struct list_head *mode_list,
++			    int maxX, int maxY, int maxPitch)
 +{
++	struct drm_display_mode *mode;
 +
-+	dev->driver->device_lock = drm_device_lock_mtx;
-+	dev->driver->device_unlock = drm_device_unlock_mtx;
-+	dev->driver->device_lock_sleep = drm_device_sleep_mtx;
-+	dev->driver->device_lock_assert = drm_device_assert_mtx_locked;
-+	dev->driver->device_unlock_assert = drm_device_assert_mtx_unlocked;
-+	dev->driver->device_nonsleepable_unlock_assert =
-+	    drm_device_assert_mtx_unlocked;
++	list_for_each_entry(mode, mode_list, head) {
++		if (maxPitch > 0 && mode->hdisplay > maxPitch)
++			mode->status = MODE_BAD_WIDTH;
++
++		if (maxX > 0 && mode->hdisplay > maxX)
++			mode->status = MODE_VIRTUAL_X;
++
++		if (maxY > 0 && mode->vdisplay > maxY)
++			mode->status = MODE_VIRTUAL_Y;
++	}
 +}
 +
-+void
-+drm_sleep_locking_init(struct drm_device *dev)
++/**
++ * drm_mode_validate_clocks - validate modes against clock limits
++ * @dev: DRM device
++ * @mode_list: list of modes to check
++ * @min: minimum clock rate array
++ * @max: maximum clock rate array
++ * @n_ranges: number of clock ranges (size of arrays)
++ *
++ * LOCKING:
++ * Caller must hold a lock protecting @mode_list.
++ *
++ * Some code may need to check a mode list against the clock limits of the
++ * device in question.  This function walks the mode list, testing to make
++ * sure each mode falls within a given range (defined by @min and @max
++ * arrays) and sets @mode->status as needed.
++ */
++void drm_mode_validate_clocks(struct drm_device *dev,
++			      struct list_head *mode_list,
++			      int *min, int *max, int n_ranges)
 +{
++	struct drm_display_mode *mode;
++	int i;
 +
-+	dev->driver->device_lock = drm_device_lock_struct;
-+	dev->driver->device_unlock = drm_device_unlock_struct;
-+	dev->driver->device_lock_sleep = drm_device_sleep_struct;
-+	dev->driver->device_lock_assert = drm_device_assert_struct_locked;
-+	dev->driver->device_unlock_assert = drm_device_assert_struct_unlocked;
-+	dev->driver->device_nonsleepable_unlock_assert =
-+	    drm_device_assert_struct_nonsleepable_unlocked;
++	list_for_each_entry(mode, mode_list, head) {
++		bool good = false;
++		for (i = 0; i < n_ranges; i++) {
++			if (mode->clock >= min[i] && mode->clock <= max[i]) {
++				good = true;
++				break;
++			}
++		}
++		if (!good)
++			mode->status = MODE_CLOCK_RANGE;
++	}
 +}
 +
++/**
++ * drm_mode_prune_invalid - remove invalid modes from mode list
++ * @dev: DRM device
++ * @mode_list: list of modes to check
++ * @verbose: be verbose about it
++ *
++ * LOCKING:
++ * Caller must hold a lock protecting @mode_list.
++ *
++ * Once mode list generation is complete, a caller can use this routine to
++ * remove invalid modes from a mode list.  If any of the modes have a
++ * status other than %MODE_OK, they are removed from @mode_list and freed.
++ */
++void drm_mode_prune_invalid(struct drm_device *dev,
++			    struct list_head *mode_list, bool verbose)
++{
++	struct drm_display_mode *mode, *t;
++
++	list_for_each_entry_safe(mode, t, mode_list, head) {
++		if (mode->status != MODE_OK) {
++			list_del(&mode->head);
++			if (verbose) {
++				drm_mode_debug_printmodeline(mode);
++				DRM_DEBUG_KMS("Not using %s mode %d\n",
++					mode->name, mode->status);
++			}
++			drm_mode_destroy(dev, mode);
++		}
++	}
++}
++
++/**
++ * drm_mode_compare - compare modes for favorability
++ * @priv: unused
++ * @lh_a: list_head for first mode
++ * @lh_b: list_head for second mode
++ *
++ * LOCKING:
++ * None.
++ *
++ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
++ * which is better.
++ *
++ * RETURNS:
++ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
++ * positive if @lh_b is better than @lh_a.
++ */
++static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
++{
++	struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
++	struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
++	int diff;
++
++	diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
++		((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
++	if (diff)
++		return diff;
++	diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
++	if (diff)
++		return diff;
++	diff = b->clock - a->clock;
++	return diff;
++}
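
To make the sort order concrete, here is a small user-space restatement of the comparison (simplified struct and made-up values, not part of the patch): the preferred flag wins first, then the larger area, then the higher clock.

#include <stdbool.h>
#include <stdio.h>

struct mode {
	const char *name;
	bool preferred;
	int hdisplay, vdisplay, clock;
};

/* Same three-step comparison as drm_mode_compare(); negative = a sorts first. */
static int
mode_cmp(const struct mode *a, const struct mode *b)
{
	int diff;

	diff = (b->preferred != 0) - (a->preferred != 0);
	if (diff)
		return (diff);
	diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
	if (diff)
		return (diff);
	return (b->clock - a->clock);
}

int
main(void)
{
	struct mode a = { "1024x768 (preferred)", true, 1024, 768, 65000 };
	struct mode b = { "1920x1080", false, 1920, 1080, 148500 };

	/* Prints a negative value: the preferred mode beats the bigger one. */
	printf("%s vs %s: %d\n", a.name, b.name, mode_cmp(&a, &b));
	return (0);
}
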
++
++/**
++ * drm_mode_sort - sort mode list
++ * @mode_list: list to sort
++ *
++ * LOCKING:
++ * Caller must hold a lock protecting @mode_list.
++ *
++ * Sort @mode_list by favorability, putting good modes first.
++ */
++void drm_mode_sort(struct list_head *mode_list)
++{
++	drm_list_sort(NULL, mode_list, drm_mode_compare);
++}
++
++/**
++ * drm_mode_connector_list_update - update the mode list for the connector
++ * @connector: the connector to update
++ *
++ * LOCKING:
++ * Caller must hold a lock protecting @mode_list.
++ *
++ * This moves the modes from the @connector probed_modes list to the actual
++ * mode list.  It compares each probed mode against the current list and only
++ * adds modes that are not already present.  Any mode left unverified after
++ * this point will be removed by the prune-invalid pass.
++ */
++void drm_mode_connector_list_update(struct drm_connector *connector)
++{
++	struct drm_display_mode *mode;
++	struct drm_display_mode *pmode, *pt;
++	int found_it;
++
++	list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
++				 head) {
++		found_it = 0;
++		/* go through current modes checking for the new probed mode */
++		list_for_each_entry(mode, &connector->modes, head) {
++			if (drm_mode_equal(pmode, mode)) {
++				found_it = 1;
++				/* if equal delete the probed mode */
++				mode->status = pmode->status;
++				/* Merge type bits together */
++				mode->type |= pmode->type;
++				list_del(&pmode->head);
++				drm_mode_destroy(connector->dev, pmode);
++				break;
++			}
++		}
++
++		if (!found_it) {
++			list_move_tail(&pmode->head, &connector->modes);
++		}
++	}
++}
++
++/**
++ * drm_mode_parse_command_line_for_connector - parse command line for connector
++ * @mode_option - per connector mode option
++ * @connector - connector to parse line for
++ *
++ * This parses the connector-specific and then the generic command line for
++ * modes and options used to configure the connector.
++ *
++ * This uses the same parameters as the fb modedb.c parser:
++ *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
++ *
++ * except for the extra trailing 'e' (force enabled), 'D' (force enabled,
++ * digital) or 'd' (force disabled) character.
++ */
++bool drm_mode_parse_command_line_for_connector(const char *mode_option,
++					       struct drm_connector *connector,
++					       struct drm_cmdline_mode *mode)
++{
++	const char *name;
++	unsigned int namelen;
++	bool res_specified = false, bpp_specified = false, refresh_specified = false;
++	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
++	bool yres_specified = false, cvt = false, rb = false;
++	bool interlace = false, margins = false, was_digit = false;
++	int i;
++	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
++
++#ifdef XXX_CONFIG_FB
++	if (!mode_option)
++		mode_option = fb_mode_option;
++#endif
++
++	if (!mode_option) {
++		mode->specified = false;
++		return false;
++	}
++
++	name = mode_option;
++	namelen = strlen(name);
++	for (i = namelen-1; i >= 0; i--) {
++		switch (name[i]) {
++		case '@':
++			if (!refresh_specified && !bpp_specified &&
++			    !yres_specified && !cvt && !rb && was_digit) {
++				refresh = strtol(&name[i+1], NULL, 10);
++				refresh_specified = true;
++				was_digit = false;
++			} else
++				goto done;
++			break;
++		case '-':
++			if (!bpp_specified && !yres_specified && !cvt &&
++			    !rb && was_digit) {
++				bpp = strtol(&name[i+1], NULL, 10);
++				bpp_specified = true;
++				was_digit = false;
++			} else
++				goto done;
++			break;
++		case 'x':
++			if (!yres_specified && was_digit) {
++				yres = strtol(&name[i+1], NULL, 10);
++				yres_specified = true;
++				was_digit = false;
++			} else
++				goto done;
++			break;
++		case '0' ... '9':
++			was_digit = true;
++			break;
++		case 'M':
++			if (yres_specified || cvt || was_digit)
++				goto done;
++			cvt = true;
++			break;
++		case 'R':
++			if (yres_specified || cvt || rb || was_digit)
++				goto done;
++			rb = true;
++			break;
++		case 'm':
++			if (cvt || yres_specified || was_digit)
++				goto done;
++			margins = true;
++			break;
++		case 'i':
++			if (cvt || yres_specified || was_digit)
++				goto done;
++			interlace = true;
++			break;
++		case 'e':
++			if (yres_specified || bpp_specified || refresh_specified ||
++			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
++				goto done;
++
++			force = DRM_FORCE_ON;
++			break;
++		case 'D':
++			if (yres_specified || bpp_specified || refresh_specified ||
++			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
++				goto done;
++
++			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
++			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
++				force = DRM_FORCE_ON;
++			else
++				force = DRM_FORCE_ON_DIGITAL;
++			break;
++		case 'd':
++			if (yres_specified || bpp_specified || refresh_specified ||
++			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
++				goto done;
++
++			force = DRM_FORCE_OFF;
++			break;
++		default:
++			goto done;
++		}
++	}
++
++	if (i < 0 && yres_specified) {
++		char *ch;
++		xres = strtol(name, &ch, 10);
++		if ((ch != NULL) && (*ch == 'x'))
++			res_specified = true;
++		else
++			i = ch - name;
++	} else if (!yres_specified && was_digit) {
++		/* catch mode that begins with digits but has no 'x' */
++		i = 0;
++	}
++done:
++	if (i >= 0) {
++		printf("parse error at position %i in video mode '%s'\n",
++			i, name);
++		mode->specified = false;
++		return false;
++	}
++
++	if (res_specified) {
++		mode->specified = true;
++		mode->xres = xres;
++		mode->yres = yres;
++	}
++
++	if (refresh_specified) {
++		mode->refresh_specified = true;
++		mode->refresh = refresh;
++	}
++
++	if (bpp_specified) {
++		mode->bpp_specified = true;
++		mode->bpp = bpp;
++	}
++	mode->rb = rb;
++	mode->cvt = cvt;
++	mode->interlace = interlace;
++	mode->margins = margins;
++	mode->force = force;
++
++	return true;
++}
++
++struct drm_display_mode *
++drm_mode_create_from_cmdline_mode(struct drm_device *dev,
++				  struct drm_cmdline_mode *cmd)
++{
++	struct drm_display_mode *mode;
++
++	if (cmd->cvt)
++		mode = drm_cvt_mode(dev,
++				    cmd->xres, cmd->yres,
++				    cmd->refresh_specified ? cmd->refresh : 60,
++				    cmd->rb, cmd->interlace,
++				    cmd->margins);
++	else
++		mode = drm_gtf_mode(dev,
++				    cmd->xres, cmd->yres,
++				    cmd->refresh_specified ? cmd->refresh : 60,
++				    cmd->interlace,
++				    cmd->margins);
++	if (!mode)
++		return NULL;
++
++	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++	return mode;
++}
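
For context, a driver-side caller would combine the two helpers above roughly like this (a hedged sketch against the signatures added by this patch; the option string, the example_ function name and the surrounding kernel context are placeholders, not code from the tree):

static struct drm_display_mode *
example_mode_from_option(struct drm_connector *connector, const char *option)
{
	struct drm_cmdline_mode cmdline;

	/* e.g. option = "1024x768-24@60": xres/yres, bpp 24, refresh 60 */
	if (!drm_mode_parse_command_line_for_connector(option, connector,
	    &cmdline) || !cmdline.specified)
		return (NULL);

	/* Build a CVT or GTF mode from the parsed values. */
	return (drm_mode_create_from_cmdline_mode(connector->dev, &cmdline));
}
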
+diff --git a/sys/dev/drm/drm_pci.c b/sys/dev/drm/drm_pci.c
+index afd6604..15daa12 100644
+--- sys/dev/drm/drm_pci.c
++++ sys/dev/drm/drm_pci.c
+@@ -71,10 +71,8 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
+ 	if (dmah == NULL)
+ 		return NULL;
+ 
+-	/* Make sure we aren't holding locks here */
+-	mtx_assert(&dev->dev_lock, MA_NOTOWNED);
+-	if (mtx_owned(&dev->dev_lock))
+-	    DRM_ERROR("called while holding dev_lock\n");
++	/* Make sure we aren't holding mutexes here */
++	DRM_NONSLEEPABLE_UNLOCK_ASSERT(dev);
+ 	mtx_assert(&dev->dma_lock, MA_NOTOWNED);
+ 	if (mtx_owned(&dev->dma_lock))
+ 	    DRM_ERROR("called while holding dma_lock\n");
+diff --git a/sys/dev/drm/drm_pciids.h b/sys/dev/drm/drm_pciids.h
+index d6c76f8..fbabb63 100644
+--- sys/dev/drm/drm_pciids.h
++++ sys/dev/drm/drm_pciids.h
+@@ -533,6 +533,7 @@
+ 	{0x8086, 0x3577, CHIP_I8XX, "Intel i830M GMCH"}, \
+ 	{0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
+ 	{0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
++	{0x8086, 0x358e, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
+ 	{0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
+ 	{0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
+ 	{0x8086, 0x258a, CHIP_I9XX|CHIP_I915, "Intel E7221 (i915)"}, \
+@@ -544,18 +545,29 @@
+ 	{0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+ 	{0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
+ 	{0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+-	{0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
+-	{0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
+-	{0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
+ 	{0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
++	{0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
+ 	{0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
++	{0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
++	{0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
+ 	{0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Mobile Intel® GM45 Express Chipset"}, \
+ 	{0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel Eaglelake"}, \
+-	{0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \
+-	{0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \
+ 	{0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45/Q43"}, \
+ 	{0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45/G43"}, \
+ 	{0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \
++	{0x8086, 0x2e42, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
++	{0x8086, 0x2e92, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
++	{0x8086, 0x0042, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
++	{0x8086, 0x0046, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
++	{0x8086, 0x0102, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
++	{0x8086, 0x0112, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
++	{0x8086, 0x0122, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
++	{0x8086, 0x0106, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
++	{0x8086, 0x0116, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
++	{0x8086, 0x0126, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
++	{0x8086, 0x010A, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
++	{0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \
++	{0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \
+ 	{0, 0, 0, NULL}
+ 
+ #define imagine_PCI_IDS \
+diff --git a/sys/dev/drm/drm_stub.c b/sys/dev/drm/drm_stub.c
+new file mode 100644
+index 0000000..bcf26cc
+--- /dev/null
++++ sys/dev/drm/drm_stub.c
+@@ -0,0 +1,57 @@
++/**
++ * \file drm_stub.h
++ * Stub support
++ *
++ * \author Rickard E. (Rik) Faith <faith at valinux.com>
++ */
++
++/*
++ * Created: Fri Jan 19 10:48:35 2001 by faith at acm.org
++ *
++ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
 +int
-+drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
-+    struct sysctl_oid *top)
++drm_setmaster_ioctl(struct drm_device *dev, void *data,
++    struct drm_file *file_priv)
 +{
-+	struct sysctl_oid *oid;
 +
-+	snprintf(dev->busid_str, sizeof(dev->busid_str),
-+	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
-+	     dev->pci_slot, dev->pci_func);
-+	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
-+	    CTLFLAG_RD, dev->busid_str, 0, NULL);
-+	if (oid == NULL)
-+		return (ENOMEM);
-+	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
-+	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
-+	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
-+	if (oid == NULL)
-+		return (ENOMEM);
++	DRM_DEBUG("setmaster\n");
 +
++	if (file_priv->master != 0)
++		return (0);
++	return (-EPERM);
++}
++
++int
++drm_dropmaster_ioctl(struct drm_device *dev, void *data,
++    struct drm_file *file_priv)
++{
++
++	DRM_DEBUG("dropmaster\n");
++	if (file_priv->master != 0)
++		return (-EINVAL);
 +	return (0);
 +}
-+
- #if DRM_LINUX
+diff --git a/sys/dev/drm/drm_sysctl.c b/sys/dev/drm/drm_sysctl.c
+index ce3e5b4..cdb5098 100644
+--- sys/dev/drm/drm_sysctl.c
++++ sys/dev/drm/drm_sysctl.c
+@@ -65,12 +65,11 @@ int drm_sysctl_init(struct drm_device *dev)
+ 	int		  i;
  
- #include <sys/sysproto.h>
-Index: sys/dev/drm/mach64_drv.c
-===================================================================
-diff --git sys/dev/drm/mach64_drv.c sys/dev/drm/mach64_drv.c
---- sys/dev/drm/mach64_drv.c	(revision 230124)
-+++ sys/dev/drm/mach64_drv.c	(working copy)
-@@ -74,6 +74,8 @@
- 	dev->driver->major		= DRIVER_MAJOR;
- 	dev->driver->minor		= DRIVER_MINOR;
- 	dev->driver->patchlevel		= DRIVER_PATCHLEVEL;
+ 	info = malloc(sizeof *info, DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+-	if ( !info )
+-		return 1;
+ 	dev->sysctl = info;
+ 
+ 	/* Add the sysctl node for DRI if it doesn't already exist */
+-	drioid = SYSCTL_ADD_NODE( &info->ctx, &sysctl__hw_children, OID_AUTO, "dri", CTLFLAG_RW, NULL, "DRI Graphics");
++	drioid = SYSCTL_ADD_NODE(&info->ctx, &sysctl__hw_children, OID_AUTO,
++	    "dri", CTLFLAG_RW, NULL, "DRI Graphics");
+ 	if (!drioid)
+ 		return 1;
+ 
+@@ -80,46 +79,65 @@ int drm_sysctl_init(struct drm_device *dev)
+ 		if (i <= oid->oid_arg2)
+ 			i = oid->oid_arg2 + 1;
+ 	}
+-	if (i>9)
+-		return 1;
++	if (i > 9)
++		return (1);
+ 	
++	dev->sysctl_node_idx = i;
+ 	/* Add the hw.dri.x for our device */
+ 	info->name[0] = '0' + i;
+ 	info->name[1] = 0;
+-	top = SYSCTL_ADD_NODE( &info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, info->name, CTLFLAG_RW, NULL, NULL);
++	top = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(drioid),
++	    OID_AUTO, info->name, CTLFLAG_RW, NULL, NULL);
+ 	if (!top)
+ 		return 1;
+ 	
+ 	for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
+-		oid = SYSCTL_ADD_OID(&info->ctx, 
+-			SYSCTL_CHILDREN(top), 
+-			OID_AUTO, 
+-			drm_sysctl_list[i].name, 
+-			CTLTYPE_STRING | CTLFLAG_RD, 
+-			dev, 
+-			0, 
+-			drm_sysctl_list[i].f, 
+-			"A", 
++		oid = SYSCTL_ADD_OID(&info->ctx,
++			SYSCTL_CHILDREN(top),
++			OID_AUTO,
++			drm_sysctl_list[i].name,
++			CTLTYPE_STRING | CTLFLAG_RD,
++			dev,
++			0,
++			drm_sysctl_list[i].f,
++			"A",
+ 			NULL);
+ 		if (!oid)
+ 			return 1;
+ 	}
+-	SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(top), OID_AUTO, "debug",
++	SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, "debug",
+ 	    CTLFLAG_RW, &drm_debug_flag, sizeof(drm_debug_flag),
+ 	    "Enable debugging output");
+-
+-	return 0;
++	SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, "notyet",
++	    CTLFLAG_RW, &drm_notyet_flag, sizeof(drm_debug_flag),
++	    "Enable notyet reminders");
 +
-+	drm_compat_locking_init(dev);
++	if (dev->driver->sysctl_init != NULL)
++		dev->driver->sysctl_init(dev, &info->ctx, top);
++
++	SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO,
++	    "vblank_offdelay", CTLFLAG_RW, &drm_vblank_offdelay,
++	    sizeof(drm_vblank_offdelay),
++	    "");
++	SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO,
++	    "timestamp_precision", CTLFLAG_RW, &drm_timestamp_precision,
++	    sizeof(drm_timestamp_precision),
++	    "");
++
++	return (0);
  }
  
- static int
-Index: sys/dev/drm/intel_iic.c
-===================================================================
-diff --git sys/dev/drm/intel_iic.c sys/dev/drm/intel_iic.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/intel_iic.c	(working copy)
-@@ -0,0 +1,698 @@
+ int drm_sysctl_cleanup(struct drm_device *dev)
+ {
+ 	int error;
+-	error = sysctl_ctx_free( &dev->sysctl->ctx );
+ 
++	error = sysctl_ctx_free(&dev->sysctl->ctx);
+ 	free(dev->sysctl, DRM_MEM_DRIVER);
+ 	dev->sysctl = NULL;
++	if (dev->driver->sysctl_cleanup != NULL)
++		dev->driver->sysctl_cleanup(dev);
+ 
+-	return error;
++	return (error);
+ }
+ 
+ #define DRM_SYSCTL_PRINT(fmt, arg...)				\
+@@ -327,16 +345,20 @@ static int drm_vblank_info DRM_SYSCTL_HANDLER_ARGS
+ 	int i;
+ 
+ 	DRM_SYSCTL_PRINT("\ncrtc ref count    last     enabled inmodeset\n");
+-	for(i = 0 ; i < dev->num_crtcs ; i++) {
++	DRM_LOCK();
++	if (dev->_vblank_count == NULL)
++		goto done;
++	for (i = 0 ; i < dev->num_crtcs ; i++) {
+ 		DRM_SYSCTL_PRINT("  %02d  %02d %08d %08d %02d      %02d\n",
+-		    i, atomic_load_acq_32(&dev->vblank[i].refcount),
+-		    atomic_load_acq_32(&dev->vblank[i].count),
+-		    atomic_load_acq_32(&dev->vblank[i].last),
+-		    atomic_load_acq_int(&dev->vblank[i].enabled),
+-		    atomic_load_acq_int(&dev->vblank[i].inmodeset));
++		    i, dev->vblank_refcount[i],
++		    dev->_vblank_count[i],
++		    dev->last_vblank[i],
++		    dev->vblank_enabled[i],
++		    dev->vblank_inmodeset[i]);
+ 	}
++done:
++	DRM_UNLOCK();
+ 
+ 	SYSCTL_OUT(req, "", -1);
+-done:
+ 	return retcode;
+ }
+diff --git a/sys/dev/drm/drm_vm.c b/sys/dev/drm/drm_vm.c
+index 7986856..4d581ad 100644
+--- sys/dev/drm/drm_vm.c
++++ sys/dev/drm/drm_vm.c
+@@ -31,7 +31,8 @@ __FBSDID("$FreeBSD$");
+ #include "dev/drm/drmP.h"
+ #include "dev/drm/drm.h"
+ 
+-int drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
++int
++drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
+     int prot, vm_memattr_t *memattr)
+ {
+ 	struct drm_device *dev = drm_get_device_from_kdev(kdev);
+diff --git a/sys/dev/drm/i915_debug.c b/sys/dev/drm/i915_debug.c
+new file mode 100644
+index 0000000..9f618ae
+--- /dev/null
++++ sys/dev/drm/i915_debug.c
+@@ -0,0 +1,1579 @@
 +/*
-+ * Copyright (c) 2006 Dave Airlie <airlied at linux.ie>
-+ * Copyright © 2006-2008,2010 Intel Corporation
-+ *   Jesse Barnes <jesse.barnes at intel.com>
++ * Copyright © 2008 Intel Corporation
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
@@ -9120,1387 +20512,3855 @@
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + *
 + * Authors:
-+ *	Eric Anholt <eric at anholt.net>
-+ *	Chris Wilson <chris at chris-wilson.co.uk>
++ *    Eric Anholt <eric at anholt.net>
++ *    Keith Packard <keithp at keithp.com>
 + *
-+ * Copyright (c) 2011 The FreeBSD Foundation
-+ * All rights reserved.
-+ *
-+ * This software was developed by Konstantin Belousov under sponsorship from
-+ * the FreeBSD Foundation.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ *    notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ *    notice, this list of conditions and the following disclaimer in the
-+ *    documentation and/or other materials provided with the distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-+ * SUCH DAMAGE.
 + */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
 +#include "dev/drm/drmP.h"
 +#include "dev/drm/drm.h"
 +#include "dev/drm/i915_drm.h"
 +#include "dev/drm/i915_drv.h"
 +#include "dev/drm/intel_drv.h"
-+#include <dev/iicbus/iic.h>
-+#include <dev/iicbus/iiconf.h>
-+#include <dev/iicbus/iicbus.h>
-+#include "iicbus_if.h"
-+#include "iicbb_if.h"
++#include "dev/drm/intel_ringbuffer.h"
 +
-+static int intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs);
-+static void intel_teardown_gmbus_m(struct drm_device *dev, int m);
++#include <sys/sysctl.h>
 +
-+/* Intel GPIO access functions */
++enum {
++	ACTIVE_LIST,
++	FLUSHING_LIST,
++	INACTIVE_LIST,
++	PINNED_LIST,
++	DEFERRED_FREE_LIST,
++};
 +
-+#define I2C_RISEFALL_TIME 20
++static const char *
++yesno(int v)
++{
++	return (v ? "yes" : "no");
++}
 +
-+struct intel_iic_softc {
-+	struct drm_device *drm_dev;
-+	device_t iic_dev;
-+	bool force_bit_dev;
-+	char name[32];
-+	uint32_t reg;
-+	uint32_t reg0;
-+};
++static int
++i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
++{
++	const struct intel_device_info *info = INTEL_INFO(dev);
 +
++	sbuf_printf(m, "gen: %d\n", info->gen);
++	if (HAS_PCH_SPLIT(dev))
++		sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
++#define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x))
++	B(is_mobile);
++	B(is_i85x);
++	B(is_i915g);
++	B(is_i945gm);
++	B(is_g33);
++	B(need_gfx_hws);
++	B(is_g4x);
++	B(is_pineview);
++	B(has_fbc);
++	B(has_pipe_cxsr);
++	B(has_hotplug);
++	B(cursor_needs_physical);
++	B(has_overlay);
++	B(overlay_needs_physical);
++	B(supports_tv);
++	B(has_bsd_ring);
++	B(has_blt_ring);
++#undef B
++
++	return (0);
++}
++
++static const char *
++get_pin_flag(struct drm_i915_gem_object *obj)
++{
++	if (obj->user_pin_count > 0)
++		return "P";
++	else if (obj->pin_count > 0)
++		return "p";
++	else
++		return " ";
++}
++
++static const char *
++get_tiling_flag(struct drm_i915_gem_object *obj)
++{
++	switch (obj->tiling_mode) {
++	default:
++	case I915_TILING_NONE: return (" ");
++	case I915_TILING_X: return ("X");
++	case I915_TILING_Y: return ("Y");
++	}
++}
++
++static const char *
++cache_level_str(int type)
++{
++	switch (type) {
++	case I915_CACHE_NONE: return " uncached";
++	case I915_CACHE_LLC: return " snooped (LLC)";
++	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
++	default: return ("");
++	}
++}
++
 +static void
-+intel_iic_quirk_set(struct drm_i915_private *dev_priv, bool enable)
++describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)
 +{
-+	u32 val;
 +
-+	/* When using bit bashing for I2C, this bit needs to be set to 1 */
-+	if (!IS_PINEVIEW(dev_priv->dev))
-+		return;
++	sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
++		   &obj->base,
++		   get_pin_flag(obj),
++		   get_tiling_flag(obj),
++		   obj->base.size / 1024,
++		   obj->base.read_domains,
++		   obj->base.write_domain,
++		   obj->last_rendering_seqno,
++		   obj->last_fenced_seqno,
++		   cache_level_str(obj->cache_level),
++		   obj->dirty ? " dirty" : "",
++		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
++	if (obj->base.name)
++		sbuf_printf(m, " (name: %d)", obj->base.name);
++	if (obj->fence_reg != I915_FENCE_REG_NONE)
++		sbuf_printf(m, " (fence: %d)", obj->fence_reg);
++	if (obj->gtt_space != NULL)
++		sbuf_printf(m, " (gtt offset: %08x, size: %08x)",
++			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
++	if (obj->pin_mappable || obj->fault_mappable) {
++		char s[3], *t = s;
++		if (obj->pin_mappable)
++			*t++ = 'p';
++		if (obj->fault_mappable)
++			*t++ = 'f';
++		*t = '\0';
++		sbuf_printf(m, " (%s mappable)", s);
++	}
++	if (obj->ring != NULL)
++		sbuf_printf(m, " (%s)", obj->ring->name);
++}
 +
-+	val = I915_READ(DSPCLK_GATE_D);
-+	if (enable)
-+		val |= DPCUNIT_CLOCK_GATE_DISABLE;
-+	else
-+		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
-+	I915_WRITE(DSPCLK_GATE_D, val);
++static int
++i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
++{
++	uintptr_t list = (uintptr_t)data;
++	struct list_head *head;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_gem_object *obj;
++	size_t total_obj_size, total_gtt_size;
++	int count;
++
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++
++	switch (list) {
++	case ACTIVE_LIST:
++		sbuf_printf(m, "Active:\n");
++		head = &dev_priv->mm.active_list;
++		break;
++	case INACTIVE_LIST:
++		sbuf_printf(m, "Inactive:\n");
++		head = &dev_priv->mm.inactive_list;
++		break;
++	case PINNED_LIST:
++		sbuf_printf(m, "Pinned:\n");
++		head = &dev_priv->mm.pinned_list;
++		break;
++	case FLUSHING_LIST:
++		sbuf_printf(m, "Flushing:\n");
++		head = &dev_priv->mm.flushing_list;
++		break;
++	case DEFERRED_FREE_LIST:
++		sbuf_printf(m, "Deferred free:\n");
++		head = &dev_priv->mm.deferred_free_list;
++		break;
++	default:
++		DRM_UNLOCK();
++		return (EINVAL);
++	}
++
++	total_obj_size = total_gtt_size = count = 0;
++	list_for_each_entry(obj, head, mm_list) {
++		sbuf_printf(m, "   ");
++		describe_obj(m, obj);
++		sbuf_printf(m, "\n");
++		total_obj_size += obj->base.size;
++		total_gtt_size += obj->gtt_space->size;
++		count++;
++	}
++	DRM_UNLOCK();
++
++	sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
++		   count, total_obj_size, total_gtt_size);
++	return (0);
 +}
 +
-+static u32
-+intel_iic_get_reserved(device_t idev)
++#define count_objects(list, member) do { \
++	list_for_each_entry(obj, list, member) { \
++		size += obj->gtt_space->size; \
++		++count; \
++		if (obj->map_and_fenceable) { \
++			mappable_size += obj->gtt_space->size; \
++			++mappable_count; \
++		} \
++	} \
++} while (0)
++
++static int
++i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
 +{
-+	struct intel_iic_softc *sc;
-+	struct drm_device *dev;
-+	struct drm_i915_private *dev_priv;
-+	u32 reserved;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 count, mappable_count;
++	size_t size, mappable_size;
++	struct drm_i915_gem_object *obj;
 +
-+	sc = device_get_softc(idev);
-+	dev = sc->drm_dev;
-+	dev_priv = dev->dev_private;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	sbuf_printf(m, "%u objects, %zu bytes\n",
++		   dev_priv->mm.object_count,
++		   dev_priv->mm.object_memory);
 +
-+	if (!IS_I830(dev) && !IS_845G(dev)) {
-+		reserved = I915_READ_NOTRACE(sc->reg) &
-+		    (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
-+	} else {
-+		reserved = 0;
++	size = count = mappable_size = mappable_count = 0;
++	count_objects(&dev_priv->mm.gtt_list, gtt_list);
++	sbuf_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
++		   count, mappable_count, size, mappable_size);
++
++	size = count = mappable_size = mappable_count = 0;
++	count_objects(&dev_priv->mm.active_list, mm_list);
++	count_objects(&dev_priv->mm.flushing_list, mm_list);
++	sbuf_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
++		   count, mappable_count, size, mappable_size);
++
++	size = count = mappable_size = mappable_count = 0;
++	count_objects(&dev_priv->mm.pinned_list, mm_list);
++	sbuf_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
++		   count, mappable_count, size, mappable_size);
++
++	size = count = mappable_size = mappable_count = 0;
++	count_objects(&dev_priv->mm.inactive_list, mm_list);
++	sbuf_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
++		   count, mappable_count, size, mappable_size);
++
++	size = count = mappable_size = mappable_count = 0;
++	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
++	sbuf_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
++		   count, mappable_count, size, mappable_size);
++
++	size = count = mappable_size = mappable_count = 0;
++	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
++		if (obj->fault_mappable) {
++			size += obj->gtt_space->size;
++			++count;
++		}
++		if (obj->pin_mappable) {
++			mappable_size += obj->gtt_space->size;
++			++mappable_count;
++		}
 +	}
++	sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n",
++		   mappable_count, mappable_size);
++	sbuf_printf(m, "%u fault mappable objects, %zu bytes\n",
++		   count, size);
 +
-+	return (reserved);
++	sbuf_printf(m, "%zu [%zu] gtt total\n",
++		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
++	DRM_UNLOCK();
++
++	return (0);
 +}
 +
-+void
-+intel_iic_reset(struct drm_device *dev)
++static int
++i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void* data)
 +{
-+	struct drm_i915_private *dev_priv;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct drm_i915_gem_object *obj;
++	size_t total_obj_size, total_gtt_size;
++	int count;
 +
-+	dev_priv = dev->dev_private;
-+	if (HAS_PCH_SPLIT(dev))
-+		I915_WRITE(PCH_GMBUS0, 0);
-+	else
-+		I915_WRITE(GMBUS0, 0);
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++
++	total_obj_size = total_gtt_size = count = 0;
++	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
++		sbuf_printf(m, "   ");
++		describe_obj(m, obj);
++		sbuf_printf(m, "\n");
++		total_obj_size += obj->base.size;
++		total_gtt_size += obj->gtt_space->size;
++		count++;
++	}
++
++	DRM_UNLOCK();
++
++	sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
++		   count, total_obj_size, total_gtt_size);
++
++	return (0);
 +}
 +
 +static int
-+intel_iicbus_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr)
++i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
 +{
-+	struct intel_iic_softc *sc;
-+	struct drm_device *dev;
++	struct intel_crtc *crtc;
++	struct drm_i915_gem_object *obj;
++	struct intel_unpin_work *work;
++	char pipe;
++	char plane;
 +
-+	sc = device_get_softc(idev);
-+	dev = sc->drm_dev;
++	if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
++		return (0);
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
++		pipe = pipe_name(crtc->pipe);
++		plane = plane_name(crtc->plane);
 +
-+	intel_iic_reset(dev);
++		mtx_lock(&dev->event_lock);
++		work = crtc->unpin_work;
++		if (work == NULL) {
++			sbuf_printf(m, "No flip due on pipe %c (plane %c)\n",
++				   pipe, plane);
++		} else {
++			if (!work->pending) {
++				sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n",
++					   pipe, plane);
++			} else {
++				sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
++					   pipe, plane);
++			}
++			if (work->enable_stall_check)
++				sbuf_printf(m, "Stall check enabled, ");
++			else
++				sbuf_printf(m, "Stall check waiting for page flip ioctl, ");
++			sbuf_printf(m, "%d prepares\n", work->pending);
++
++			if (work->old_fb_obj) {
++				obj = work->old_fb_obj;
++				if (obj)
++					sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
++			}
++			if (work->pending_flip_obj) {
++				obj = work->pending_flip_obj;
++				if (obj)
++					sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
++			}
++		}
++		mtx_unlock(&dev->event_lock);
++	}
++
 +	return (0);
 +}
 +
-+static void
-+intel_iicbb_setsda(device_t idev, int val)
++static int
++i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
 +{
-+	struct intel_iic_softc *sc;
-+	struct drm_i915_private *dev_priv;
-+	u32 reserved;
-+	u32 data_bits;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_gem_request *gem_request;
++	int count;
 +
-+	sc = device_get_softc(idev);
-+	dev_priv = sc->drm_dev->dev_private;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
 +
-+	reserved = intel_iic_get_reserved(idev);
-+	if (val)
-+		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
-+	else
-+		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
-+		    GPIO_DATA_VAL_MASK;
++	count = 0;
++	if (!list_empty(&dev_priv->rings[RCS].request_list)) {
++		sbuf_printf(m, "Render requests:\n");
++		list_for_each_entry(gem_request,
++				    &dev_priv->rings[RCS].request_list,
++				    list) {
++			sbuf_printf(m, "    %d @ %d\n",
++				   gem_request->seqno,
++				   (int) (jiffies - gem_request->emitted_jiffies));
++		}
++		count++;
++	}
++	if (!list_empty(&dev_priv->rings[VCS].request_list)) {
++		sbuf_printf(m, "BSD requests:\n");
++		list_for_each_entry(gem_request,
++				    &dev_priv->rings[VCS].request_list,
++				    list) {
++			sbuf_printf(m, "    %d @ %d\n",
++				   gem_request->seqno,
++				   (int) (jiffies - gem_request->emitted_jiffies));
++		}
++		count++;
++	}
++	if (!list_empty(&dev_priv->rings[BCS].request_list)) {
++		sbuf_printf(m, "BLT requests:\n");
++		list_for_each_entry(gem_request,
++				    &dev_priv->rings[BCS].request_list,
++				    list) {
++			sbuf_printf(m, "    %d @ %d\n",
++				   gem_request->seqno,
++				   (int) (jiffies - gem_request->emitted_jiffies));
++		}
++		count++;
++	}
++	DRM_UNLOCK();
 +
-+	I915_WRITE_NOTRACE(sc->reg, reserved | data_bits);
-+	POSTING_READ(sc->reg);
++	if (count == 0)
++		sbuf_printf(m, "No requests\n");
++
++	return 0;
 +}
 +
 +static void
-+intel_iicbb_setscl(device_t idev, int val)
++i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
 +{
-+	struct intel_iic_softc *sc;
-+	struct drm_i915_private *dev_priv;
-+	u32 clock_bits, reserved;
++	if (ring->get_seqno) {
++		sbuf_printf(m, "Current sequence (%s): %d\n",
++			   ring->name, ring->get_seqno(ring));
++		sbuf_printf(m, "Waiter sequence (%s):  %d\n",
++			   ring->name, ring->waiting_seqno);
++		sbuf_printf(m, "IRQ sequence (%s):     %d\n",
++			   ring->name, ring->irq_seqno);
++	}
++}
 +
-+	sc = device_get_softc(idev);
-+	dev_priv = sc->drm_dev->dev_private;
++static int
++i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int i;
 +
-+	reserved = intel_iic_get_reserved(idev);
-+	if (val)
-+		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
-+	else
-+		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
-+		    GPIO_CLOCK_VAL_MASK;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	for (i = 0; i < I915_NUM_RINGS; i++)
++		i915_ring_seqno_info(m, &dev_priv->rings[i]);
++	DRM_UNLOCK();
++	return (0);
++}
 +
-+	I915_WRITE_NOTRACE(sc->reg, reserved | clock_bits);
-+	POSTING_READ(sc->reg);
++
++static int
++i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int i, pipe;
++
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++
++	if (!HAS_PCH_SPLIT(dev)) {
++		sbuf_printf(m, "Interrupt enable:    %08x\n",
++			   I915_READ(IER));
++		sbuf_printf(m, "Interrupt identity:  %08x\n",
++			   I915_READ(IIR));
++		sbuf_printf(m, "Interrupt mask:      %08x\n",
++			   I915_READ(IMR));
++		for_each_pipe(pipe)
++			sbuf_printf(m, "Pipe %c stat:         %08x\n",
++				   pipe_name(pipe),
++				   I915_READ(PIPESTAT(pipe)));
++	} else {
++		sbuf_printf(m, "North Display Interrupt enable:		%08x\n",
++			   I915_READ(DEIER));
++		sbuf_printf(m, "North Display Interrupt identity:	%08x\n",
++			   I915_READ(DEIIR));
++		sbuf_printf(m, "North Display Interrupt mask:		%08x\n",
++			   I915_READ(DEIMR));
++		sbuf_printf(m, "South Display Interrupt enable:		%08x\n",
++			   I915_READ(SDEIER));
++		sbuf_printf(m, "South Display Interrupt identity:	%08x\n",
++			   I915_READ(SDEIIR));
++		sbuf_printf(m, "South Display Interrupt mask:		%08x\n",
++			   I915_READ(SDEIMR));
++		sbuf_printf(m, "Graphics Interrupt enable:		%08x\n",
++			   I915_READ(GTIER));
++		sbuf_printf(m, "Graphics Interrupt identity:		%08x\n",
++			   I915_READ(GTIIR));
++		sbuf_printf(m, "Graphics Interrupt mask:		%08x\n",
++			   I915_READ(GTIMR));
++	}
++	sbuf_printf(m, "Interrupts received: %d\n",
++		   atomic_read(&dev_priv->irq_received));
++	for (i = 0; i < I915_NUM_RINGS; i++) {
++		if (IS_GEN6(dev) || IS_GEN7(dev)) {
++			sbuf_printf(m, "Graphics Interrupt mask (%s):	%08x\n",
++				   dev_priv->rings[i].name,
++				   I915_READ_IMR(&dev_priv->rings[i]));
++		}
++		i915_ring_seqno_info(m, &dev_priv->rings[i]);
++	}
++	DRM_UNLOCK();
++
++	return (0);
 +}
 +
 +static int
-+intel_iicbb_getsda(device_t idev)
++i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)
 +{
-+	struct intel_iic_softc *sc;
-+	struct drm_i915_private *dev_priv;
-+	u32 reserved;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int i;
 +
-+	sc = device_get_softc(idev);
-+	dev_priv = sc->drm_dev->dev_private;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
 +
-+	reserved = intel_iic_get_reserved(idev);
++	sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
++	sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
++	for (i = 0; i < dev_priv->num_fence_regs; i++) {
++		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 +
-+	I915_WRITE_NOTRACE(sc->reg, reserved | GPIO_DATA_DIR_MASK);
-+	I915_WRITE_NOTRACE(sc->reg, reserved);
-+	return ((I915_READ_NOTRACE(sc->reg) & GPIO_DATA_VAL_IN) != 0);
++		sbuf_printf(m, "Fenced object[%2d] = ", i);
++		if (obj == NULL)
++			sbuf_printf(m, "unused");
++		else
++			describe_obj(m, obj);
++		sbuf_printf(m, "\n");
++	}
++
++	DRM_UNLOCK();
++	return (0);
 +}
 +
 +static int
-+intel_iicbb_getscl(device_t idev)
++i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
 +{
-+	struct intel_iic_softc *sc;
-+	struct drm_i915_private *dev_priv;
-+	u32 reserved;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_ring_buffer *ring;
++	const volatile u32 *hws;
++	int i;
 +
-+	sc = device_get_softc(idev);
-+	dev_priv = sc->drm_dev->dev_private;
++	ring = &dev_priv->rings[(uintptr_t)data];
++	hws = (volatile u32 *)ring->status_page.page_addr;
++	if (hws == NULL)
++		return (0);
 +
-+	reserved = intel_iic_get_reserved(idev);
++	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
++		sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
++			   i * 4,
++			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
++	}
++	return (0);
++}
 +
-+	I915_WRITE_NOTRACE(sc->reg, reserved | GPIO_CLOCK_DIR_MASK);
-+	I915_WRITE_NOTRACE(sc->reg, reserved);
-+	return ((I915_READ_NOTRACE(sc->reg) & GPIO_CLOCK_VAL_IN) != 0);
++static void
++i915_dump_object(struct sbuf *m, struct drm_i915_gem_object *obj)
++{
++	int page, page_count, i;
++	u32 *mem;
++
++	page_count = obj->base.size / PAGE_SIZE;
++	for (page = 0; page < page_count; page++) {
++		mem = pmap_mapdev_attr(obj->base.dev->agp->base +
++		    obj->gtt_offset + page * PAGE_SIZE, PAGE_SIZE,
++		    PAT_WRITE_COMBINING);
++		for (i = 0; i < PAGE_SIZE; i += 4)
++			sbuf_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
++		pmap_unmapdev((vm_offset_t)mem, PAGE_SIZE);
++	}
 +}
 +
 +static int
-+intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
++i915_batchbuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
 +{
-+	struct intel_iic_softc *sc;
-+	struct drm_i915_private *dev_priv;
-+	u8 *buf;
-+	int error, i, reg_offset, unit;
-+	u32 val, loop;
-+	u16 len;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_gem_object *obj;
 +
-+	sc = device_get_softc(idev);
-+	dev_priv = sc->drm_dev->dev_private;
-+	unit = device_get_unit(idev);
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
++		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
++		    sbuf_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
++		    i915_dump_object(m, obj);
++		}
++	}
++	DRM_UNLOCK();
++	return (0);
++}
 +
-+	if (sc->force_bit_dev) {
-+		return (intel_iic_quirk_xfer(dev_priv->bbbus[unit],
-+		    msgs, nmsgs));
++static int
++i915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_ring_buffer *ring;
++
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	ring = &dev_priv->rings[(uintptr_t)data];
++	if (!ring->obj) {
++		sbuf_printf(m, "No ringbuffer setup\n");
++	} else {
++		u8 *virt = ring->virtual_start;
++		uint32_t off;
++
++		for (off = 0; off < ring->size; off += 4) {
++			uint32_t *ptr = (uint32_t *)(virt + off);
++			sbuf_printf(m, "%08x :  %08x\n", off, *ptr);
++		}
 +	}
++	DRM_UNLOCK();
++	return (0);
++}
 +
-+	reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
++static int
++i915_ringbuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_ring_buffer *ring;
 +
-+	I915_WRITE(GMBUS0 + reg_offset, sc->reg0);
++	ring = &dev_priv->rings[(uintptr_t)data];
++	if (ring->size == 0)
++		return (0);
 +
-+	for (i = 0; i < nmsgs; i++) {
-+		len = msgs[i].len;
-+		buf = msgs[i].buf;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
 +
-+		if ((msgs[i].flags & IIC_M_RD) != 0) {
-+			I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT |
-+			    (i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) |
-+			    (len << GMBUS_BYTE_COUNT_SHIFT) |
-+			    (msgs[i].slave << GMBUS_SLAVE_ADDR_SHIFT) |
-+			    GMBUS_SLAVE_READ | GMBUS_SW_RDY);
-+			POSTING_READ(GMBUS2 + reg_offset);
-+			do {
-+				loop = 0;
++	sbuf_printf(m, "Ring %s:\n", ring->name);
++	sbuf_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
++	sbuf_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
++	sbuf_printf(m, "  Size :    %08x\n", ring->size);
++	sbuf_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
++	sbuf_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
++	if (IS_GEN6(dev) || IS_GEN7(dev)) {
++		sbuf_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
++		sbuf_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
++	}
++	sbuf_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
++	sbuf_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
 +
-+				if (_intel_wait_for(sc->drm_dev,
-+				    (I915_READ(GMBUS2 + reg_offset) &
-+					(GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
-+				    50, 1, "915gbr"))
-+					goto timeout;
-+				if ((I915_READ(GMBUS2 + reg_offset) &
-+				    GMBUS_SATOER) != 0)
-+					goto clear_err;
++	DRM_UNLOCK();
 +
-+				val = I915_READ(GMBUS3 + reg_offset);
-+				do {
-+					*buf++ = val & 0xff;
-+					val >>= 8;
-+				} while (--len != 0 && ++loop < 4);
-+			} while (len != 0);
-+		} else {
-+			val = loop = 0;
-+			do {
-+				val |= *buf++ << (8 * loop);
-+			} while (--len != 0 && ++loop < 4);
++	return (0);
++}
 +
-+			I915_WRITE(GMBUS3 + reg_offset, val);
-+			I915_WRITE(GMBUS1 + reg_offset, (i + 1 == nmsgs ?
-+				GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
-+			    (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
-+			    (msgs[i].slave << GMBUS_SLAVE_ADDR_SHIFT) |
-+			    GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
-+			POSTING_READ(GMBUS2+reg_offset);
++static const char *
++ring_str(int ring)
++{
++	switch (ring) {
++	case RING_RENDER: return (" render");
++	case RING_BSD: return (" bsd");
++	case RING_BLT: return (" blt");
++	default: return ("");
++	}
++}
 +
-+			while (len != 0) {
-+				if (_intel_wait_for(sc->drm_dev,
-+				    (I915_READ(GMBUS2 + reg_offset) &
-+					(GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
-+				    50, 1, "915gbw"))
-+					goto timeout;
-+				if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
-+					goto clear_err;
++static const char *
++pin_flag(int pinned)
++{
++	if (pinned > 0)
++		return (" P");
++	else if (pinned < 0)
++		return (" p");
++	else
++		return ("");
++}
 +
-+				val = loop = 0;
-+				do {
-+					val |= *buf++ << (8 * loop);
-+				} while (--len != 0 && ++loop < 4);
++static const char *tiling_flag(int tiling)
++{
++	switch (tiling) {
++	default:
++	case I915_TILING_NONE: return "";
++	case I915_TILING_X: return " X";
++	case I915_TILING_Y: return " Y";
++	}
++}
 +
-+				I915_WRITE(GMBUS3 + reg_offset, val);
-+				POSTING_READ(GMBUS2 + reg_offset);
++static const char *dirty_flag(int dirty)
++{
++	return dirty ? " dirty" : "";
++}
++
++static const char *purgeable_flag(int purgeable)
++{
++	return purgeable ? " purgeable" : "";
++}
++
++static void print_error_buffers(struct sbuf *m, const char *name,
++    struct drm_i915_error_buffer *err, int count)
++{
++
++	sbuf_printf(m, "%s [%d]:\n", name, count);
++
++	while (count--) {
++		sbuf_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
++			   err->gtt_offset,
++			   err->size,
++			   err->read_domains,
++			   err->write_domain,
++			   err->seqno,
++			   pin_flag(err->pinned),
++			   tiling_flag(err->tiling),
++			   dirty_flag(err->dirty),
++			   purgeable_flag(err->purgeable),
++			   ring_str(err->ring),
++			   cache_level_str(err->cache_level));
++
++		if (err->name)
++			sbuf_printf(m, " (name: %d)", err->name);
++		if (err->fence_reg != I915_FENCE_REG_NONE)
++			sbuf_printf(m, " (fence: %d)", err->fence_reg);
++
++		sbuf_printf(m, "\n");
++		err++;
++	}
++}
++
++static int i915_error_state(struct drm_device *dev, struct sbuf *m,
++    void *unused)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_error_state *error;
++	int i, page, offset, elt;
++
++	mtx_lock(&dev_priv->error_lock);
++	if (!dev_priv->first_error) {
++		sbuf_printf(m, "no error state collected\n");
++		goto out;
++	}
++
++	error = dev_priv->first_error;
++
++	sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec,
++	    (intmax_t)error->time.tv_usec);
++	sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
++	sbuf_printf(m, "EIR: 0x%08x\n", error->eir);
++	sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
++	if (INTEL_INFO(dev)->gen >= 6) {
++		sbuf_printf(m, "ERROR: 0x%08x\n", error->error);
++		sbuf_printf(m, "Blitter command stream:\n");
++		sbuf_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
++		sbuf_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
++		sbuf_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
++		sbuf_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
++		sbuf_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
++		sbuf_printf(m, "Video (BSD) command stream:\n");
++		sbuf_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
++		sbuf_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
++		sbuf_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
++		sbuf_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
++		sbuf_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
++	}
++	sbuf_printf(m, "Render command stream:\n");
++	sbuf_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
++	sbuf_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
++	sbuf_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
++	sbuf_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
++	if (INTEL_INFO(dev)->gen >= 4) {
++		sbuf_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
++		sbuf_printf(m, "  INSTPS: 0x%08x\n", error->instps);
++	}
++	sbuf_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
++	sbuf_printf(m, "  seqno: 0x%08x\n", error->seqno);
++
++	for (i = 0; i < dev_priv->num_fence_regs; i++)
++		sbuf_printf(m, "  fence[%d] = %08jx\n", i,
++		    (uintmax_t)error->fence[i]);
++
++	if (error->active_bo)
++		print_error_buffers(m, "Active",
++				    error->active_bo,
++				    error->active_bo_count);
++
++	if (error->pinned_bo)
++		print_error_buffers(m, "Pinned",
++				    error->pinned_bo,
++				    error->pinned_bo_count);
++
++	for (i = 0; i < DRM_ARRAY_SIZE(error->batchbuffer); i++) {
++		if (error->batchbuffer[i]) {
++			struct drm_i915_error_object *obj = error->batchbuffer[i];
++
++			sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n",
++				   dev_priv->rings[i].name,
++				   obj->gtt_offset);
++			offset = 0;
++			for (page = 0; page < obj->page_count; page++) {
++				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
++					sbuf_printf(m, "%08x :  %08x\n",
++					    offset, obj->pages[page][elt]);
++					offset += 4;
++				}
 +			}
 +		}
++	}
 +
-+		if (i + 1 < nmsgs && _intel_wait_for(sc->drm_dev,
-+		    (I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER |
-+			GMBUS_HW_WAIT_PHASE)) != 0,
-+		    50, 1, "915gbh"))
-+			goto timeout;
-+		if ((I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) != 0)
-+			goto clear_err;
++	for (i = 0; i < DRM_ARRAY_SIZE(error->ringbuffer); i++) {
++		if (error->ringbuffer[i]) {
++			struct drm_i915_error_object *obj = error->ringbuffer[i];
++			sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n",
++				   dev_priv->rings[i].name,
++				   obj->gtt_offset);
++			offset = 0;
++			for (page = 0; page < obj->page_count; page++) {
++				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
++					sbuf_printf(m, "%08x :  %08x\n",
++						   offset,
++						   obj->pages[page][elt]);
++					offset += 4;
++				}
++			}
++		}
 +	}
 +
-+	error = 0;
-+done:
-+	/* Mark the GMBUS interface as disabled. We will re-enable it at the
-+	 * start of the next xfer, till then let it sleep.
-+	 */
-+	I915_WRITE(GMBUS0 + reg_offset, 0);
-+	return (error);
++	if (error->overlay)
++		intel_overlay_print_error_state(m, error->overlay);
 +
-+clear_err:
-+	/* Toggle the Software Clear Interrupt bit. This has the effect
-+	 * of resetting the GMBUS controller and so clearing the
-+	 * BUS_ERROR raised by the slave's NAK.
-+	 */
-+	I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
-+	I915_WRITE(GMBUS1 + reg_offset, 0);
-+	error = EIO;
-+	goto done;
++	if (error->display)
++		intel_display_print_error_state(m, dev, error->display);
 +
-+timeout:
-+	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
-+	    sc->reg0 & 0xff, sc->name);
-+	I915_WRITE(GMBUS0 + reg_offset, 0);
++out:
++	mtx_unlock(&dev_priv->error_lock);
 +
-+	/*
-+	 * Hardware may not support GMBUS over these pins?
-+	 * Try GPIO bitbanging instead.
-+	 */
-+	sc->force_bit_dev = true;
++	return (0);
++}
 +
-+	return (intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs));
++static int
++i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u16 crstanddelay;
++
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	crstanddelay = I915_READ16(CRSTANDVID);
++	DRM_UNLOCK();
++
++	sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n",
++	    (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
++
++	return 0;
 +}
 +
-+void
-+intel_gmbus_set_speed(device_t idev, int speed)
++static int
++i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
 +{
-+	struct intel_iic_softc *sc;
++	drm_i915_private_t *dev_priv = dev->dev_private;
 +
-+	sc = device_get_softc(device_get_parent(idev));
++	if (IS_GEN5(dev)) {
++		u16 rgvswctl = I915_READ16(MEMSWCTL);
++		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 +
-+	sc->reg0 = (sc->reg0 & ~(0x3 << 8)) | speed;
++		sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
++		sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
++		sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
++			   MEMSTAT_VID_SHIFT);
++		sbuf_printf(m, "Current P-state: %d\n",
++			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
++	} else if (IS_GEN6(dev)) {
++		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
++		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
++		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
++		u32 rpstat;
++		u32 rpupei, rpcurup, rpprevup;
++		u32 rpdownei, rpcurdown, rpprevdown;
++		int max_freq;
++
++		/* RPSTAT1 is in the GT power well */
++		if (sx_xlock_sig(&dev->dev_struct_lock))
++			return (EINTR);
++		gen6_gt_force_wake_get(dev_priv);
++
++		rpstat = I915_READ(GEN6_RPSTAT1);
++		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
++		rpcurup = I915_READ(GEN6_RP_CUR_UP);
++		rpprevup = I915_READ(GEN6_RP_PREV_UP);
++		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
++		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
++		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
++
++		gen6_gt_force_wake_put(dev_priv);
++		DRM_UNLOCK();
++
++		sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
++		sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
++		sbuf_printf(m, "Render p-state ratio: %d\n",
++			   (gt_perf_status & 0xff00) >> 8);
++		sbuf_printf(m, "Render p-state VID: %d\n",
++			   gt_perf_status & 0xff);
++		sbuf_printf(m, "Render p-state limit: %d\n",
++			   rp_state_limits & 0xff);
++		sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
++						GEN6_CAGF_SHIFT) * 50);
++		sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei &
++			   GEN6_CURICONT_MASK);
++		sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup &
++			   GEN6_CURBSYTAVG_MASK);
++		sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup &
++			   GEN6_CURBSYTAVG_MASK);
++		sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
++			   GEN6_CURIAVG_MASK);
++		sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
++			   GEN6_CURBSYTAVG_MASK);
++		sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
++			   GEN6_CURBSYTAVG_MASK);
++
++		max_freq = (rp_state_cap & 0xff0000) >> 16;
++		sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n",
++			   max_freq * 50);
++
++		max_freq = (rp_state_cap & 0xff00) >> 8;
++		sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n",
++			   max_freq * 50);
++
++		max_freq = rp_state_cap & 0xff;
++		sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
++			   max_freq * 50);
++	} else {
++		sbuf_printf(m, "no P-state info available\n");
++	}
++
++	return 0;
 +}
 +
-+void
-+intel_gmbus_force_bit(device_t idev, bool force_bit)
++static int
++i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)
 +{
-+	struct intel_iic_softc *sc;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 delayfreq;
++	int i;
 +
-+	sc = device_get_softc(device_get_parent(idev));
-+	sc->force_bit_dev = force_bit;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	for (i = 0; i < 16; i++) {
++		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
++		sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
++			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
++	}
++	DRM_UNLOCK();
++	return (0);
 +}
 +
++static inline int
++MAP_TO_MV(int map)
++{
++	return 1250 - (map * 25);
++}
++
 +static int
-+intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs)
++i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)
 +{
-+	device_t bridge_dev;
-+	struct intel_iic_softc *sc;
-+	struct drm_i915_private *dev_priv;
-+	int ret;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 inttoext;
 +	int i;
 +
-+	bridge_dev = device_get_parent(device_get_parent(idev));
-+	sc = device_get_softc(bridge_dev);
-+	dev_priv = sc->drm_dev->dev_private;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	for (i = 1; i <= 32; i++) {
++		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
++		sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
++	}
++	DRM_UNLOCK();
 +
-+	intel_iic_reset(sc->drm_dev);
-+	intel_iic_quirk_set(dev_priv, true);
-+	IICBB_SETSDA(bridge_dev, 1);
-+	IICBB_SETSCL(bridge_dev, 1);
-+	DELAY(I2C_RISEFALL_TIME);
++	return (0);
++}
 +
-+	/* convert slave addresses to format expected by iicbb */
-+	for (i = 0; i < nmsgs; i++)
-+		msgs[i].slave <<= 1;
-+	ret = iicbus_transfer(idev, msgs, nmsgs);
-+	/* restore just in case */
-+	for (i = 0; i < nmsgs; i++)
-+		msgs[i].slave >>= 1;
-+	IICBB_SETSDA(bridge_dev, 1);
-+	IICBB_SETSCL(bridge_dev, 1);
-+	intel_iic_quirk_set(dev_priv, false);
++static int
++ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 rgvmodectl;
++	u32 rstdbyctl;
++	u16 crstandvid;
 +
-+	return (ret);
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	rgvmodectl = I915_READ(MEMMODECTL);
++	rstdbyctl = I915_READ(RSTDBYCTL);
++	crstandvid = I915_READ16(CRSTANDVID);
++	DRM_UNLOCK();
++
++	sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
++		   "yes" : "no");
++	sbuf_printf(m, "Boost freq: %d\n",
++		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
++		   MEMMODE_BOOST_FREQ_SHIFT);
++	sbuf_printf(m, "HW control enabled: %s\n",
++		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
++	sbuf_printf(m, "SW control enabled: %s\n",
++		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
++	sbuf_printf(m, "Gated voltage change: %s\n",
++		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
++	sbuf_printf(m, "Starting frequency: P%d\n",
++		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
++	sbuf_printf(m, "Max P-state: P%d\n",
++		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
++	sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
++	sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
++	sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
++	sbuf_printf(m, "Render standby enabled: %s\n",
++		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
++	sbuf_printf(m, "Current RS state: ");
++	switch (rstdbyctl & RSX_STATUS_MASK) {
++	case RSX_STATUS_ON:
++		sbuf_printf(m, "on\n");
++		break;
++	case RSX_STATUS_RC1:
++		sbuf_printf(m, "RC1\n");
++		break;
++	case RSX_STATUS_RC1E:
++		sbuf_printf(m, "RC1E\n");
++		break;
++	case RSX_STATUS_RS1:
++		sbuf_printf(m, "RS1\n");
++		break;
++	case RSX_STATUS_RS2:
++		sbuf_printf(m, "RS2 (RC6)\n");
++		break;
++	case RSX_STATUS_RS3:
++		sbuf_printf(m, "RC3 (RC6+)\n");
++		break;
++	default:
++		sbuf_printf(m, "unknown\n");
++		break;
++	}
++
++	return 0;
 +}
 +
-+static const char *gpio_names[GMBUS_NUM_PORTS] = {
-+	"disabled",
-+	"ssc",
-+	"vga",
-+	"panel",
-+	"dpc",
-+	"dpb",
-+	"reserved",
-+	"dpd",
-+};
++static int
++gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 rpmodectl1, gt_core_status, rcctl1;
++	unsigned forcewake_count;
++	int count=0;
 +
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++
++	mtx_lock(&dev_priv->gt_lock);
++	forcewake_count = dev_priv->forcewake_count;
++	mtx_unlock(&dev_priv->gt_lock);
++
++	if (forcewake_count) {
++		sbuf_printf(m, "RC information inaccurate because userspace "
++			      "holds a reference \n");
++	} else {
++		/* NB: we cannot use forcewake, else we read the wrong values */
++		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
++			DRM_UDELAY(10);
++		sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51));
++	}
++
++	gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS);
++	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
++
++	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
++	rcctl1 = I915_READ(GEN6_RC_CONTROL);
++	DRM_UNLOCK();
++
++	sbuf_printf(m, "Video Turbo Mode: %s\n",
++		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
++	sbuf_printf(m, "HW control enabled: %s\n",
++		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
++	sbuf_printf(m, "SW control enabled: %s\n",
++		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
++			  GEN6_RP_MEDIA_SW_MODE));
++	sbuf_printf(m, "RC1e Enabled: %s\n",
++		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
++	sbuf_printf(m, "RC6 Enabled: %s\n",
++		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
++	sbuf_printf(m, "Deep RC6 Enabled: %s\n",
++		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
++	sbuf_printf(m, "Deepest RC6 Enabled: %s\n",
++		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
++	sbuf_printf(m, "Current RC state: ");
++	switch (gt_core_status & GEN6_RCn_MASK) {
++	case GEN6_RC0:
++		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
++			sbuf_printf(m, "Core Power Down\n");
++		else
++			sbuf_printf(m, "on\n");
++		break;
++	case GEN6_RC3:
++		sbuf_printf(m, "RC3\n");
++		break;
++	case GEN6_RC6:
++		sbuf_printf(m, "RC6\n");
++		break;
++	case GEN6_RC7:
++		sbuf_printf(m, "RC7\n");
++		break;
++	default:
++		sbuf_printf(m, "Unknown\n");
++		break;
++	}
++
++	sbuf_printf(m, "Core Power Down: %s\n",
++		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
++	return 0;
++}
++
++static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused)
++{
++
++	if (IS_GEN6(dev) || IS_GEN7(dev))
++		return (gen6_drpc_info(dev, m));
++	else
++		return (ironlake_drpc_info(dev, m));
++}
 +static int
-+intel_gmbus_probe(device_t dev)
++i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
 +{
++	drm_i915_private_t *dev_priv = dev->dev_private;
 +
-+	return (BUS_PROBE_SPECIFIC);
++	if (!I915_HAS_FBC(dev)) {
++		sbuf_printf(m, "FBC unsupported on this chipset");
++		return 0;
++	}
++
++	if (intel_fbc_enabled(dev)) {
++		sbuf_printf(m, "FBC enabled");
++	} else {
++		sbuf_printf(m, "FBC disabled: ");
++		switch (dev_priv->no_fbc_reason) {
++		case FBC_NO_OUTPUT:
++			sbuf_printf(m, "no outputs");
++			break;
++		case FBC_STOLEN_TOO_SMALL:
++			sbuf_printf(m, "not enough stolen memory");
++			break;
++		case FBC_UNSUPPORTED_MODE:
++			sbuf_printf(m, "mode not supported");
++			break;
++		case FBC_MODE_TOO_LARGE:
++			sbuf_printf(m, "mode too large");
++			break;
++		case FBC_BAD_PLANE:
++			sbuf_printf(m, "FBC unsupported on plane");
++			break;
++		case FBC_NOT_TILED:
++			sbuf_printf(m, "scanout buffer not tiled");
++			break;
++		case FBC_MULTIPLE_PIPES:
++			sbuf_printf(m, "multiple pipes are enabled");
++			break;
++		default:
++			sbuf_printf(m, "unknown reason");
++		}
++	}
++	return 0;
 +}
 +
 +static int
-+intel_gmbus_attach(device_t idev)
++i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
 +{
-+	struct drm_i915_private *dev_priv;
-+	struct intel_iic_softc *sc;
-+	int pin;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	bool sr_enabled = false;
 +
-+	sc = device_get_softc(idev);
-+	sc->drm_dev = device_get_softc(device_get_parent(idev));
-+	dev_priv = sc->drm_dev->dev_private;
-+	pin = device_get_unit(idev);
++	if (HAS_PCH_SPLIT(dev))
++		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
++	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
++		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
++	else if (IS_I915GM(dev))
++		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
++	else if (IS_PINEVIEW(dev))
++		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 +
-+	snprintf(sc->name, sizeof(sc->name), "gmbus bus %s", gpio_names[pin]);
-+	device_set_desc(idev, sc->name);
++	sbuf_printf(m, "self-refresh: %s",
++		   sr_enabled ? "enabled" : "disabled");
 +
-+	/* By default use a conservative clock rate */
-+	sc->reg0 = pin | GMBUS_RATE_100KHZ;
++	return (0);
++}
 +
-+	/* XXX force bit banging until GMBUS is fully debugged */
-+	if (IS_GEN2(sc->drm_dev)) {
-+		sc->force_bit_dev = true;
++static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
++    void *unused)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int gpu_freq, ia_freq;
++
++	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
++		sbuf_printf(m, "unsupported on this chipset");
++		return (0);
 +	}
 +
-+	/* add bus interface device */
-+	sc->iic_dev = device_add_child(idev, "iicbus", -1);
-+	if (sc->iic_dev == NULL)
-+		return (ENXIO);
-+	device_quiet(sc->iic_dev);
-+	bus_generic_attach(idev);
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
 +
++	sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
++
++	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
++	     gpu_freq++) {
++		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
++		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
++			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
++		if (_intel_wait_for(dev,
++		    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
++		    10, 1, "915frq")) {
++			DRM_ERROR("pcode read of freq table timed out\n");
++			continue;
++		}
++		ia_freq = I915_READ(GEN6_PCODE_DATA);
++		sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
++	}
++
++	DRM_UNLOCK();
++
 +	return (0);
 +}
 +
 +static int
-+intel_gmbus_detach(device_t idev)
++i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)
 +{
-+	struct intel_iic_softc *sc;
-+	struct drm_i915_private *dev_priv;
-+	device_t child;
-+	int u;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	unsigned long temp, chipset, gfx;
 +
-+	sc = device_get_softc(idev);
-+	u = device_get_unit(idev);
-+	dev_priv = sc->drm_dev->dev_private;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	temp = i915_mch_val(dev_priv);
++	chipset = i915_chipset_val(dev_priv);
++	gfx = i915_gfx_val(dev_priv);
++	DRM_UNLOCK();
 +
-+	child = sc->iic_dev;
-+	bus_generic_detach(idev);
-+	if (child != NULL)
-+		device_delete_child(idev, child);
++	sbuf_printf(m, "GMCH temp: %ld\n", temp);
++	sbuf_printf(m, "Chipset power: %ld\n", chipset);
++	sbuf_printf(m, "GFX power: %ld\n", gfx);
++	sbuf_printf(m, "Total power: %ld\n", chipset + gfx);
 +
++	return 0;
++}
++
++static int
++i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
++	DRM_UNLOCK();
++
 +	return (0);
 +}
 +
++#if 0
 +static int
-+intel_iicbb_probe(device_t dev)
++i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)
 +{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_opregion *opregion = &dev_priv->opregion;
 +
-+	return (BUS_PROBE_DEFAULT);
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
++	if (opregion->header)
++		seq_write(m, opregion->header, OPREGION_SIZE);
++	DRM_UNLOCK();
++
++	return 0;
 +}
++#endif
 +
 +static int
-+intel_iicbb_attach(device_t idev)
++i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
 +{
-+	static const int map_pin_to_reg[] = {
-+		0,
-+		GPIOB,
-+		GPIOA,
-+		GPIOC,
-+		GPIOD,
-+		GPIOE,
-+		0,
-+		GPIOF
-+	};
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_fbdev *ifbdev;
++	struct intel_framebuffer *fb;
 +
-+	struct intel_iic_softc *sc;
-+	struct drm_i915_private *dev_priv;
-+	int pin;
++	if (sx_xlock_sig(&dev->dev_struct_lock))
++		return (EINTR);
 +
-+	sc = device_get_softc(idev);
-+	sc->drm_dev = device_get_softc(device_get_parent(idev));
-+	dev_priv = sc->drm_dev->dev_private;
-+	pin = device_get_unit(idev);
++	ifbdev = dev_priv->fbdev;
++	if (ifbdev == NULL) {
++		DRM_UNLOCK();
++		return (0);
++	}
++	fb = to_intel_framebuffer(ifbdev->helper.fb);
 +
-+	snprintf(sc->name, sizeof(sc->name), "i915 iicbb %s", gpio_names[pin]);
-+	device_set_desc(idev, sc->name);
++	sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
++		   fb->base.width,
++		   fb->base.height,
++		   fb->base.depth,
++		   fb->base.bits_per_pixel);
++	describe_obj(m, fb->obj);
++	sbuf_printf(m, "\n");
 +
-+	sc->reg0 = pin | GMBUS_RATE_100KHZ;
-+	sc->reg = map_pin_to_reg[pin];
-+	if (HAS_PCH_SPLIT(dev_priv->dev))
-+		sc->reg += PCH_GPIOA - GPIOA;
++	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
++		if (&fb->base == ifbdev->helper.fb)
++			continue;
 +
-+	/* add generic bit-banging code */
-+	sc->iic_dev = device_add_child(idev, "iicbb", -1);
-+	if (sc->iic_dev == NULL)
-+		return (ENXIO);
-+	device_quiet(sc->iic_dev);
-+	bus_generic_attach(idev);
++		sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
++			   fb->base.width,
++			   fb->base.height,
++			   fb->base.depth,
++			   fb->base.bits_per_pixel);
++		describe_obj(m, fb->obj);
++		sbuf_printf(m, "\n");
++	}
 +
++	DRM_UNLOCK();
++
 +	return (0);
 +}
 +
 +static int
-+intel_iicbb_detach(device_t idev)
++i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
 +{
-+	struct intel_iic_softc *sc;
-+	device_t child;
++	drm_i915_private_t *dev_priv;
++	int ret;
 +
-+	sc = device_get_softc(idev);
-+	child = sc->iic_dev;
-+	bus_generic_detach(idev);
-+	if (child)
-+		device_delete_child(idev, child);
++	if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
++		return (0);
++
++	dev_priv = dev->dev_private;
++	ret = sx_xlock_sig(&dev->mode_config.mutex);
++	if (ret != 0)
++		return (EINTR);
++
++	if (dev_priv->pwrctx != NULL) {
++		sbuf_printf(m, "power context ");
++		describe_obj(m, dev_priv->pwrctx);
++		sbuf_printf(m, "\n");
++	}
++
++	if (dev_priv->renderctx != NULL) {
++		sbuf_printf(m, "render context ");
++		describe_obj(m, dev_priv->renderctx);
++		sbuf_printf(m, "\n");
++	}
++
++	sx_xunlock(&dev->mode_config.mutex);
++
 +	return (0);
 +}
 +
-+static device_method_t intel_gmbus_methods[] = {
-+	DEVMETHOD(device_probe,		intel_gmbus_probe),
-+	DEVMETHOD(device_attach,	intel_gmbus_attach),
-+	DEVMETHOD(device_detach,	intel_gmbus_detach),
-+	DEVMETHOD(iicbus_reset,		intel_iicbus_reset),
-+	DEVMETHOD(iicbus_transfer,	intel_gmbus_transfer),
-+	DEVMETHOD_END
-+};
-+static driver_t intel_gmbus_driver = {
-+	"intel_gmbus",
-+	intel_gmbus_methods,
-+	sizeof(struct intel_iic_softc)
-+};
-+static devclass_t intel_gmbus_devclass;
-+DRIVER_MODULE_ORDERED(intel_gmbus, drm, intel_gmbus_driver,
-+    intel_gmbus_devclass, 0, 0, SI_ORDER_FIRST);
-+DRIVER_MODULE(iicbus, intel_gmbus, iicbus_driver, iicbus_devclass, 0, 0);
++static int
++i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
++    void *data)
++{
++	struct drm_i915_private *dev_priv;
++	unsigned forcewake_count;
 +
-+static device_method_t intel_iicbb_methods[] =	{
-+	DEVMETHOD(device_probe,		intel_iicbb_probe),
-+	DEVMETHOD(device_attach,	intel_iicbb_attach),
-+	DEVMETHOD(device_detach,	intel_iicbb_detach),
++	dev_priv = dev->dev_private;
++	mtx_lock(&dev_priv->gt_lock);
++	forcewake_count = dev_priv->forcewake_count;
++	mtx_unlock(&dev_priv->gt_lock);
 +
-+	DEVMETHOD(bus_add_child,	bus_generic_add_child),
-+	DEVMETHOD(bus_print_child,	bus_generic_print_child),
++	sbuf_printf(m, "forcewake count = %u\n", forcewake_count);
 +
-+	DEVMETHOD(iicbb_callback,	iicbus_null_callback),
-+	DEVMETHOD(iicbb_reset,		intel_iicbus_reset),
-+	DEVMETHOD(iicbb_setsda,		intel_iicbb_setsda),
-+	DEVMETHOD(iicbb_setscl,		intel_iicbb_setscl),
-+	DEVMETHOD(iicbb_getsda,		intel_iicbb_getsda),
-+	DEVMETHOD(iicbb_getscl,		intel_iicbb_getscl),
-+	DEVMETHOD_END
-+};
-+static driver_t intel_iicbb_driver = {
-+	"intel_iicbb",
-+	intel_iicbb_methods,
-+	sizeof(struct intel_iic_softc)
-+};
-+static devclass_t intel_iicbb_devclass;
-+DRIVER_MODULE_ORDERED(intel_iicbb, drm, intel_iicbb_driver,
-+    intel_iicbb_devclass, 0, 0, SI_ORDER_FIRST);
-+DRIVER_MODULE(iicbb, intel_iicbb, iicbb_driver, iicbb_devclass, 0, 0);
++	return (0);
++}
 +
-+int
-+intel_setup_gmbus(struct drm_device *dev)
++static int
++i915_debug_set_wedged(SYSCTL_HANDLER_ARGS)
 +{
-+	struct drm_i915_private *dev_priv;
-+	device_t iic_dev;
-+	int i, ret;
++	struct drm_device *dev;
++	drm_i915_private_t *dev_priv;
++	int error, wedged;
 +
++	dev = arg1;
 +	dev_priv = dev->dev_private;
-+	dev_priv->gmbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
-+	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-+	dev_priv->bbbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
-+	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-+	dev_priv->gmbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
-+	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-+	dev_priv->bbbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
-+	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
++	if (dev_priv == NULL)
++		return (EBUSY);
++	wedged = dev_priv->mm.wedged;
++	error = sysctl_handle_int(oidp, &wedged, 0, req);
++	if (error || !req->newptr)
++		return (error);
++	DRM_INFO("Manually setting wedged to %d\n", wedged);
++	i915_handle_error(dev, wedged);
++	return (error);
++}
 +
++static int
++i915_max_freq(SYSCTL_HANDLER_ARGS)
++{
++	struct drm_device *dev;
++	drm_i915_private_t *dev_priv;
++	int error, max_freq;
++
++	dev = arg1;
++	dev_priv = dev->dev_private;
++	if (dev_priv == NULL)
++		return (EBUSY);
++	max_freq = dev_priv->max_delay * 50;
++	error = sysctl_handle_int(oidp, &max_freq, 0, req);
++	if (error || !req->newptr)
++		return (error);
++	DRM_DEBUG("Manually setting max freq to %d\n", max_freq);
 +	/*
-+	 * The Giant there is recursed, most likely.  Normally, the
-+	 * intel_setup_gmbus() is called from the attach method of the
-+	 * driver.
++	 * Turbo will still be enabled, but won't go above the set value.
 +	 */
-+	mtx_lock(&Giant);
-+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
-+		/*
-+		 * Initialized bbbus_bridge before gmbus_bridge, since
-+		 * gmbus may decide to force quirk transfer in the
-+		 * attachment code.
-+		 */
-+		dev_priv->bbbus_bridge[i] = device_add_child(dev->device,
-+		    "intel_iicbb", i);
-+		if (dev_priv->bbbus_bridge[i] == NULL) {
-+			DRM_ERROR("bbbus bridge %d creation failed\n", i);
-+			ret = ENXIO;
-+			goto err;
-+		}
-+		device_quiet(dev_priv->bbbus_bridge[i]);
-+		ret = device_probe_and_attach(dev_priv->bbbus_bridge[i]);
-+		if (ret != 0) {
-+			DRM_ERROR("bbbus bridge %d attach failed, %d\n", i,
-+			    ret);
-+			goto err;
-+		}
++	dev_priv->max_delay = max_freq / 50;
++	gen6_set_rps(dev, max_freq / 50);
++	return (error);
++}
 +
-+		iic_dev = device_find_child(dev_priv->bbbus_bridge[i], "iicbb",
-+		    -1);
-+		if (iic_dev == NULL) {
-+			DRM_ERROR("bbbus bridge doesn't have iicbb child\n");
-+			goto err;
-+		}
-+		iic_dev = device_find_child(iic_dev, "iicbus", -1);
-+		if (iic_dev == NULL) {
-+			DRM_ERROR(
-+		"bbbus bridge doesn't have iicbus grandchild\n");
-+			goto err;
-+		}
++static int
++i915_cache_sharing(SYSCTL_HANDLER_ARGS)
++{
++	struct drm_device *dev;
++	drm_i915_private_t *dev_priv;
++	int error, snpcr, cache_sharing;
 +
-+		dev_priv->bbbus[i] = iic_dev;
++	dev = arg1;
++	dev_priv = dev->dev_private;
++	if (dev_priv == NULL)
++		return (EBUSY);
++	DRM_LOCK();
++	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
++	DRM_UNLOCK();
++	cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
++	error = sysctl_handle_int(oidp, &cache_sharing, 0, req);
++	if (error || !req->newptr)
++		return (error);
++	if (cache_sharing < 0 || cache_sharing > 3)
++		return (EINVAL);
++	DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing);
 +
-+		dev_priv->gmbus_bridge[i] = device_add_child(dev->device,
-+		    "intel_gmbus", i);
-+		if (dev_priv->gmbus_bridge[i] == NULL) {
-+			DRM_ERROR("gmbus bridge %d creation failed\n", i);
-+			ret = ENXIO;
-+			goto err;
-+		}
-+		device_quiet(dev_priv->gmbus_bridge[i]);
-+		ret = device_probe_and_attach(dev_priv->gmbus_bridge[i]);
-+		if (ret != 0) {
-+			DRM_ERROR("gmbus bridge %d attach failed, %d\n", i,
-+			    ret);
-+			ret = ENXIO;
-+			goto err;
-+		}
++	DRM_LOCK();
++	/* Update the cache sharing policy here as well */
++	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
++	snpcr &= ~GEN6_MBC_SNPCR_MASK;
++	snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT);
++	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
++	DRM_UNLOCK();
++	return (0);
++}
 +
-+		iic_dev = device_find_child(dev_priv->gmbus_bridge[i],
-+		    "iicbus", -1);
-+		if (iic_dev == NULL) {
-+			DRM_ERROR("gmbus bridge doesn't have iicbus child\n");
-+			goto err;
-+		}
-+		dev_priv->gmbus[i] = iic_dev;
++static struct i915_info_sysctl_list {
++	const char *name;
++	int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data);
++	int flags;
++	void *data;
++} i915_info_sysctl_list[] = {
++	{"i915_capabilities", i915_capabilities, 0},
++	{"i915_gem_objects", i915_gem_object_info, 0},
++	{"i915_gem_gtt", i915_gem_gtt_info, 0},
++	{"i915_gem_active", i915_gem_object_list_info, 0, (void *)ACTIVE_LIST},
++	{"i915_gem_flushing", i915_gem_object_list_info, 0,
++	    (void *)FLUSHING_LIST},
++	{"i915_gem_inactive", i915_gem_object_list_info, 0,
++	    (void *)INACTIVE_LIST},
++	{"i915_gem_pinned", i915_gem_object_list_info, 0,
++	    (void *)PINNED_LIST},
++	{"i915_gem_deferred_free", i915_gem_object_list_info, 0,
++	    (void *)DEFERRED_FREE_LIST},
++	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
++	{"i915_gem_request", i915_gem_request_info, 0},
++	{"i915_gem_seqno", i915_gem_seqno_info, 0},
++	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
++	{"i915_gem_interrupt", i915_interrupt_info, 0},
++	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
++	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
++	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
++	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
++	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
++	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
++	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
++	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
++	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
++	{"i915_batchbuffers", i915_batchbuffer_info, 0},
++	{"i915_error_state", i915_error_state, 0},
++	{"i915_rstdby_delays", i915_rstdby_delays, 0},
++	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
++	{"i915_delayfreq_table", i915_delayfreq_table, 0},
++	{"i915_inttoext_table", i915_inttoext_table, 0},
++	{"i915_drpc_info", i915_drpc_info, 0},
++	{"i915_emon_status", i915_emon_status, 0},
++	{"i915_ring_freq_table", i915_ring_freq_table, 0},
++	{"i915_gfxec", i915_gfxec, 0},
++	{"i915_fbc_status", i915_fbc_status, 0},
++	{"i915_sr_status", i915_sr_status, 0},
++#if 0
++	{"i915_opregion", i915_opregion, 0},
++#endif
++	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
++	{"i915_context_status", i915_context_status, 0},
++	{"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 0},
++};
 +
-+		intel_iic_reset(dev);
-+	}
++struct i915_info_sysctl_thunk {
++	struct drm_device *dev;
++	int idx;
++	void *arg;
++};
 +
-+	mtx_unlock(&Giant);
-+	return (0);
++static int
++i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS)
++{
++	struct sbuf m;
++	struct i915_info_sysctl_thunk *thunk;
++	struct drm_device *dev;
++	drm_i915_private_t *dev_priv;
++	int error;
 +
-+err:
-+	intel_teardown_gmbus_m(dev, i);
-+	mtx_unlock(&Giant);
-+	return (ret);
++	thunk = arg1;
++	dev = thunk->dev;
++	dev_priv = dev->dev_private;
++	if (dev_priv == NULL)
++		return (EBUSY);
++	error = sysctl_wire_old_buffer(req, 0);
++	if (error != 0)
++		return (error);
++	sbuf_new_for_sysctl(&m, NULL, 128, req);
++	error = i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
++	    thunk->arg);
++	if (error == 0)
++		error = sbuf_finish(&m);
++	sbuf_delete(&m);
++	return (error);
 +}
 +
-+static void
-+intel_teardown_gmbus_m(struct drm_device *dev, int m)
++extern int i915_gem_sync_exec_requests;
++extern int i915_fix_mi_batchbuffer_end;
++extern int i915_intr_pf;
++extern long i915_gem_wired_pages_cnt;
++
++int
++i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
++    struct sysctl_oid *top)
 +{
-+	struct drm_i915_private *dev_priv;
++	struct sysctl_oid *oid, *info;
++	struct i915_info_sysctl_thunk *thunks;
++	int i, error;
 +
-+	dev_priv = dev->dev_private;
++	thunks = malloc(sizeof(*thunks) * DRM_ARRAY_SIZE(i915_info_sysctl_list),
++	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
++	for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
++		thunks[i].dev = dev;
++		thunks[i].idx = i;
++		thunks[i].arg = i915_info_sysctl_list[i].data;
++	}
++	dev->sysctl_private = thunks;
++	info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info",
++	    CTLFLAG_RW, NULL, NULL);
++	if (info == NULL)
++		return (ENOMEM);
++	for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
++		oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
++		    i915_info_sysctl_list[i].name, CTLTYPE_STRING | CTLFLAG_RD,
++		    &thunks[i], 0, i915_info_sysctl_handler, "A", NULL);
++		if (oid == NULL)
++			return (ENOMEM);
++	}
++	oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
++	    "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt,
++	    NULL);
++	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged",
++	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0,
++	    i915_debug_set_wedged, "I", NULL);
++	if (oid == NULL)
++		return (ENOMEM);
++	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq",
++	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq,
++	    "I", NULL);
++	if (oid == NULL)
++		return (ENOMEM);
++	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
++	    "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
++	    0, i915_cache_sharing, "I", NULL);
++	if (oid == NULL)
++		return (ENOMEM);
++	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec",
++	    CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL);
++	if (oid == NULL)
++		return (ENOMEM);
++	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi",
++	    CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL);
++	if (oid == NULL)
++		return (ENOMEM);
++	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf",
++	    CTLFLAG_RW, &i915_intr_pf, 0, NULL);
++	if (oid == NULL)
++		return (ENOMEM);
 +
-+	free(dev_priv->gmbus, DRM_MEM_DRIVER);
-+	dev_priv->gmbus = NULL;
-+	free(dev_priv->bbbus, DRM_MEM_DRIVER);
-+	dev_priv->bbbus = NULL;
-+	free(dev_priv->gmbus_bridge, DRM_MEM_DRIVER);
-+	dev_priv->gmbus_bridge = NULL;
-+	free(dev_priv->bbbus_bridge, DRM_MEM_DRIVER);
-+	dev_priv->bbbus_bridge = NULL;
++	error = drm_add_busid_modesetting(dev, ctx, top);
++	if (error != 0)
++		return (error);
++
++	return (0);
 +}
 +
 +void
-+intel_teardown_gmbus(struct drm_device *dev)
++i915_sysctl_cleanup(struct drm_device *dev)
 +{
 +
-+	mtx_lock(&Giant);
-+	intel_teardown_gmbus_m(dev, GMBUS_NUM_PORTS);
-+	mtx_unlock(&Giant);
++	free(dev->sysctl_private, DRM_MEM_DRIVER);
 +}
-
-Property changes on: stable/9/sys/dev/drm/intel_iic.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/intel_sprite.c
-===================================================================
-diff --git sys/dev/drm/intel_sprite.c sys/dev/drm/intel_sprite.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/intel_sprite.c	(working copy)
-@@ -0,0 +1,667 @@
-+/*
-+ * Copyright © 2011 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ * Authors:
-+ *   Jesse Barnes <jbarnes at virtuousgeek.org>
-+ *
-+ * New plane/sprite handling.
-+ *
-+ * The older chips had a separate interface for programming plane related
-+ * registers; newer ones are much simpler and we can use the new DRM plane
-+ * support.
-+ */
-+#include "dev/drm/drmP.h"
-+#include "dev/drm/drm.h"
-+#include "dev/drm/i915_drm.h"
-+#include "dev/drm/i915_drv.h"
+diff --git a/sys/dev/drm/i915_dma.c b/sys/dev/drm/i915_dma.c
+index 7f8ddc1..04832fc 100644
+--- sys/dev/drm/i915_dma.c
++++ sys/dev/drm/i915_dma.c
+@@ -33,46 +33,34 @@ __FBSDID("$FreeBSD$");
+ #include "dev/drm/drm.h"
+ #include "dev/drm/i915_drm.h"
+ #include "dev/drm/i915_drv.h"
+-
+-/* Really want an OS-independent resettable timer.  Would like to have
+- * this loop run for (eg) 3 sec, but have the timer reset every time
+- * the head pointer changes, so that EBUSY only happens if the ring
+- * actually stalls for (eg) 3 seconds.
 +#include "dev/drm/intel_drv.h"
-+#include "dev/drm/drm_fourcc.h"
++#include "dev/drm/intel_ringbuffer.h"
 +
-+static void
-+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
-+		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
-+		 unsigned int crtc_w, unsigned int crtc_h,
-+		 uint32_t x, uint32_t y,
-+		 uint32_t src_w, uint32_t src_h)
++static struct drm_i915_private *i915_mch_dev;
++/*
++ * Lock protecting IPS related data structures
++ *   - i915_mch_dev
++ *   - dev_priv->max_delay
++ *   - dev_priv->min_delay
++ *   - dev_priv->fmax
++ *   - dev_priv->gpu_busy
+  */
+-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+-	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+-	u32 last_acthd = I915_READ(acthd_reg);
+-	u32 acthd;
+-	int i;
+-
+-	for (i = 0; i < 100000; i++) {
+-		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+-		acthd = I915_READ(acthd_reg);
+-		ring->space = ring->head - (ring->tail + 8);
+-		if (ring->space < 0)
+-			ring->space += ring->Size;
+-		if (ring->space >= n)
+-			return 0;
+-
+-		if (dev_priv->sarea_priv)
+-			dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
++static struct mtx mchdev_lock;
++MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF);
+ 
+-		if (ring->head != last_head)
+-			i = 0;
++static void i915_pineview_get_mem_freq(struct drm_device *dev);
++static void i915_ironlake_get_mem_freq(struct drm_device *dev);
++static int i915_driver_unload_int(struct drm_device *dev, bool locked);
+ 
+-		if (acthd != last_acthd)
+-			i = 0;
+-
+-		last_head = ring->head;
+-		last_acthd = acthd;
+-		DRM_UDELAY(10 * 1000);
+-	}
++static void i915_write_hws_pga(struct drm_device *dev)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_plane *intel_plane = to_intel_plane(plane);
-+	int pipe = intel_plane->pipe;
-+	u32 sprctl, sprscale = 0;
-+	int pixel_size;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 addr;
+ 
+-	return -EBUSY;
++	addr = dev_priv->status_page_dmah->busaddr;
++	if (INTEL_INFO(dev)->gen >= 4)
++		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
++	I915_WRITE(HWS_PGA, addr);
+ }
+ 
+ /**
+@@ -82,8 +70,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+ static int i915_init_phys_hws(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ 
+-	/* Program Hardware Status Page */
++	/*
++	 * Program Hardware Status Page
++	 * XXXKIB Keep 4GB limit for allocation for now.  This method
++	 * of allocation is used on <= 965 hardware, that has several
++	 * erratas regarding the use of physical memory > 4 GB.
++	 */
+ 	DRM_UNLOCK();
+ 	dev_priv->status_page_dmah =
+ 		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+@@ -92,13 +86,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
+ 		DRM_ERROR("Can not allocate hardware status page\n");
+ 		return -ENOMEM;
+ 	}
+-	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
++	ring->status_page.page_addr = dev_priv->hw_status_page =
++	    dev_priv->status_page_dmah->vaddr;
+ 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+ 
+ 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ 
+-	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+-	DRM_DEBUG("Enabled hardware status page\n");
++	i915_write_hws_pga(dev);
++	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
++	    (uintmax_t)dev_priv->dma_status_page);
+ 	return 0;
+ }
+ 
+@@ -109,6 +105,8 @@ static int i915_init_phys_hws(struct drm_device *dev)
+ static void i915_free_hws(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 +
-+	sprctl = I915_READ(SPRCTL(pipe));
+ 	if (dev_priv->status_page_dmah) {
+ 		drm_pci_free(dev, dev_priv->status_page_dmah);
+ 		dev_priv->status_page_dmah = NULL;
+@@ -116,6 +114,7 @@ static void i915_free_hws(struct drm_device *dev)
+ 
+ 	if (dev_priv->status_gfx_addr) {
+ 		dev_priv->status_gfx_addr = 0;
++		ring->status_page.gfx_addr = 0;
+ 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ 	}
+ 
+@@ -126,13 +125,27 @@ static void i915_free_hws(struct drm_device *dev)
+ void i915_kernel_lost_context(struct drm_device * dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
++	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 +
-+	/* Mask out pixel format bits in case we change it */
-+	sprctl &= ~SPRITE_PIXFORMAT_MASK;
-+	sprctl &= ~SPRITE_RGB_ORDER_RGBX;
-+	sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
++	/*
++	 * We should never lose context on the ring with modesetting
++	 * as we don't expose it to userspace
++	 */
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		return;
+ 
+-	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+-	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
++	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
++	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ 	ring->space = ring->head - (ring->tail + 8);
+ 	if (ring->space < 0)
+-		ring->space += ring->Size;
++		ring->space += ring->size;
 +
-+	switch (fb->pixel_format) {
-+	case DRM_FORMAT_XBGR8888:
-+		sprctl |= SPRITE_FORMAT_RGBX888;
-+		pixel_size = 4;
++#if 1
++	KIB_NOTYET();
++#else
++	if (!dev->primary->master)
++		return;
++#endif
+ 
+ 	if (ring->head == ring->tail && dev_priv->sarea_priv)
+ 		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+@@ -141,6 +154,9 @@ void i915_kernel_lost_context(struct drm_device * dev)
+ static int i915_dma_cleanup(struct drm_device * dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++	int i;
++
++
+ 	/* Make sure interrupts are disabled here because the uninstall ioctl
+ 	 * may not have been called from userspace and after dev_private
+ 	 * is freed, it's too late.
+@@ -148,12 +164,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
+ 	if (dev->irq_enabled)
+ 		drm_irq_uninstall(dev);
+ 
+-	if (dev_priv->ring.virtual_start) {
+-		drm_core_ioremapfree(&dev_priv->ring.map, dev);
+-		dev_priv->ring.virtual_start = NULL;
+-		dev_priv->ring.map.virtual = NULL;
+-		dev_priv->ring.map.size = 0;
+-	}
++	for (i = 0; i < I915_NUM_RINGS; i++)
++		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
+ 
+ 	/* Clear the HWS virtual address at teardown */
+ 	if (I915_NEED_GFX_HWS(dev))
+@@ -165,6 +177,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
+ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
+ 
+ 	dev_priv->sarea = drm_getsarea(dev);
+ 	if (!dev_priv->sarea) {
+@@ -177,34 +190,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+ 	    ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
+ 
+ 	if (init->ring_size != 0) {
+-		if (dev_priv->ring.ring_obj != NULL) {
++		if (LP_RING(dev_priv)->obj != NULL) {
+ 			i915_dma_cleanup(dev);
+ 			DRM_ERROR("Client tried to initialize ringbuffer in "
+ 				  "GEM mode\n");
+ 			return -EINVAL;
+ 		}
+ 
+-		dev_priv->ring.Size = init->ring_size;
+-		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+-
+-		dev_priv->ring.map.offset = init->ring_start;
+-		dev_priv->ring.map.size = init->ring_size;
+-		dev_priv->ring.map.type = 0;
+-		dev_priv->ring.map.flags = 0;
+-		dev_priv->ring.map.mtrr = 0;
+-
+-		drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+-
+-		if (dev_priv->ring.map.virtual == NULL) {
++		ret = intel_render_ring_init_dri(dev,
++						 init->ring_start,
++						 init->ring_size);
++		if (ret) {
+ 			i915_dma_cleanup(dev);
+-			DRM_ERROR("can not ioremap virtual address for"
+-				  " ring buffer\n");
+-			return -ENOMEM;
++			return ret;
+ 		}
+ 	}
+ 
+-	dev_priv->ring.virtual_start = dev_priv->ring.map.virtual;
+-
+ 	dev_priv->cpp = init->cpp;
+ 	dev_priv->back_offset = init->back_offset;
+ 	dev_priv->front_offset = init->front_offset;
+@@ -221,31 +222,27 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+ static int i915_dma_resume(struct drm_device * dev)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ 
+ 	DRM_DEBUG("\n");
+ 
+-	if (!dev_priv->sarea) {
+-		DRM_ERROR("can not find sarea!\n");
+-		return -EINVAL;
+-	}
+-
+-	if (dev_priv->ring.map.virtual == NULL) {
++	if (ring->map.handle == NULL) {
+ 		DRM_ERROR("can not ioremap virtual address for"
+ 			  " ring buffer\n");
+ 		return -ENOMEM;
+ 	}
+ 
+ 	/* Program Hardware Status Page */
+-	if (!dev_priv->hw_status_page) {
++	if (!ring->status_page.page_addr) {
+ 		DRM_ERROR("Can not find hardware status page\n");
+ 		return -EINVAL;
+ 	}
+-	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+-
+-	if (dev_priv->status_gfx_addr != 0)
+-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
++	DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
++	if (ring->status_page.gfx_addr != 0)
++		intel_ring_setup_status_page(ring);
+ 	else
+-		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
++		i915_write_hws_pga(dev);
++
+ 	DRM_DEBUG("Enabled hardware status page\n");
+ 
+ 	return 0;
+@@ -356,9 +353,8 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int i;
+-	RING_LOCALS;
+ 
+-	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
++	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+ 		return -EINVAL;
+ 
+ 	BEGIN_LP_RING((dwords+1)&~1);
+@@ -392,40 +388,54 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
+ }
+ 
+ int i915_emit_box(struct drm_device * dev,
+-		  struct drm_clip_rect __user * boxes,
++		  struct drm_clip_rect *boxes,
+ 		  int i, int DR1, int DR4)
+ {
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_clip_rect box;
+-	RING_LOCALS;
+ 
+ 	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
+ 		return -EFAULT;
+ 	}
+ 
+-	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
++	return (i915_emit_box_p(dev, &box, DR1, DR4));
++}
++
++int
++i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
++    int DR1, int DR4)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++
++	if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
++	    box->x2 <= 0) {
+ 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
+-			  box.x1, box.y1, box.x2, box.y2);
++			  box->x1, box->y1, box->x2, box->y2);
+ 		return -EINVAL;
+ 	}
+ 
+-	if (IS_I965G(dev)) {
+-		BEGIN_LP_RING(4);
++	if (INTEL_INFO(dev)->gen >= 4) {
++		ret = BEGIN_LP_RING(4);
++		if (ret != 0)
++			return (ret);
++
+ 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+-		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+-		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
++		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
++		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
+ 		OUT_RING(DR4);
+-		ADVANCE_LP_RING();
+ 	} else {
+-		BEGIN_LP_RING(6);
++		ret = BEGIN_LP_RING(6);
++		if (ret != 0)
++			return (ret);
++
+ 		OUT_RING(GFX_OP_DRAWRECT_INFO);
+ 		OUT_RING(DR1);
+-		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+-		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
++		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
++		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
+ 		OUT_RING(DR4);
+ 		OUT_RING(0);
+-		ADVANCE_LP_RING();
+ 	}
++	ADVANCE_LP_RING();
+ 
+ 	return 0;
+ }
+@@ -437,23 +447,23 @@ int i915_emit_box(struct drm_device * dev,
+ static void i915_emit_breadcrumb(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	RING_LOCALS;
+ 
+ 	if (++dev_priv->counter > 0x7FFFFFFFUL)
+ 		dev_priv->counter = 0;
+ 	if (dev_priv->sarea_priv)
+ 		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ 
+-	BEGIN_LP_RING(4);
+-	OUT_RING(MI_STORE_DWORD_INDEX);
+-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+-	OUT_RING(dev_priv->counter);
+-	OUT_RING(0);
+-	ADVANCE_LP_RING();
++	if (BEGIN_LP_RING(4) == 0) {
++		OUT_RING(MI_STORE_DWORD_INDEX);
++		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
++		OUT_RING(dev_priv->counter);
++		OUT_RING(0);
++		ADVANCE_LP_RING();
++	}
+ }
+ 
+ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+-				   drm_i915_cmdbuffer_t * cmd)
++    drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
+ {
+ 	int nbox = cmd->num_cliprects;
+ 	int i = 0, count, ret;
+@@ -469,13 +479,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+ 
+ 	for (i = 0; i < count; i++) {
+ 		if (i < nbox) {
+-			ret = i915_emit_box(dev, cmd->cliprects, i,
+-					    cmd->DR1, cmd->DR4);
++			ret = i915_emit_box_p(dev, &cmd->cliprects[i],
++			    cmd->DR1, cmd->DR4);
+ 			if (ret)
+ 				return ret;
+ 		}
+ 
+-		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
++		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -484,14 +494,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+ 	return 0;
+ }
+ 
+-static int i915_dispatch_batchbuffer(struct drm_device * dev,
+-				     drm_i915_batchbuffer_t * batch)
++static int
++i915_dispatch_batchbuffer(struct drm_device * dev,
++    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_clip_rect __user *boxes = batch->cliprects;
+ 	int nbox = batch->num_cliprects;
+-	int i = 0, count;
+-	RING_LOCALS;
++	int i, count, ret;
+ 
+ 	if ((batch->start | batch->used) & 0x7) {
+ 		DRM_ERROR("alignment\n");
+@@ -504,30 +513,36 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+ 
+ 	for (i = 0; i < count; i++) {
+ 		if (i < nbox) {
+-			int ret = i915_emit_box(dev, boxes, i,
+-						batch->DR1, batch->DR4);
++			int ret = i915_emit_box_p(dev, &cliprects[i],
++			    batch->DR1, batch->DR4);
+ 			if (ret)
+ 				return ret;
+ 		}
+ 
+ 		if (!IS_I830(dev) && !IS_845G(dev)) {
+-			BEGIN_LP_RING(2);
+-			if (IS_I965G(dev)) {
+-				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
++			ret = BEGIN_LP_RING(2);
++			if (ret != 0)
++				return (ret);
++
++			if (INTEL_INFO(dev)->gen >= 4) {
++				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
++				    MI_BATCH_NON_SECURE_I965);
+ 				OUT_RING(batch->start);
+ 			} else {
+ 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+ 				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ 			}
+-			ADVANCE_LP_RING();
+ 		} else {
+-			BEGIN_LP_RING(4);
++			ret = BEGIN_LP_RING(4);
++			if (ret != 0)
++				return (ret);
++
+ 			OUT_RING(MI_BATCH_BUFFER);
+ 			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ 			OUT_RING(batch->start + batch->used - 4);
+ 			OUT_RING(0);
+-			ADVANCE_LP_RING();
+ 		}
++		ADVANCE_LP_RING();
+ 	}
+ 
+ 	i915_emit_breadcrumb(dev);
+@@ -538,7 +553,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+ static int i915_dispatch_flip(struct drm_device * dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	RING_LOCALS;
++	int ret;
+ 
+ 	if (!dev_priv->sarea_priv)
+ 		return -EINVAL;
+@@ -550,12 +565,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
+ 
+ 	i915_kernel_lost_context(dev);
+ 
+-	BEGIN_LP_RING(2);
++	ret = BEGIN_LP_RING(10);
++	if (ret)
++		return ret;
+ 	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
+ 	OUT_RING(0);
+-	ADVANCE_LP_RING();
+ 
+-	BEGIN_LP_RING(6);
+ 	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
+ 	OUT_RING(0);
+ 	if (dev_priv->current_page == 0) {
+@@ -566,11 +581,10 @@ static int i915_dispatch_flip(struct drm_device * dev)
+ 		dev_priv->current_page = 0;
+ 	}
+ 	OUT_RING(0);
+-	ADVANCE_LP_RING();
+ 
+-	BEGIN_LP_RING(2);
+ 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
+ 	OUT_RING(0);
++
+ 	ADVANCE_LP_RING();
+ 
+ 	if (++dev_priv->counter > 0x7FFFFFFFUL)
+@@ -578,44 +592,48 @@ static int i915_dispatch_flip(struct drm_device * dev)
+ 	if (dev_priv->sarea_priv)
+ 		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ 
+-	BEGIN_LP_RING(4);
+-	OUT_RING(MI_STORE_DWORD_INDEX);
+-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+-	OUT_RING(dev_priv->counter);
+-	OUT_RING(0);
+-	ADVANCE_LP_RING();
++	if (BEGIN_LP_RING(4) == 0) {
++		OUT_RING(MI_STORE_DWORD_INDEX);
++		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
++		OUT_RING(dev_priv->counter);
++		OUT_RING(0);
++		ADVANCE_LP_RING();
++	}
+ 
+ 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ 	return 0;
+ }
+ 
+-static int i915_quiescent(struct drm_device * dev)
++static int
++i915_quiescent(struct drm_device *dev)
+ {
+-	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
+ 
+ 	i915_kernel_lost_context(dev);
+-	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
++	return (intel_wait_ring_idle(ring));
+ }
+ 
+-static int i915_flush_ioctl(struct drm_device *dev, void *data,
+-			    struct drm_file *file_priv)
++static int
++i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+ 	int ret;
+ 
+ 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ 
++	DRM_LOCK();
+ 	ret = i915_quiescent(dev);
++	DRM_UNLOCK();
+ 
+-	return ret;
++	return (ret);
+ }
+ 
+ static int i915_batchbuffer(struct drm_device *dev, void *data,
+ 			    struct drm_file *file_priv)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+-	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+-	    dev_priv->sarea_priv;
++	drm_i915_sarea_t *sarea_priv;
+ 	drm_i915_batchbuffer_t *batch = data;
++	struct drm_clip_rect *cliprects;
+ 	size_t cliplen;
+ 	int ret;
+ 
+@@ -623,38 +641,38 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
+ 		DRM_ERROR("Batchbuffer ioctl disabled\n");
+ 		return -EINVAL;
+ 	}
++	DRM_UNLOCK();
+ 
+ 	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
+ 		  batch->start, batch->used, batch->num_cliprects);
+ 
+-	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+-
+-	DRM_UNLOCK();
+ 	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
+-	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+-	    cliplen)) {
+-		DRM_LOCK();
++	if (batch->num_cliprects < 0)
+ 		return -EFAULT;
+-	}
+-	if (batch->num_cliprects) {
+-		ret = vslock(batch->cliprects, cliplen);
+-		if (ret) {
+-			DRM_ERROR("Fault wiring cliprects\n");
++	if (batch->num_cliprects != 0) {
++		cliprects = malloc(batch->num_cliprects *
++		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
++		    M_WAITOK | M_ZERO);
++
++		ret = -copyin(batch->cliprects, cliprects,
++		    batch->num_cliprects * sizeof(struct drm_clip_rect));
++		if (ret != 0) {
+ 			DRM_LOCK();
+-			return -EFAULT;
++			goto fail_free;
+ 		}
+-	}
+-
+-	ret = i915_dispatch_batchbuffer(dev, batch);
+-
+-	if (batch->num_cliprects)
+-		vsunlock(batch->cliprects, cliplen);
++	} else
++		cliprects = NULL;
+ 
+ 	DRM_LOCK();
++	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
++	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
+ 
++	sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
+ 	if (sarea_priv)
+ 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ 
++fail_free:
++	free(cliprects, DRM_MEM_DMA);
+ 	return ret;
+ }
+ 
+@@ -662,56 +680,57 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ 			  struct drm_file *file_priv)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+-	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+-	    dev_priv->sarea_priv;
++	drm_i915_sarea_t *sarea_priv;
+ 	drm_i915_cmdbuffer_t *cmdbuf = data;
+-	size_t cliplen;
++	struct drm_clip_rect *cliprects = NULL;
++	void *batch_data;
+ 	int ret;
+ 
+ 	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+ 		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+ 
+-	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
++	if (cmdbuf->num_cliprects < 0)
++		return -EINVAL;
+ 
+ 	DRM_UNLOCK();
+-	cliplen = cmdbuf->num_cliprects * sizeof(struct drm_clip_rect);
+-	if (cmdbuf->num_cliprects && DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+-	    cliplen)) {
+-		DRM_ERROR("Fault accessing cliprects\n");
++
++	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
++
++	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
++	if (ret != 0) {
+ 		DRM_LOCK();
+-		return -EFAULT;
++		goto fail_batch_free;
+ 	}
++
+ 	if (cmdbuf->num_cliprects) {
+-		ret = vslock(cmdbuf->cliprects, cliplen);
+-		if (ret) {
+-			DRM_ERROR("Fault wiring cliprects\n");
+-			DRM_LOCK();
+-			return -EFAULT;
+-		}
+-		ret = vslock(cmdbuf->buf, cmdbuf->sz);
+-		if (ret) {
+-			vsunlock(cmdbuf->cliprects, cliplen);
+-			DRM_ERROR("Fault wiring cmds\n");
++		cliprects = malloc(cmdbuf->num_cliprects *
++		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
++		    M_WAITOK | M_ZERO);
++		ret = -copyin(cmdbuf->cliprects, cliprects,
++		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
++		if (ret != 0) {
+ 			DRM_LOCK();
+-			return -EFAULT;
++			goto fail_clip_free;
+ 		}
+ 	}
+ 
+-	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+-
+-	if (cmdbuf->num_cliprects) {
+-		vsunlock(cmdbuf->buf, cmdbuf->sz);
+-		vsunlock(cmdbuf->cliprects, cliplen);
+-	}
+ 	DRM_LOCK();
++	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
++	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
+ 	if (ret) {
+ 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+-		return ret;
++		goto fail_clip_free;
+ 	}
+ 
++	sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
+ 	if (sarea_priv)
+ 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+-	return 0;
++
++fail_clip_free:
++	free(cliprects, DRM_MEM_DMA);
++fail_batch_free:
++	free(batch_data, DRM_MEM_DMA);
++	return ret;
+ }
+ 
+ static int i915_flip_bufs(struct drm_device *dev, void *data,
+@@ -754,11 +773,44 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ 		value = dev->pci_device;
+ 		break;
+ 	case I915_PARAM_HAS_GEM:
+-		/* We need to reset this to 1 once we have GEM */
+-		value = 0;
++		value = 1;
 +		break;
-+	case DRM_FORMAT_XRGB8888:
-+		sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
-+		pixel_size = 4;
++	case I915_PARAM_NUM_FENCES_AVAIL:
++		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
 +		break;
-+	case DRM_FORMAT_YUYV:
-+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
-+		pixel_size = 2;
++	case I915_PARAM_HAS_OVERLAY:
++		value = dev_priv->overlay ? 1 : 0;
 +		break;
-+	case DRM_FORMAT_YVYU:
-+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
-+		pixel_size = 2;
++	case I915_PARAM_HAS_PAGEFLIPPING:
++		value = 1;
 +		break;
-+	case DRM_FORMAT_UYVY:
-+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
-+		pixel_size = 2;
++	case I915_PARAM_HAS_EXECBUF2:
++		value = 1;
 +		break;
-+	case DRM_FORMAT_VYUY:
-+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
-+		pixel_size = 2;
++	case I915_PARAM_HAS_BSD:
++		value = HAS_BSD(dev);
 +		break;
-+	default:
-+		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
-+		sprctl |= DVS_FORMAT_RGBX888;
-+		pixel_size = 4;
++	case I915_PARAM_HAS_BLT:
++		value = HAS_BLT(dev);
 +		break;
++	case I915_PARAM_HAS_RELAXED_FENCING:
++		value = 1;
++		break;
++	case I915_PARAM_HAS_COHERENT_RINGS:
++		value = 1;
++		break;
++	case I915_PARAM_HAS_EXEC_CONSTANTS:
++		value = INTEL_INFO(dev)->gen >= 4;
++		break;
++	case I915_PARAM_HAS_RELAXED_DELTA:
++		value = 1;
++		break;
++	case I915_PARAM_HAS_GEN7_SOL_RESET:
++		value = 1;
+ 		break;
+ 	default:
+-		DRM_DEBUG("Unknown parameter %d\n", param->param);
++		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
++				 param->param);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -790,6 +842,13 @@ static int i915_setparam(struct drm_device *dev, void *data,
+ 	case I915_SETPARAM_ALLOW_BATCHBUFFER:
+ 		dev_priv->allow_batchbuffer = param->value;
+ 		break;
++	case I915_SETPARAM_NUM_USED_FENCES:
++		if (param->value > dev_priv->num_fence_regs ||
++		    param->value < 0)
++			return -EINVAL;
++		/* Userspace can use first N regs */
++		dev_priv->fence_reg_start = param->value;
++		break;
+ 	default:
+ 		DRM_DEBUG("unknown parameter %d\n", param->param);
+ 		return -EINVAL;
+@@ -803,6 +862,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	drm_i915_hws_addr_t *hws = data;
++	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ 
+ 	if (!I915_NEED_GFX_HWS(dev))
+ 		return -EINVAL;
+@@ -813,8 +873,13 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
+ 	}
+ 
+ 	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		DRM_ERROR("tried to set status page when mode setting active\n");
++		return 0;
 +	}
+ 
+-	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
++	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
++	    hws->addr & (0x1ffff<<12);
+ 
+ 	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
+ 	dev_priv->hws_map.size = 4*1024;
+@@ -825,12 +890,13 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
+ 	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
+ 	if (dev_priv->hws_map.virtual == NULL) {
+ 		i915_dma_cleanup(dev);
+-		dev_priv->status_gfx_addr = 0;
++		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
+ 		DRM_ERROR("can not ioremap virtual address for"
+ 				" G33 hw status page\n");
+ 		return -ENOMEM;
+ 	}
+-	dev_priv->hw_status_page = dev_priv->hws_map.virtual;
++	ring->status_page.page_addr = dev_priv->hw_status_page =
++	    dev_priv->hws_map.virtual;
+ 
+ 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+@@ -840,11 +906,265 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
+ 	return 0;
+ }
+ 
+-int i915_driver_load(struct drm_device *dev, unsigned long flags)
++static int
++i915_load_gem_init(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	unsigned long prealloc_size, gtt_size, mappable_size;
++	int ret;
 +
-+	if (obj->tiling_mode != I915_TILING_NONE)
-+		sprctl |= SPRITE_TILED;
++	prealloc_size = dev_priv->mm.gtt.stolen_size;
++	gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT;
++	mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT;
 +
-+	/* must disable */
-+	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
-+	sprctl |= SPRITE_ENABLE;
-+	sprctl |= SPRITE_DEST_KEY;
++	/* Basic memrange allocator for stolen space */
++	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
 +
-+	/* Sizes are 0 based */
-+	src_w--;
-+	src_h--;
-+	crtc_w--;
-+	crtc_h--;
++	/* Let GEM Manage all of the aperture.
++	 *
++	 * However, leave one page at the end still bound to the scratch page.
++	 * There are a number of places where the hardware apparently
++	 * prefetches past the end of the object, and we've seen multiple
++	 * hangs with the GPU head pointer stuck in a batchbuffer bound
++	 * at the last page of the aperture.  One page should be enough to
++	 * keep any prefetching inside of the aperture.
++	 */
++	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
 +
-+	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
++	DRM_LOCK();
++	ret = i915_gem_init_ringbuffer(dev);
++	DRM_UNLOCK();
++	if (ret != 0)
++		return (ret);
 +
-+	/*
-+	 * IVB workaround: must disable low power watermarks for at least
-+	 * one frame before enabling scaling.  LP watermarks can be re-enabled
-+	 * when scaling is disabled.
-+	 */
-+	if (crtc_w != src_w || crtc_h != src_h) {
-+		dev_priv->sprite_scaling_enabled = true;
-+		sandybridge_update_wm(dev);
-+		intel_wait_for_vblank(dev, pipe);
-+		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
-+	} else {
-+		dev_priv->sprite_scaling_enabled = false;
-+		/* potentially re-enable LP watermarks */
-+		sandybridge_update_wm(dev);
-+	}
++#if 0
++	/* Try to set up FBC with a reasonable compressed buffer size */
++	if (I915_HAS_FBC(dev) && i915_powersave) {
++		int cfb_size;
 +
-+	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
-+	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-+	if (obj->tiling_mode != I915_TILING_NONE) {
-+		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
-+	} else {
-+		unsigned long offset;
++		/* Leave 1M for line length buffer & misc. */
 +
-+		offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
-+		I915_WRITE(SPRLINOFF(pipe), offset);
++		/* Try to get a 32M buffer... */
++		if (prealloc_size > (36*1024*1024))
++			cfb_size = 32*1024*1024;
++		else /* fall back to 7/8 of the stolen space */
++			cfb_size = prealloc_size * 7 / 8;
++		i915_setup_compression(dev, cfb_size);
 +	}
-+	I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
-+	I915_WRITE(SPRSCALE(pipe), sprscale);
-+	I915_WRITE(SPRCTL(pipe), sprctl);
-+	I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
-+	POSTING_READ(SPRSURF(pipe));
++#endif
++
++	/* Allow hardware batchbuffers unless told otherwise. */
++	dev_priv->allow_batchbuffer = 1;
++	return 0;
 +}
 +
-+static void
-+ivb_disable_plane(struct drm_plane *plane)
++static int
++i915_load_modeset_init(struct drm_device *dev)
 +{
-+	struct drm_device *dev = plane->dev;
 +	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_plane *intel_plane = to_intel_plane(plane);
-+	int pipe = intel_plane->pipe;
++	int ret;
 +
-+	I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
-+	/* Can't leave the scaler enabled... */
-+	I915_WRITE(SPRSCALE(pipe), 0);
-+	/* Activate double buffered register update */
-+	I915_WRITE(SPRSURF(pipe), 0);
-+	POSTING_READ(SPRSURF(pipe));
++	ret = intel_parse_bios(dev);
++	if (ret)
++		DRM_INFO("failed to find VBIOS tables\n");
++
++#if 0
++	intel_register_dsm_handler();
++#endif
++
++	/* IIR "flip pending" bit means done if this bit is set */
++	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
++		dev_priv->flip_pending_is_done = true;
++
++	intel_modeset_init(dev);
++
++	ret = i915_load_gem_init(dev);
++	if (ret != 0)
++		goto cleanup_gem;
++
++	intel_modeset_gem_init(dev);
++
++	ret = drm_irq_install(dev);
++	if (ret)
++		goto cleanup_gem;
++
++	dev->vblank_disable_allowed = 1;
++
++	ret = intel_fbdev_init(dev);
++	if (ret)
++		goto cleanup_gem;
++
++	drm_kms_helper_poll_init(dev);
++
++	/* We're off and running w/KMS */
++	dev_priv->mm.suspended = 0;
++
++	return (0);
++
++cleanup_gem:
++	DRM_LOCK();
++	i915_gem_cleanup_ringbuffer(dev);
++	DRM_UNLOCK();
++	return (ret);
 +}
 +
 +static int
-+ivb_update_colorkey(struct drm_plane *plane,
-+		    struct drm_intel_sprite_colorkey *key)
++i915_get_bridge_dev(struct drm_device *dev)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_plane *intel_plane;
-+	u32 sprctl;
-+	int ret = 0;
++	struct drm_i915_private *dev_priv;
 +
-+	intel_plane = to_intel_plane(plane);
++	dev_priv = dev->dev_private;
 +
-+	I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
-+	I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
-+	I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
++	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
++	if (dev_priv->bridge_dev == NULL) {
++		DRM_ERROR("bridge device not found\n");
++		return (-1);
++	}
++	return (0);
++}
 +
-+	sprctl = I915_READ(SPRCTL(intel_plane->pipe));
-+	sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
-+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
-+		sprctl |= SPRITE_DEST_KEY;
-+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
-+		sprctl |= SPRITE_SOURCE_KEY;
-+	I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
++#define MCHBAR_I915 0x44
++#define MCHBAR_I965 0x48
++#define MCHBAR_SIZE (4*4096)
 +
-+	POSTING_READ(SPRKEYMSK(intel_plane->pipe));
++#define DEVEN_REG 0x54
++#define   DEVEN_MCHBAR_EN (1 << 28)
 +
-+	return ret;
++/* Allocate space for the MCH regs if needed, return nonzero on error */
++static int
++intel_alloc_mchbar_resource(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv;
++	device_t vga;
++	int reg;
++	u32 temp_lo, temp_hi;
++	u64 mchbar_addr, temp;
++
++	dev_priv = dev->dev_private;
++	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
++
++	if (INTEL_INFO(dev)->gen >= 4)
++		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
++	else
++		temp_hi = 0;
++	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
++	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
++
++	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
++#ifdef XXX_CONFIG_PNP
++	if (mchbar_addr &&
++	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
++		return 0;
++#endif
++
++	/* Get some space for it */
++	vga = device_get_parent(dev->device);
++	dev_priv->mch_res_rid = 0x100;
++	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
++	    dev->device, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
++	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
++	if (dev_priv->mch_res == NULL) {
++		DRM_ERROR("failed mchbar resource alloc\n");
++		return (-ENOMEM);
++	}
++
++	if (INTEL_INFO(dev)->gen >= 4) {
++		temp = rman_get_start(dev_priv->mch_res);
++		temp >>= 32;
++		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
++	}
++	pci_write_config(dev_priv->bridge_dev, reg,
++	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
++	return (0);
 +}
 +
 +static void
-+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
++intel_setup_mchbar(struct drm_device *dev)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_plane *intel_plane;
-+	u32 sprctl;
++	drm_i915_private_t *dev_priv;
++	int mchbar_reg;
++	u32 temp;
++	bool enabled;
 +
-+	intel_plane = to_intel_plane(plane);
++	dev_priv = dev->dev_private;
++	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 +
-+	key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
-+	key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
-+	key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
-+	key->flags = 0;
++	dev_priv->mchbar_need_disable = false;
 +
-+	sprctl = I915_READ(SPRCTL(intel_plane->pipe));
++	if (IS_I915G(dev) || IS_I915GM(dev)) {
++		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
++		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
++	} else {
++		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
++		enabled = temp & 1;
++	}
 +
-+	if (sprctl & SPRITE_DEST_KEY)
-+		key->flags = I915_SET_COLORKEY_DESTINATION;
-+	else if (sprctl & SPRITE_SOURCE_KEY)
-+		key->flags = I915_SET_COLORKEY_SOURCE;
-+	else
-+		key->flags = I915_SET_COLORKEY_NONE;
++	/* If it's already enabled, don't have to do anything */
++	if (enabled) {
++		DRM_DEBUG("mchbar already enabled\n");
++		return;
++	}
++
++	if (intel_alloc_mchbar_resource(dev))
++		return;
++
++	dev_priv->mchbar_need_disable = true;
++
++	/* Space is allocated or reserved, so enable it. */
++	if (IS_I915G(dev) || IS_I915GM(dev)) {
++		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
++		    temp | DEVEN_MCHBAR_EN, 4);
++	} else {
++		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
++		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
++	}
 +}
 +
 +static void
-+snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
-+		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
-+		 unsigned int crtc_w, unsigned int crtc_h,
-+		 uint32_t x, uint32_t y,
-+		 uint32_t src_w, uint32_t src_h)
++intel_teardown_mchbar(struct drm_device *dev)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_plane *intel_plane = to_intel_plane(plane);
-+	int pipe = intel_plane->pipe, pixel_size;
-+	u32 dvscntr, dvsscale = 0;
++	drm_i915_private_t *dev_priv;
++	device_t vga;
++	int mchbar_reg;
++	u32 temp;
 +
-+	dvscntr = I915_READ(DVSCNTR(pipe));
++	dev_priv = dev->dev_private;
++	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 +
-+	/* Mask out pixel format bits in case we change it */
-+	dvscntr &= ~DVS_PIXFORMAT_MASK;
-+	dvscntr &= ~DVS_RGB_ORDER_RGBX;
-+	dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
++	if (dev_priv->mchbar_need_disable) {
++		if (IS_I915G(dev) || IS_I915GM(dev)) {
++			temp = pci_read_config(dev_priv->bridge_dev,
++			    DEVEN_REG, 4);
++			temp &= ~DEVEN_MCHBAR_EN;
++			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
++			    temp, 4);
++		} else {
++			temp = pci_read_config(dev_priv->bridge_dev,
++			    mchbar_reg, 4);
++			temp &= ~1;
++			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
++			    temp, 4);
++		}
++	}
 +
-+	switch (fb->pixel_format) {
-+	case DRM_FORMAT_XBGR8888:
-+		dvscntr |= DVS_FORMAT_RGBX888;
-+		pixel_size = 4;
++	if (dev_priv->mch_res != NULL) {
++		vga = device_get_parent(dev->device);
++		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->device,
++		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
++		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->device,
++		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
++		dev_priv->mch_res = NULL;
++	}
++}
++
++int
++i915_driver_load(struct drm_device *dev, unsigned long flags)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	unsigned long base, size;
+-	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
++	int mmio_bar, ret;
++
++	ret = 0;
+ 
+ 	/* i915 has 4 more counters */
+ 	dev->counters += 4;
+@@ -853,33 +1173,48 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	dev->types[8] = _DRM_STAT_SECONDARY;
+ 	dev->types[9] = _DRM_STAT_DMA;
+ 
+-	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
++	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
++	    M_ZERO | M_WAITOK);
+ 	if (dev_priv == NULL)
+ 		return -ENOMEM;
+ 
+-	memset(dev_priv, 0, sizeof(drm_i915_private_t));
+-
+ 	dev->dev_private = (void *)dev_priv;
+ 	dev_priv->dev = dev;
++	dev_priv->info = i915_get_device_id(dev->pci_device);
++
++	if (i915_get_bridge_dev(dev)) {
++		free(dev_priv, DRM_MEM_DRIVER);
++		return (-EIO);
++	}
++	dev_priv->mm.gtt = intel_gtt_get();
+ 
+ 	/* Add register map (needed for suspend/resume) */
++	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ 	base = drm_get_resource_start(dev, mmio_bar);
+ 	size = drm_get_resource_len(dev, mmio_bar);
+ 
+ 	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+ 	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
+ 
+-	if (IS_G4X(dev)) {
+-		dev->driver->get_vblank_counter = g45_get_vblank_counter;
+-		dev->max_vblank_count = 0xffffffff; /* 32 bits of frame count */
+-	} else {
+-		dev->driver->get_vblank_counter = i915_get_vblank_counter;
+-		dev->max_vblank_count = 0x00ffffff; /* 24 bits of frame count */
+-	}
++	dev_priv->tq = taskqueue_create("915", M_WAITOK,
++	    taskqueue_thread_enqueue, &dev_priv->tq);
++	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
++	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
++	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
++	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
++	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
++
++	dev_priv->has_gem = 1;
++	intel_irq_init(dev);
++
++	intel_setup_mchbar(dev);
++	intel_setup_gmbus(dev);
++	intel_opregion_setup(dev);
++
++	intel_setup_bios(dev);
+ 
+-#ifdef I915_HAVE_GEM
+ 	i915_gem_load(dev);
+-#endif
++
+ 	/* Init HWS */
+ 	if (!I915_NEED_GFX_HWS(dev)) {
+ 		ret = i915_init_phys_hws(dev);
+@@ -890,82 +1225,148 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 			return ret;
+ 		}
+ 	}
+-#ifdef __linux__
+-	/* On the 945G/GM, the chipset reports the MSI capability on the
+-	 * integrated graphics even though the support isn't actually there
+-	 * according to the published specs.  It doesn't appear to function
+-	 * correctly in testing on 945G.
+-	 * This may be a side effect of MSI having been made available for PEG
+-	 * and the registers being closely associated.
+-	 *
+-	 * According to chipset errata, on the 965GM, MSI interrupts may
+-	 * be lost or delayed
+-	 */
+-	if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
+-		if (pci_enable_msi(dev->pdev))
+-			DRM_ERROR("failed to enable MSI\n");
+ 
+-	intel_opregion_init(dev);
+-#endif
+-	DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
+-	dev_priv->user_irq_refcount = 0;
++	if (IS_PINEVIEW(dev))
++		i915_pineview_get_mem_freq(dev);
++	else if (IS_GEN5(dev))
++		i915_ironlake_get_mem_freq(dev);
+ 
+-	ret = drm_vblank_init(dev, I915_NUM_PIPE);
++	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
+ 
+-	if (ret) {
+-		(void) i915_driver_unload(dev);
+-		return ret;
++	if (IS_IVYBRIDGE(dev))
++		dev_priv->num_pipe = 3;
++	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
++		dev_priv->num_pipe = 2;
++	else
++		dev_priv->num_pipe = 1;
++
++	ret = drm_vblank_init(dev, dev_priv->num_pipe);
++	if (ret)
++		goto out_gem_unload;
++
++	/* Start out suspended */
++	dev_priv->mm.suspended = 1;
++
++	intel_detect_pch(dev);
++
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		DRM_UNLOCK();
++		ret = i915_load_modeset_init(dev);
++		DRM_LOCK();
++		if (ret < 0) {
++			DRM_ERROR("failed to init modeset\n");
++			goto out_gem_unload;
++		}
+ 	}
+ 
+-	return ret;
++	intel_opregion_init(dev);
++
++	callout_init(&dev_priv->hangcheck_timer, 1);
++	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
++	    i915_hangcheck_elapsed, dev);
++
++	mtx_lock(&mchdev_lock);
++	i915_mch_dev = dev_priv;
++	dev_priv->mchdev_lock = &mchdev_lock;
++	mtx_unlock(&mchdev_lock);
++
++	return (0);
++
++out_gem_unload:
++	/* XXXKIB */
++	(void) i915_driver_unload_int(dev, true);
++	return (ret);
+ }
+ 
+-int i915_driver_unload(struct drm_device *dev)
++static int
++i915_driver_unload_int(struct drm_device *dev, bool locked)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+ 	i915_free_hws(dev);
+ 
++	intel_teardown_mchbar(dev);
++
+ 	drm_rmmap(dev, dev_priv->mmio_map);
+-#ifdef __linux__
+-	intel_opregion_free(dev);
++	intel_opregion_fini(dev);
++
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		if (!locked)
++			DRM_LOCK();
++		i915_gem_free_all_phys_object(dev);
++		i915_gem_cleanup_ringbuffer(dev);
++		if (!locked)
++			DRM_UNLOCK();
++#if 1
++		KIB_NOTYET();
++#else
++		if (I915_HAS_FBC(dev) && i915_powersave)
++			i915_cleanup_compression(dev);
+ #endif
+-	DRM_SPINUNINIT(&dev_priv->user_irq_lock);
++		drm_mm_takedown(&dev_priv->mm.stolen);
++
++		intel_cleanup_overlay(dev);
++
++		if (!I915_NEED_GFX_HWS(dev))
++			i915_free_hws(dev);
++	}
+ 
++	mtx_destroy(&dev_priv->irq_lock);
++
++	if (dev_priv->tq != NULL)
++		taskqueue_free(dev_priv->tq);
++
++	i915_gem_unload(dev);
++
++	/* XXXKIB check, this is racy */
++	callout_stop(&dev_priv->hangcheck_timer);
++	callout_drain(&dev_priv->hangcheck_timer);
++	mtx_destroy(&dev_priv->error_lock);
++	mtx_destroy(&dev_priv->error_completion_lock);
++	mtx_destroy(&dev_priv->rps_lock);
+ 	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
+ 		 DRM_MEM_DRIVER);
+ 
+ 	return 0;
+ }
+ 
+-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++int
++i915_driver_unload(struct drm_device *dev)
+ {
+-	struct drm_i915_file_private *i915_file_priv;
+ 
+-	DRM_DEBUG("\n");
+-	i915_file_priv = (struct drm_i915_file_private *)
+-	    drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
++	return (i915_driver_unload_int(dev, true));
++}
+ 
+-	if (!i915_file_priv)
+-		return -ENOMEM;
++int
++i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++	struct drm_i915_file_private *i915_file_priv;
+ 
+-	file_priv->driver_priv = i915_file_priv;
++	i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
++	    M_WAITOK | M_ZERO);
+ 
+-	i915_file_priv->mm.last_gem_seqno = 0;
+-	i915_file_priv->mm.last_gem_throttle_seqno = 0;
++	mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
++	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
++	file_priv->driver_priv = i915_file_priv;
+ 
+-	return 0;
++	return (0);
+ }
+ 
+-void i915_driver_lastclose(struct drm_device * dev)
++void
++i915_driver_lastclose(struct drm_device * dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 
+-	if (!dev_priv)
++	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
++#if 1
++		KIB_NOTYET();
++#else
++		drm_fb_helper_restore();
++		vga_switcheroo_process_delayed_switch();
++#endif
+ 		return;
+-#ifdef I915_HAVE_GEM
++	}
+ 	i915_gem_lastclose(dev);
+-#endif
+ 	if (dev_priv->agp_heap)
+ 		i915_mem_takedown(&(dev_priv->agp_heap));
+ 
+@@ -975,13 +1376,17 @@ void i915_driver_lastclose(struct drm_device * dev)
+ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
++
++	i915_gem_release(dev, file_priv);
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+ }
+ 
+ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+ {
+ 	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ 
++	mtx_destroy(&i915_file_priv->mm.lck);
+ 	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+ }
+ 
+@@ -1003,33 +1408,71 @@ struct drm_ioctl_desc i915_ioctls[] = {
+ 	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
+ 	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-#ifdef I915_HAVE_GEM
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+-	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+-	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+-	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+-	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
+-	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
++	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+-	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
+-	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
++	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
+-#endif
++	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ };
+ 
+-int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
++struct drm_driver_info i915_driver_info = {
++	.driver_features =   DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
++	    DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
++	    DRIVER_GEM /*| DRIVER_MODESET*/,
++
++	.buf_priv_size	= sizeof(drm_i915_private_t),
++	.load		= i915_driver_load,
++	.open		= i915_driver_open,
++	.unload		= i915_driver_unload,
++	.preclose	= i915_driver_preclose,
++	.lastclose	= i915_driver_lastclose,
++	.postclose	= i915_driver_postclose,
++	.device_is_agp	= i915_driver_device_is_agp,
++	.gem_init_object = i915_gem_init_object,
++	.gem_free_object = i915_gem_free_object,
++	.gem_pager_ops	= &i915_gem_pager_ops,
++	.dumb_create	= i915_gem_dumb_create,
++	.dumb_map_offset = i915_gem_mmap_gtt,
++	.dumb_destroy	= i915_gem_dumb_destroy,
++	.sysctl_init	= i915_sysctl_init,
++	.sysctl_cleanup	= i915_sysctl_cleanup,
++
++	.ioctls		= i915_ioctls,
++	.max_ioctl	= DRM_ARRAY_SIZE(i915_ioctls),
++
++	.name		= DRIVER_NAME,
++	.desc		= DRIVER_DESC,
++	.date		= DRIVER_DATE,
++	.major		= DRIVER_MAJOR,
++	.minor		= DRIVER_MINOR,
++	.patchlevel	= DRIVER_PATCHLEVEL,
++};
+ 
+ /**
+  * Determine if the device really is AGP or not.
+  *
+  * All Intel graphics chipsets are treated as AGP, even if they are really
+- * PCI-e.
++ * built-in.
+  *
+  * \param dev   The device to be tested.
+  *
+@@ -1040,3 +1483,532 @@ int i915_driver_device_is_agp(struct drm_device * dev)
+ {
+ 	return 1;
+ }
++
++static void i915_pineview_get_mem_freq(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 tmp;
++
++	tmp = I915_READ(CLKCFG);
++
++	switch (tmp & CLKCFG_FSB_MASK) {
++	case CLKCFG_FSB_533:
++		dev_priv->fsb_freq = 533; /* 133*4 */
 +		break;
-+	case DRM_FORMAT_XRGB8888:
-+		dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
-+		pixel_size = 4;
++	case CLKCFG_FSB_800:
++		dev_priv->fsb_freq = 800; /* 200*4 */
 +		break;
-+	case DRM_FORMAT_YUYV:
-+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
-+		pixel_size = 2;
++	case CLKCFG_FSB_667:
++		dev_priv->fsb_freq =  667; /* 167*4 */
 +		break;
-+	case DRM_FORMAT_YVYU:
-+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
-+		pixel_size = 2;
++	case CLKCFG_FSB_400:
++		dev_priv->fsb_freq = 400; /* 100*4 */
 +		break;
-+	case DRM_FORMAT_UYVY:
-+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
-+		pixel_size = 2;
++	}
++
++	switch (tmp & CLKCFG_MEM_MASK) {
++	case CLKCFG_MEM_533:
++		dev_priv->mem_freq = 533;
 +		break;
-+	case DRM_FORMAT_VYUY:
-+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
-+		pixel_size = 2;
++	case CLKCFG_MEM_667:
++		dev_priv->mem_freq = 667;
 +		break;
-+	default:
-+		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
-+		dvscntr |= DVS_FORMAT_RGBX888;
-+		pixel_size = 4;
++	case CLKCFG_MEM_800:
++		dev_priv->mem_freq = 800;
 +		break;
 +	}
 +
-+	if (obj->tiling_mode != I915_TILING_NONE)
-+		dvscntr |= DVS_TILED;
++	/* detect pineview DDR3 setting */
++	tmp = I915_READ(CSHRDDR3CTL);
++	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
++}
 +
-+	/* must disable */
-+	dvscntr |= DVS_TRICKLE_FEED_DISABLE;
-+	dvscntr |= DVS_ENABLE;
++static void i915_ironlake_get_mem_freq(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u16 ddrpll, csipll;
 +
-+	/* Sizes are 0 based */
-+	src_w--;
-+	src_h--;
-+	crtc_w--;
-+	crtc_h--;
++	ddrpll = I915_READ16(DDRMPLL1);
++	csipll = I915_READ16(CSIPLL0);
 +
-+	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
++	switch (ddrpll & 0xff) {
++	case 0xc:
++		dev_priv->mem_freq = 800;
++		break;
++	case 0x10:
++		dev_priv->mem_freq = 1066;
++		break;
++	case 0x14:
++		dev_priv->mem_freq = 1333;
++		break;
++	case 0x18:
++		dev_priv->mem_freq = 1600;
++		break;
++	default:
++		DRM_DEBUG("unknown memory frequency 0x%02x\n",
++				 ddrpll & 0xff);
++		dev_priv->mem_freq = 0;
++		break;
++	}
 +
-+	if (crtc_w != src_w || crtc_h != src_h)
-+		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
++	dev_priv->r_t = dev_priv->mem_freq;
 +
-+	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
-+	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-+	if (obj->tiling_mode != I915_TILING_NONE) {
-+		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
++	switch (csipll & 0x3ff) {
++	case 0x00c:
++		dev_priv->fsb_freq = 3200;
++		break;
++	case 0x00e:
++		dev_priv->fsb_freq = 3733;
++		break;
++	case 0x010:
++		dev_priv->fsb_freq = 4266;
++		break;
++	case 0x012:
++		dev_priv->fsb_freq = 4800;
++		break;
++	case 0x014:
++		dev_priv->fsb_freq = 5333;
++		break;
++	case 0x016:
++		dev_priv->fsb_freq = 5866;
++		break;
++	case 0x018:
++		dev_priv->fsb_freq = 6400;
++		break;
++	default:
++		DRM_DEBUG("unknown fsb frequency 0x%04x\n",
++				 csipll & 0x3ff);
++		dev_priv->fsb_freq = 0;
++		break;
++	}
++
++	if (dev_priv->fsb_freq == 3200) {
++		dev_priv->c_m = 0;
++	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
++		dev_priv->c_m = 1;
 +	} else {
-+		unsigned long offset;
-+
-+		offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
-+		I915_WRITE(DVSLINOFF(pipe), offset);
++		dev_priv->c_m = 2;
 +	}
-+	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
-+	I915_WRITE(DVSSCALE(pipe), dvsscale);
-+	I915_WRITE(DVSCNTR(pipe), dvscntr);
-+	I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
-+	POSTING_READ(DVSSURF(pipe));
 +}
 +
-+static void
-+snb_disable_plane(struct drm_plane *plane)
++static const struct cparams {
++	u16 i;
++	u16 t;
++	u16 m;
++	u16 c;
++} cparams[] = {
++	{ 1, 1333, 301, 28664 },
++	{ 1, 1066, 294, 24460 },
++	{ 1, 800, 294, 25192 },
++	{ 0, 1333, 276, 27605 },
++	{ 0, 1066, 276, 27605 },
++	{ 0, 800, 231, 23784 },
++};
++
++unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_plane *intel_plane = to_intel_plane(plane);
-+	int pipe = intel_plane->pipe;
++	u64 total_count, diff, ret;
++	u32 count1, count2, count3, m = 0, c = 0;
++	unsigned long now = jiffies_to_msecs(jiffies), diff1;
++	int i;
 +
-+	I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
-+	/* Disable the scaler */
-+	I915_WRITE(DVSSCALE(pipe), 0);
-+	/* Flush double buffered register updates */
-+	I915_WRITE(DVSSURF(pipe), 0);
-+	POSTING_READ(DVSSURF(pipe));
-+}
++	diff1 = now - dev_priv->last_time1;
++	/*
++	 * sysctl(8) reads the value of sysctl twice in rapid
++	 * succession.  There is high chance that it happens in the
++	 * same timer tick.  Use the cached value to not divide by
++	 * zero and give the hw a chance to gather more samples.
++	 */
++	if (diff1 <= 10)
++		return (dev_priv->chipset_power);
 +
-+static void
-+intel_enable_primary(struct drm_crtc *crtc)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	int reg = DSPCNTR(intel_crtc->plane);
++	count1 = I915_READ(DMIEC);
++	count2 = I915_READ(DDREC);
++	count3 = I915_READ(CSIEC);
 +
-+	I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
-+}
++	total_count = count1 + count2 + count3;
 +
-+static void
-+intel_disable_primary(struct drm_crtc *crtc)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	int reg = DSPCNTR(intel_crtc->plane);
++	/* FIXME: handle per-counter overflow */
++	if (total_count < dev_priv->last_count1) {
++		diff = ~0UL - dev_priv->last_count1;
++		diff += total_count;
++	} else {
++		diff = total_count - dev_priv->last_count1;
++	}
 +
-+	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
++	for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
++		if (cparams[i].i == dev_priv->c_m &&
++		    cparams[i].t == dev_priv->r_t) {
++			m = cparams[i].m;
++			c = cparams[i].c;
++			break;
++		}
++	}
++
++	diff = diff / diff1;
++	ret = ((m * diff) + c);
++	ret = ret / 10;
++
++	dev_priv->last_count1 = total_count;
++	dev_priv->last_time1 = now;
++
++	dev_priv->chipset_power = ret;
++	return (ret);
 +}
 +
-+static int
-+snb_update_colorkey(struct drm_plane *plane,
-+		    struct drm_intel_sprite_colorkey *key)
++unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_plane *intel_plane;
-+	u32 dvscntr;
-+	int ret = 0;
++	unsigned long m, x, b;
++	u32 tsfs;
 +
-+	intel_plane = to_intel_plane(plane);
++	tsfs = I915_READ(TSFS);
 +
-+	I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
-+	I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
-+	I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
++	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
++	x = I915_READ8(I915_TR1);
 +
-+	dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
-+	dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
-+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
-+		dvscntr |= DVS_DEST_KEY;
-+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
-+		dvscntr |= DVS_SOURCE_KEY;
-+	I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
++	b = tsfs & TSFS_INTR_MASK;
 +
-+	POSTING_READ(DVSKEYMSK(intel_plane->pipe));
++	return ((m * x) / 127) - b;
++}
 +
-+	return ret;
++static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
++{
++	static const struct v_table {
++		u16 vd; /* in .1 mil */
++		u16 vm; /* in .1 mil */
++	} v_table[] = {
++		{ 0, 0, },
++		{ 375, 0, },
++		{ 500, 0, },
++		{ 625, 0, },
++		{ 750, 0, },
++		{ 875, 0, },
++		{ 1000, 0, },
++		{ 1125, 0, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4125, 3000, },
++		{ 4250, 3125, },
++		{ 4375, 3250, },
++		{ 4500, 3375, },
++		{ 4625, 3500, },
++		{ 4750, 3625, },
++		{ 4875, 3750, },
++		{ 5000, 3875, },
++		{ 5125, 4000, },
++		{ 5250, 4125, },
++		{ 5375, 4250, },
++		{ 5500, 4375, },
++		{ 5625, 4500, },
++		{ 5750, 4625, },
++		{ 5875, 4750, },
++		{ 6000, 4875, },
++		{ 6125, 5000, },
++		{ 6250, 5125, },
++		{ 6375, 5250, },
++		{ 6500, 5375, },
++		{ 6625, 5500, },
++		{ 6750, 5625, },
++		{ 6875, 5750, },
++		{ 7000, 5875, },
++		{ 7125, 6000, },
++		{ 7250, 6125, },
++		{ 7375, 6250, },
++		{ 7500, 6375, },
++		{ 7625, 6500, },
++		{ 7750, 6625, },
++		{ 7875, 6750, },
++		{ 8000, 6875, },
++		{ 8125, 7000, },
++		{ 8250, 7125, },
++		{ 8375, 7250, },
++		{ 8500, 7375, },
++		{ 8625, 7500, },
++		{ 8750, 7625, },
++		{ 8875, 7750, },
++		{ 9000, 7875, },
++		{ 9125, 8000, },
++		{ 9250, 8125, },
++		{ 9375, 8250, },
++		{ 9500, 8375, },
++		{ 9625, 8500, },
++		{ 9750, 8625, },
++		{ 9875, 8750, },
++		{ 10000, 8875, },
++		{ 10125, 9000, },
++		{ 10250, 9125, },
++		{ 10375, 9250, },
++		{ 10500, 9375, },
++		{ 10625, 9500, },
++		{ 10750, 9625, },
++		{ 10875, 9750, },
++		{ 11000, 9875, },
++		{ 11125, 10000, },
++		{ 11250, 10125, },
++		{ 11375, 10250, },
++		{ 11500, 10375, },
++		{ 11625, 10500, },
++		{ 11750, 10625, },
++		{ 11875, 10750, },
++		{ 12000, 10875, },
++		{ 12125, 11000, },
++		{ 12250, 11125, },
++		{ 12375, 11250, },
++		{ 12500, 11375, },
++		{ 12625, 11500, },
++		{ 12750, 11625, },
++		{ 12875, 11750, },
++		{ 13000, 11875, },
++		{ 13125, 12000, },
++		{ 13250, 12125, },
++		{ 13375, 12250, },
++		{ 13500, 12375, },
++		{ 13625, 12500, },
++		{ 13750, 12625, },
++		{ 13875, 12750, },
++		{ 14000, 12875, },
++		{ 14125, 13000, },
++		{ 14250, 13125, },
++		{ 14375, 13250, },
++		{ 14500, 13375, },
++		{ 14625, 13500, },
++		{ 14750, 13625, },
++		{ 14875, 13750, },
++		{ 15000, 13875, },
++		{ 15125, 14000, },
++		{ 15250, 14125, },
++		{ 15375, 14250, },
++		{ 15500, 14375, },
++		{ 15625, 14500, },
++		{ 15750, 14625, },
++		{ 15875, 14750, },
++		{ 16000, 14875, },
++		{ 16125, 15000, },
++	};
++	if (dev_priv->info->is_mobile)
++		return v_table[pxvid].vm;
++	else
++		return v_table[pxvid].vd;
 +}
 +
-+static void
-+snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
++void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_plane *intel_plane;
-+	u32 dvscntr;
++	struct timespec now, diff1;
++	u64 diff;
++	unsigned long diffms;
++	u32 count;
 +
-+	intel_plane = to_intel_plane(plane);
++	nanotime(&now);
++	diff1 = now;
++	timespecsub(&diff1, &dev_priv->last_time2);
 +
-+	key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
-+	key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
-+	key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
-+	key->flags = 0;
++	/* Don't divide by 0 */
++	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
++	if (!diffms)
++		return;
 +
-+	dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
++	count = I915_READ(GFXEC);
 +
-+	if (dvscntr & DVS_DEST_KEY)
-+		key->flags = I915_SET_COLORKEY_DESTINATION;
-+	else if (dvscntr & DVS_SOURCE_KEY)
-+		key->flags = I915_SET_COLORKEY_SOURCE;
-+	else
-+		key->flags = I915_SET_COLORKEY_NONE;
++	if (count < dev_priv->last_count2) {
++		diff = ~0UL - dev_priv->last_count2;
++		diff += count;
++	} else {
++		diff = count - dev_priv->last_count2;
++	}
++
++	dev_priv->last_count2 = count;
++	dev_priv->last_time2 = now;
++
++	/* More magic constants... */
++	diff = diff * 1181;
++	diff = diff / (diffms * 10);
++	dev_priv->gfx_power = diff;
 +}
 +
-+static int
-+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-+		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-+		   unsigned int crtc_w, unsigned int crtc_h,
-+		   uint32_t src_x, uint32_t src_y,
-+		   uint32_t src_w, uint32_t src_h)
++unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	struct intel_plane *intel_plane = to_intel_plane(plane);
-+	struct intel_framebuffer *intel_fb;
-+	struct drm_i915_gem_object *obj, *old_obj;
-+	int pipe = intel_plane->pipe;
-+	int ret = 0;
-+	int x = src_x >> 16, y = src_y >> 16;
-+	int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
-+	bool disable_primary = false;
++	unsigned long t, corr, state1, corr2, state2;
++	u32 pxvid, ext_v;
 +
-+	intel_fb = to_intel_framebuffer(fb);
-+	obj = intel_fb->obj;
++	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
++	pxvid = (pxvid >> 24) & 0x7f;
++	ext_v = pvid_to_extvid(dev_priv, pxvid);
 +
-+	old_obj = intel_plane->obj;
++	state1 = ext_v;
 +
-+	/* Pipe must be running... */
-+	if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
-+		return -EINVAL;
++	t = i915_mch_val(dev_priv);
 +
-+	if (crtc_x >= primary_w || crtc_y >= primary_h)
-+		return -EINVAL;
++	/* Revel in the empirically derived constants */
 +
-+	/* Don't modify another pipe's plane */
-+	if (intel_plane->pipe != intel_crtc->pipe)
-+		return -EINVAL;
++	/* Correction factor in 1/100000 units */
++	if (t > 80)
++		corr = ((t * 2349) + 135940);
++	else if (t >= 50)
++		corr = ((t * 964) + 29317);
++	else /* < 50 */
++		corr = ((t * 301) + 1004);
 +
-+	/*
-+	 * Clamp the width & height into the visible area.  Note we don't
-+	 * try to scale the source if part of the visible region is offscreen.
-+	 * The caller must handle that by adjusting source offset and size.
-+	 */
-+	if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
-+		crtc_w += crtc_x;
-+		crtc_x = 0;
-+	}
-+	if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
-+		goto out;
-+	if ((crtc_x + crtc_w) > primary_w)
-+		crtc_w = primary_w - crtc_x;
++	corr = corr * ((150142 * state1) / 10000 - 78642);
++	corr /= 100000;
++	corr2 = (corr * dev_priv->corr);
 +
-+	if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
-+		crtc_h += crtc_y;
-+		crtc_y = 0;
-+	}
-+	if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
-+		goto out;
-+	if (crtc_y + crtc_h > primary_h)
-+		crtc_h = primary_h - crtc_y;
++	state2 = (corr2 * state1) / 10000;
++	state2 /= 100; /* convert to mW */
 +
-+	if (!crtc_w || !crtc_h) /* Again, nothing to display */
-+		goto out;
++	i915_update_gfx_val(dev_priv);
 +
-+	/*
-+	 * We can take a larger source and scale it down, but
-+	 * only so much...  16x is the max on SNB.
-+	 */
-+	if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
-+		return -EINVAL;
++	return dev_priv->gfx_power + state2;
++}
 +
-+	/*
-+	 * If the sprite is completely covering the primary plane,
-+	 * we can disable the primary and save power.
-+	 */
-+	if ((crtc_x == 0) && (crtc_y == 0) &&
-+	    (crtc_w == primary_w) && (crtc_h == primary_h))
-+		disable_primary = true;
++/**
++ * i915_read_mch_val - return value for IPS use
++ *
++ * Calculate and return a value for the IPS driver to use when deciding whether
++ * we have thermal and power headroom to increase CPU or GPU power budget.
++ */
++unsigned long i915_read_mch_val(void)
++{
++	struct drm_i915_private *dev_priv;
++	unsigned long chipset_val, graphics_val, ret = 0;
 +
-+	DRM_LOCK();
-+
-+	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-+	if (ret) {
-+		DRM_ERROR("failed to pin object\n");
++	mtx_lock(&mchdev_lock);
++	if (!i915_mch_dev)
 +		goto out_unlock;
-+	}
++	dev_priv = i915_mch_dev;
 +
-+	intel_plane->obj = obj;
++	chipset_val = i915_chipset_val(dev_priv);
++	graphics_val = i915_gfx_val(dev_priv);
 +
-+	/*
-+	 * Be sure to re-enable the primary before the sprite is no longer
-+	 * covering it fully.
-+	 */
-+	if (!disable_primary && intel_plane->primary_disabled) {
-+		intel_enable_primary(crtc);
-+		intel_plane->primary_disabled = false;
-+	}
++	ret = chipset_val + graphics_val;
 +
-+	intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
-+				  crtc_w, crtc_h, x, y, src_w, src_h);
++out_unlock:
++	mtx_unlock(&mchdev_lock);
 +
-+	if (disable_primary) {
-+		intel_disable_primary(crtc);
-+		intel_plane->primary_disabled = true;
-+	}
++	return ret;
++}
 +
-+	/* Unpin old obj after new one is active to avoid ugliness */
-+	if (old_obj) {
-+		/*
-+		 * It's fairly common to simply update the position of
-+		 * an existing object.  In that case, we don't need to
-+		 * wait for vblank to avoid ugliness, we only need to
-+		 * do the pin & ref bookkeeping.
-+		 */
-+		if (old_obj != obj) {
-+			DRM_UNLOCK();
-+			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
-+			DRM_LOCK();
-+		}
-+		i915_gem_object_unpin(old_obj);
++/**
++ * i915_gpu_raise - raise GPU frequency limit
++ *
++ * Raise the limit; IPS indicates we have thermal headroom.
++ */
++bool i915_gpu_raise(void)
++{
++	struct drm_i915_private *dev_priv;
++	bool ret = true;
++
++	mtx_lock(&mchdev_lock);
++	if (!i915_mch_dev) {
++		ret = false;
++		goto out_unlock;
 +	}
++	dev_priv = i915_mch_dev;
 +
++	if (dev_priv->max_delay > dev_priv->fmax)
++		dev_priv->max_delay--;
++
 +out_unlock:
-+	DRM_UNLOCK();
-+out:
++	mtx_unlock(&mchdev_lock);
++
 +	return ret;
 +}
 +
-+static int
-+intel_disable_plane(struct drm_plane *plane)
++/**
++ * i915_gpu_lower - lower GPU frequency limit
++ *
++ * IPS indicates we're close to a thermal limit, so throttle back the GPU
++ * frequency maximum.
++ */
++bool i915_gpu_lower(void)
 +{
-+	struct drm_device *dev = plane->dev;
-+	struct intel_plane *intel_plane = to_intel_plane(plane);
-+	int ret = 0;
++	struct drm_i915_private *dev_priv;
++	bool ret = true;
 +
-+	if (intel_plane->primary_disabled) {
-+		intel_enable_primary(plane->crtc);
-+		intel_plane->primary_disabled = false;
++	mtx_lock(&mchdev_lock);
++	if (!i915_mch_dev) {
++		ret = false;
++		goto out_unlock;
 +	}
++	dev_priv = i915_mch_dev;
 +
-+	intel_plane->disable_plane(plane);
++	if (dev_priv->max_delay < dev_priv->min_delay)
++		dev_priv->max_delay++;
 +
-+	if (!intel_plane->obj)
-+		goto out;
++out_unlock:
++	mtx_unlock(&mchdev_lock);
 +
-+	DRM_LOCK();
-+	i915_gem_object_unpin(intel_plane->obj);
-+	intel_plane->obj = NULL;
-+	DRM_UNLOCK();
-+out:
-+
 +	return ret;
 +}
 +
-+static void intel_destroy_plane(struct drm_plane *plane)
++/**
++ * i915_gpu_busy - indicate GPU busyness to IPS
++ *
++ * Tell the IPS driver whether or not the GPU is busy.
++ */
++bool i915_gpu_busy(void)
 +{
-+	struct intel_plane *intel_plane = to_intel_plane(plane);
-+	intel_disable_plane(plane);
-+	drm_plane_cleanup(plane);
-+	free(intel_plane, DRM_MEM_KMS);
-+}
++	struct drm_i915_private *dev_priv;
++	bool ret = false;
 +
-+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
-+			      struct drm_file *file_priv)
-+{
-+	struct drm_intel_sprite_colorkey *set = data;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct drm_mode_object *obj;
-+	struct drm_plane *plane;
-+	struct intel_plane *intel_plane;
-+	int ret = 0;
-+
-+	if (!dev_priv)
-+		return -EINVAL;
-+
-+	/* Make sure we don't try to enable both src & dest simultaneously */
-+	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
-+		return -EINVAL;
-+
-+	sx_xlock(&dev->mode_config.mutex);
-+	
-+	obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
-+	if (!obj) {
-+		ret = -EINVAL;
++	mtx_lock(&mchdev_lock);
++	if (!i915_mch_dev)
 +		goto out_unlock;
-+	}
++	dev_priv = i915_mch_dev;
 +
-+	plane = obj_to_plane(obj);
-+	intel_plane = to_intel_plane(plane);
-+	ret = intel_plane->update_colorkey(plane, set);
++	ret = dev_priv->busy;
 +
 +out_unlock:
-+	sx_xunlock(&dev->mode_config.mutex);
++	mtx_unlock(&mchdev_lock);
++
 +	return ret;
 +}
 +
-+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
-+			      struct drm_file *file_priv)
++/**
++ * i915_gpu_turbo_disable - disable graphics turbo
++ *
++ * Disable graphics turbo by resetting the max frequency and setting the
++ * current frequency to the default.
++ */
++bool i915_gpu_turbo_disable(void)
 +{
-+	struct drm_intel_sprite_colorkey *get = data;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct drm_mode_object *obj;
-+	struct drm_plane *plane;
-+	struct intel_plane *intel_plane;
-+	int ret = 0;
++	struct drm_i915_private *dev_priv;
++	bool ret = true;
 +
-+	if (!dev_priv)
-+		return -EINVAL;
-+
-+	sx_xlock(&dev->mode_config.mutex);
-+
-+	obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
-+	if (!obj) {
-+		ret = -EINVAL;
++	mtx_lock(&mchdev_lock);
++	if (!i915_mch_dev) {
++		ret = false;
 +		goto out_unlock;
 +	}
++	dev_priv = i915_mch_dev;
 +
-+	plane = obj_to_plane(obj);
-+	intel_plane = to_intel_plane(plane);
-+	intel_plane->get_colorkey(plane, get);
++	dev_priv->max_delay = dev_priv->fstart;
 +
++	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
++		ret = false;
++
 +out_unlock:
-+	sx_xunlock(&dev->mode_config.mutex);
++	mtx_unlock(&mchdev_lock);
++
 +	return ret;
 +}
+diff --git a/sys/dev/drm/i915_drm.h b/sys/dev/drm/i915_drm.h
+index 18db791..fcb9f32 100644
+--- sys/dev/drm/i915_drm.h
++++ sys/dev/drm/i915_drm.h
+@@ -195,6 +195,15 @@ typedef struct drm_i915_sarea {
+ #define DRM_I915_GEM_SW_FINISH	0x20
+ #define DRM_I915_GEM_SET_TILING	0x21
+ #define DRM_I915_GEM_GET_TILING	0x22
++#define DRM_I915_GEM_GET_APERTURE 0x23
++#define DRM_I915_GEM_MMAP_GTT	0x24
++#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
++#define DRM_I915_GEM_MADVISE	0x26
++#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
++#define DRM_I915_OVERLAY_ATTRS	0x28
++#define DRM_I915_GEM_EXECBUFFER2	0x29
++#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
++#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+ 
+ #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+ #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+@@ -216,6 +225,7 @@ typedef struct drm_i915_sarea {
+ #define DRM_IOCTL_I915_EXECBUFFER	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
+ #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+ #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
++#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+ #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+ #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+ #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+@@ -226,10 +236,18 @@ typedef struct drm_i915_sarea {
+ #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+ #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+ #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
++#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+ #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+ #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+ #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+ #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
++#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
++#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
++#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
++#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
++#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
++#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
++#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+ 
+ /* Asynchronous page flipping:
+  */
+@@ -284,6 +302,17 @@ typedef struct drm_i915_irq_wait {
+ #define I915_PARAM_LAST_DISPATCH         3
+ #define I915_PARAM_CHIPSET_ID            4
+ #define I915_PARAM_HAS_GEM               5
++#define I915_PARAM_NUM_FENCES_AVAIL      6
++#define I915_PARAM_HAS_OVERLAY           7
++#define I915_PARAM_HAS_PAGEFLIPPING	 8
++#define I915_PARAM_HAS_EXECBUF2          9
++#define I915_PARAM_HAS_BSD		 10
++#define I915_PARAM_HAS_BLT		 11
++#define I915_PARAM_HAS_RELAXED_FENCING	 12
++#define I915_PARAM_HAS_COHERENT_RINGS	 13
++#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
++#define I915_PARAM_HAS_RELAXED_DELTA	 15
++#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
+ 
+ typedef struct drm_i915_getparam {
+ 	int param;
+@@ -295,6 +324,7 @@ typedef struct drm_i915_getparam {
+ #define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
+ #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
+ #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
++#define I915_SETPARAM_NUM_USED_FENCES                     4
+ 
+ typedef struct drm_i915_setparam {
+ 	int param;
+@@ -500,6 +530,18 @@ struct drm_i915_gem_mmap {
+ 	uint64_t addr_ptr;	/* void *, but pointers are not 32/64 compatible */
+ };
+ 
++struct drm_i915_gem_mmap_gtt {
++	/** Handle for the object being mapped. */
++	uint32_t handle;
++	uint32_t pad;
++	/**
++	 * Fake offset to use for subsequent mmap call
++	 *
++	 * This is a fixed-size type for 32/64 compatibility.
++	 */
++	uint64_t offset;
++};
 +
-+static const struct drm_plane_funcs intel_plane_funcs = {
-+	.update_plane = intel_update_plane,
-+	.disable_plane = intel_disable_plane,
-+	.destroy = intel_destroy_plane,
+ struct drm_i915_gem_set_domain {
+ 	/** Handle for the object */
+ 	uint32_t handle;
+@@ -633,6 +675,76 @@ struct drm_i915_gem_execbuffer {
+ 	uint64_t cliprects_ptr;	/* struct drm_clip_rect *cliprects */
+ };
+ 
++struct drm_i915_gem_exec_object2 {
++	/**
++	 * User's handle for a buffer to be bound into the GTT for this
++	 * operation.
++	 */
++	uint32_t handle;
++
++	/** Number of relocations to be performed on this buffer */
++	uint32_t relocation_count;
++	/**
++	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
++	 * the relocations to be performed in this buffer.
++	 */
++	uint64_t relocs_ptr;
++
++	/** Required alignment in graphics aperture */
++	uint64_t alignment;
++
++	/**
++	 * Returned value of the updated offset of the object, for future
++	 * presumed_offset writes.
++	 */
++	uint64_t offset;
++
++#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
++	uint64_t flags;
++	uint64_t rsvd1;
++	uint64_t rsvd2;
 +};
 +
-+static uint32_t snb_plane_formats[] = {
-+	DRM_FORMAT_XBGR8888,
-+	DRM_FORMAT_XRGB8888,
-+	DRM_FORMAT_YUYV,
-+	DRM_FORMAT_YVYU,
-+	DRM_FORMAT_UYVY,
-+	DRM_FORMAT_VYUY,
++struct drm_i915_gem_execbuffer2 {
++	/**
++	 * List of gem_exec_object2 structs
++	 */
++	uint64_t buffers_ptr;
++	uint32_t buffer_count;
++
++	/** Offset in the batchbuffer to start execution from. */
++	uint32_t batch_start_offset;
++	/** Bytes used in batchbuffer from batch_start_offset */
++	uint32_t batch_len;
++	uint32_t DR1;
++	uint32_t DR4;
++	uint32_t num_cliprects;
++	/** This is a struct drm_clip_rect *cliprects */
++	uint64_t cliprects_ptr;
++#define I915_EXEC_RING_MASK              (7<<0)
++#define I915_EXEC_DEFAULT                (0<<0)
++#define I915_EXEC_RENDER                 (1<<0)
++#define I915_EXEC_BSD                    (2<<0)
++#define I915_EXEC_BLT                    (3<<0)
++
++/* Used for switching the constants addressing mode on gen4+ RENDER ring.
++ * Gen6+ only supports relative addressing to dynamic state (default) and
++ * absolute addressing.
++ *
++ * These flags are ignored for the BSD and BLT rings.
++ */
++#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
++#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
++#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
++#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
++	uint64_t flags;
++	uint64_t rsvd1;
++	uint64_t rsvd2;
 +};
 +
-+int
-+intel_plane_init(struct drm_device *dev, enum pipe pipe)
-+{
-+	struct intel_plane *intel_plane;
-+	unsigned long possible_crtcs;
-+	int ret;
++/** Resets the SO write offset registers for transform feedback on gen7. */
++#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
 +
-+	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
-+		DRM_ERROR("new plane code only for SNB+\n");
-+		return -ENODEV;
-+	}
+ struct drm_i915_gem_pin {
+ 	/** Handle of the buffer to be pinned. */
+ 	uint32_t handle;
+@@ -670,6 +782,9 @@ struct drm_i915_gem_busy {
+ #define I915_BIT_6_SWIZZLE_9_10_11	4
+ /* Not seen by userland */
+ #define I915_BIT_6_SWIZZLE_UNKNOWN	5
++/* Seen by userland. */
++#define I915_BIT_6_SWIZZLE_9_17		6
++#define I915_BIT_6_SWIZZLE_9_10_17	7
+ 
+ struct drm_i915_gem_set_tiling {
+ 	/** Handle of the buffer to have its tiling state updated */
+@@ -719,4 +834,137 @@ struct drm_i915_gem_get_tiling {
+ 	uint32_t swizzle_mode;
+ };
+ 
++struct drm_i915_gem_get_aperture {
++	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
++	uint64_t aper_size;
 +
-+	intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS,
-+	    M_WAITOK | M_ZERO);
++	/**
++	 * Available space in the aperture used by i915_gem_execbuffer, in
++	 * bytes
++	 */
++	uint64_t aper_available_size;
++};
 +
-+	if (IS_GEN6(dev)) {
-+		intel_plane->max_downscale = 16;
-+		intel_plane->update_plane = snb_update_plane;
-+		intel_plane->disable_plane = snb_disable_plane;
-+		intel_plane->update_colorkey = snb_update_colorkey;
-+		intel_plane->get_colorkey = snb_get_colorkey;
-+	} else if (IS_GEN7(dev)) {
-+		intel_plane->max_downscale = 2;
-+		intel_plane->update_plane = ivb_update_plane;
-+		intel_plane->disable_plane = ivb_disable_plane;
-+		intel_plane->update_colorkey = ivb_update_colorkey;
-+		intel_plane->get_colorkey = ivb_get_colorkey;
-+	}
++struct drm_i915_get_pipe_from_crtc_id {
++        /** ID of CRTC being requested **/
++        uint32_t crtc_id;
 +
-+	intel_plane->pipe = pipe;
-+	possible_crtcs = (1 << pipe);
-+	ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
-+			     &intel_plane_funcs, snb_plane_formats,
-+			     DRM_ARRAY_SIZE(snb_plane_formats), false);
-+	if (ret)
-+		free(intel_plane, DRM_MEM_KMS);
++        /** pipe of requested CRTC **/
++        uint32_t pipe;
++};
 +
-+	return ret;
-+}
++#define I915_MADV_WILLNEED 0
++#define I915_MADV_DONTNEED 1
++#define I915_MADV_PURGED_INTERNAL 2 /* internal state */
 +
-
-Property changes on: stable/9/sys/dev/drm/intel_sprite.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/i915_drv.c
-===================================================================
-diff --git sys/dev/drm/i915_drv.c sys/dev/drm/i915_drv.c
---- sys/dev/drm/i915_drv.c	(revision 230124)
-+++ sys/dev/drm/i915_drv.c	(working copy)
-@@ -38,101 +38,355 @@
++struct drm_i915_gem_madvise {
++	/** Handle of the buffer to change the backing store advice */
++	uint32_t handle;
++
++	/* Advice: either the buffer will be needed again in the near future,
++	 *         or wont be and could be discarded under memory pressure.
++	 */
++	uint32_t madv;
++
++	/** Whether the backing store still exists. */
++	uint32_t retained;
++};
++
++#define I915_OVERLAY_TYPE_MASK 		0xff
++#define I915_OVERLAY_YUV_PLANAR 	0x01
++#define I915_OVERLAY_YUV_PACKED 	0x02
++#define I915_OVERLAY_RGB		0x03
++
++#define I915_OVERLAY_DEPTH_MASK		0xff00
++#define I915_OVERLAY_RGB24		0x1000
++#define I915_OVERLAY_RGB16		0x2000
++#define I915_OVERLAY_RGB15		0x3000
++#define I915_OVERLAY_YUV422		0x0100
++#define I915_OVERLAY_YUV411		0x0200
++#define I915_OVERLAY_YUV420		0x0300
++#define I915_OVERLAY_YUV410		0x0400
++
++#define I915_OVERLAY_SWAP_MASK		0xff0000
++#define I915_OVERLAY_NO_SWAP		0x000000
++#define I915_OVERLAY_UV_SWAP		0x010000
++#define I915_OVERLAY_Y_SWAP		0x020000
++#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
++
++#define I915_OVERLAY_FLAGS_MASK		0xff000000
++#define I915_OVERLAY_ENABLE		0x01000000
++
++struct drm_intel_overlay_put_image {
++	/* various flags and src format description */
++	uint32_t flags;
++	/* source picture description */
++	uint32_t bo_handle;
++	/* stride values and offsets are in bytes, buffer relative */
++	uint16_t stride_Y; /* stride for packed formats */
++	uint16_t stride_UV;
++	uint32_t offset_Y; /* offset for packet formats */
++	uint32_t offset_U;
++	uint32_t offset_V;
++	/* in pixels */
++	uint16_t src_width;
++	uint16_t src_height;
++	/* to compensate the scaling factors for partially covered surfaces */
++	uint16_t src_scan_width;
++	uint16_t src_scan_height;
++	/* output crtc description */
++	uint32_t crtc_id;
++	uint16_t dst_x;
++	uint16_t dst_y;
++	uint16_t dst_width;
++	uint16_t dst_height;
++};
++
++/* flags */
++#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
++#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
++struct drm_intel_overlay_attrs {
++	uint32_t flags;
++	uint32_t color_key;
++	int32_t brightness;
++	uint32_t contrast;
++	uint32_t saturation;
++	uint32_t gamma0;
++	uint32_t gamma1;
++	uint32_t gamma2;
++	uint32_t gamma3;
++	uint32_t gamma4;
++	uint32_t gamma5;
++};
++
++/*
++ * Intel sprite handling
++ *
++ * Color keying works with a min/mask/max tuple.  Both source and destination
++ * color keying is allowed.
++ *
++ * Source keying:
++ * Sprite pixels within the min & max values, masked against the color channels
++ * specified in the mask field, will be transparent.  All other pixels will
++ * be displayed on top of the primary plane.  For RGB surfaces, only the min
++ * and mask fields will be used; ranged compares are not allowed.
++ *
++ * Destination keying:
++ * Primary plane pixels that match the min value, masked against the color
++ * channels specified in the mask field, will be replaced by corresponding
++ * pixels from the sprite plane.
++ *
++ * Note that source & destination keying are exclusive; only one can be
++ * active on a given plane.
++ */
++
++#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
++#define I915_SET_COLORKEY_DESTINATION	(1<<1)
++#define I915_SET_COLORKEY_SOURCE	(1<<2)
++struct drm_intel_sprite_colorkey {
++	uint32_t plane_id;
++	uint32_t min_value;
++	uint32_t channel_mask;
++	uint32_t max_value;
++	uint32_t flags;
++};
++
+ #endif				/* _I915_DRM_H_ */
+diff --git a/sys/dev/drm/i915_drv.c b/sys/dev/drm/i915_drv.c
+index 8638df1..89a353c 100644
+--- sys/dev/drm/i915_drv.c
++++ sys/dev/drm/i915_drv.c
+@@ -38,101 +38,355 @@ __FBSDID("$FreeBSD$");
  #include "dev/drm/i915_drm.h"
  #include "dev/drm/i915_drv.h"
  #include "dev/drm/drm_pciids.h"
@@ -10700,14 +24560,16 @@
 -	if (!dev || !dev->dev_private) {
 -		DRM_ERROR("DRM not initialized, aborting suspend.\n");
 -		return -ENODEV;
+-	}
 +	dev_priv = dev->dev_private;
 +	drm_kms_helper_poll_disable(dev);
 +
 +#if 0
 +	pci_save_state(dev->pdev);
 +#endif
-+
-+	DRM_LOCK();
+ 
+ 	DRM_LOCK();
+-	DRM_DEBUG("starting suspend\n");
 +	/* If KMS is active, we do the leavevt stuff here */
 +	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 +		error = -i915_gem_idle(dev);
@@ -10718,10 +24580,8 @@
 +			return (error);
 +		}
 +		drm_irq_uninstall(dev);
- 	}
- 
--	DRM_LOCK();
--	DRM_DEBUG("starting suspend\n");
++	}
++
  	i915_save_state(dev);
 +
 +	intel_opregion_fini(dev);
@@ -10732,16 +24592,14 @@
  
 -	return (bus_generic_suspend(kdev));
 +	return 0;
- }
- 
--static int i915_resume(device_t kdev)
++}
++
 +static int
 +i915_suspend(device_t kdev)
- {
--	struct drm_device *dev = device_get_softc(kdev);
++{
 +	struct drm_device *dev;
 +	int error;
- 
++
 +	dev = device_get_softc(kdev);
 +	if (dev == NULL || dev->dev_private == NULL) {
 +		DRM_ERROR("DRM not initialized, aborting suspend.\n");
@@ -10756,13 +24614,15 @@
 +	error = bus_generic_suspend(kdev);
 +	DRM_DEBUG_KMS("finished suspend %d\n", error);
 +	return (error);
-+}
-+
+ }
+ 
+-static int i915_resume(device_t kdev)
 +static int i915_drm_thaw(struct drm_device *dev)
-+{
+ {
+-	struct drm_device *dev = device_get_softc(kdev);
 +	struct drm_i915_private *dev_priv = dev->dev_private;
 +	int error = 0;
-+
+ 
  	DRM_LOCK();
 +	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 +		i915_gem_restore_gtt_mappings(dev);
@@ -10816,6 +24676,12 @@
 -	   DRIVER_HAVE_IRQ;
 +	struct drm_device *dev;
 +	int ret;
++
++	dev = device_get_softc(kdev);
++	DRM_DEBUG_KMS("starting resume\n");
++#if 0
++	if (pci_enable_device(dev->pdev))
++		return -EIO;
  
 -	dev->driver->buf_priv_size	= sizeof(drm_i915_private_t);
 -	dev->driver->load		= i915_driver_load;
@@ -10829,16 +24695,14 @@
 -	dev->driver->irq_postinstall	= i915_driver_irq_postinstall;
 -	dev->driver->irq_uninstall	= i915_driver_irq_uninstall;
 -	dev->driver->irq_handler	= i915_driver_irq_handler;
-+	dev = device_get_softc(kdev);
-+	DRM_DEBUG_KMS("starting resume\n");
-+#if 0
-+	if (pci_enable_device(dev->pdev))
-+		return -EIO;
++	pci_set_master(dev->pdev);
++#endif
  
 -	dev->driver->ioctls		= i915_ioctls;
 -	dev->driver->max_ioctl		= i915_max_ioctl;
-+	pci_set_master(dev->pdev);
-+#endif
++	ret = -i915_drm_thaw(dev);
++	if (ret != 0)
++		return (ret);
  
 -	dev->driver->name		= DRIVER_NAME;
 -	dev->driver->desc		= DRIVER_DESC;
@@ -10846,10 +24710,6 @@
 -	dev->driver->major		= DRIVER_MAJOR;
 -	dev->driver->minor		= DRIVER_MINOR;
 -	dev->driver->patchlevel		= DRIVER_PATCHLEVEL;
-+	ret = -i915_drm_thaw(dev);
-+	if (ret != 0)
-+		return (ret);
-+
 +	drm_kms_helper_poll_enable(dev);
 +	ret = bus_generic_resume(kdev);
 +	DRM_DEBUG_KMS("finished resume %d\n", ret);
@@ -10869,11 +24729,11 @@
  i915_attach(device_t kdev)
  {
 -	struct drm_device *dev = device_get_softc(kdev);
+-
+-	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+-	    M_WAITOK | M_ZERO);
 +	struct drm_device *dev;
  
--	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
--	    M_WAITOK | M_ZERO);
--
 -	i915_configure(dev);
 -
 -	return drm_attach(kdev, i915_pciidlist);
@@ -10908,7 +24768,7 @@
  }
  
  static device_method_t i915_methods[] = {
-@@ -141,25 +395,358 @@
+@@ -141,25 +395,389 @@ static device_method_t i915_methods[] = {
  	DEVMETHOD(device_attach,	i915_attach),
  	DEVMETHOD(device_suspend,	i915_suspend),
  	DEVMETHOD(device_resume,	i915_resume),
@@ -11037,9 +24897,10 @@
 +gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 +{
 +
-+	/* Forcewake is atomic in case we get in here without the lock */
-+	if (atomic_fetchadd_32(&dev_priv->forcewake_count, 1) == 0)
++	mtx_lock(&dev_priv->gt_lock);
++	if (dev_priv->forcewake_count++ == 0)
 +		dev_priv->display.force_wake_get(dev_priv);
++	mtx_unlock(&dev_priv->gt_lock);
 +}
 +
 +void
@@ -11062,8 +24923,10 @@
 +gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 +{
 +
-+	if (atomic_fetchadd_32(&dev_priv->forcewake_count, -1) == 1)
-+		dev_priv->display.force_wake_put(dev_priv);
++	mtx_lock(&dev_priv->gt_lock);
++	if (--dev_priv->forcewake_count == 0)
++ 		dev_priv->display.force_wake_put(dev_priv);
++	mtx_unlock(&dev_priv->gt_lock);
 +}
 +
 +void
@@ -11162,12 +25025,39 @@
 +gen6_do_reset(struct drm_device *dev, u8 flags)
 +{
 +	struct drm_i915_private *dev_priv;
++	int ret;
 +
 +	dev_priv = dev->dev_private;
-+	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
-+	return (_intel_wait_for(dev,
++
++	/* Hold gt_lock across reset to prevent any register access
++	 * with forcewake not set correctly
++	 */
++	mtx_lock(&dev_priv->gt_lock);
++
++	/* Reset the chip */
++
++	/* GEN6_GDRST is not in the gt power well, no need to check
++	 * for fifo space for the write or forcewake the chip for
++	 * the read
++	 */
++	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
++
++	/* Spin waiting for the device to ack the reset request */
++	ret = _intel_wait_for(dev,
 +	    (I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
-+	    500, 1, "915rst"));
++	    500, 1, "915rst");
++
++	/* If reset with a user forcewake, try to restore, otherwise turn it off */
++ 	if (dev_priv->forcewake_count)
++ 		dev_priv->display.force_wake_get(dev_priv);
++	else
++		dev_priv->display.force_wake_put(dev_priv);
++
++	/* Restore fifo count */
++	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
++
++	mtx_unlock(&dev_priv->gt_lock);
++	return (ret);
 +}
 +
 +int
@@ -11196,14 +25086,11 @@
 +		switch (INTEL_INFO(dev)->gen) {
 +		case 7:
 +		case 6:
-+			ret = gen6_do_reset(dev, flags);
-+			/* If reset with a user forcewake, try to restore */
-+			if (atomic_read(&dev_priv->forcewake_count))
-+				__gen6_gt_force_wake_get(dev_priv);
++		ret = gen6_do_reset(dev, flags);
++		break;
++	case 5:
++		ret = ironlake_do_reset(dev, flags);
 +			break;
-+		case 5:
-+			ret = ironlake_do_reset(dev, flags);
-+			break;
 +		case 4:
 +			ret = i965_do_reset(dev, flags);
 +			break;
@@ -11250,9 +25137,13 @@
 +u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 +	u##x val = 0; \
 +	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-+		gen6_gt_force_wake_get(dev_priv); \
++		mtx_lock(&dev_priv->gt_lock); \
++		if (dev_priv->forcewake_count == 0) \
++			dev_priv->display.force_wake_get(dev_priv); \
 +		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
-+		gen6_gt_force_wake_put(dev_priv); \
++		if (dev_priv->forcewake_count == 0) \
++			dev_priv->display.force_wake_put(dev_priv); \
++		mtx_unlock(&dev_priv->gt_lock); \
 +	} else { \
 +		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
 +	} \
@@ -11279,1192 +25170,11 @@
 +__i915_write(32, 32)
 +__i915_write(64, 64)
 +#undef __i915_write
-Index: sys/dev/drm/intel_lvds.c
-===================================================================
-diff --git sys/dev/drm/intel_lvds.c sys/dev/drm/intel_lvds.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/intel_lvds.c	(working copy)
-@@ -0,0 +1,1067 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ * Copyright (c) 2006 Dave Airlie <airlied at linux.ie>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ *	Eric Anholt <eric at anholt.net>
-+ *      Dave Airlie <airlied at linux.ie>
-+ *      Jesse Barnes <jesse.barnes at intel.com>
-+ */
-+
-+#include "dev/drm/drmP.h"
-+#include "dev/drm/drm.h"
-+#include "dev/drm/drm_crtc.h"
-+#include "dev/drm/drm_edid.h"
-+#include "dev/drm/i915_drm.h"
-+#include "dev/drm/i915_drv.h"
-+#include "dev/drm/intel_drv.h"
-+
-+/* Private structure for the integrated LVDS support */
-+struct intel_lvds {
-+	struct intel_encoder base;
-+
-+	struct edid *edid;
-+
-+	int fitting_mode;
-+	u32 pfit_control;
-+	u32 pfit_pgm_ratios;
-+	bool pfit_dirty;
-+
-+	struct drm_display_mode *fixed_mode;
-+};
-+
-+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
-+{
-+	return container_of(encoder, struct intel_lvds, base.base);
-+}
-+
-+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
-+{
-+	return container_of(intel_attached_encoder(connector),
-+			    struct intel_lvds, base);
-+}
-+
-+/**
-+ * Sets the power state for the panel.
-+ */
-+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
-+{
-+	struct drm_device *dev = intel_lvds->base.base.dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 ctl_reg, lvds_reg, stat_reg;
-+
-+	if (HAS_PCH_SPLIT(dev)) {
-+		ctl_reg = PCH_PP_CONTROL;
-+		lvds_reg = PCH_LVDS;
-+		stat_reg = PCH_PP_STATUS;
-+	} else {
-+		ctl_reg = PP_CONTROL;
-+		lvds_reg = LVDS;
-+		stat_reg = PP_STATUS;
-+	}
-+
-+	I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-+
-+	if (intel_lvds->pfit_dirty) {
-+		/*
-+		 * Enable automatic panel scaling so that non-native modes
-+		 * fill the screen.  The panel fitter should only be
-+		 * adjusted whilst the pipe is disabled, according to
-+		 * register description and PRM.
-+		 */
-+		DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-+			      intel_lvds->pfit_control,
-+			      intel_lvds->pfit_pgm_ratios);
-+
-+		I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
-+		I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
-+		intel_lvds->pfit_dirty = false;
-+	}
-+
-+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
-+	POSTING_READ(lvds_reg);
-+	if (_intel_wait_for(dev,
-+	    (I915_READ(stat_reg) & PP_ON) == 0, 1000,
-+	    1, "915lvds"))
-+		DRM_ERROR("timed out waiting for panel to power off\n");
-+
-+	intel_panel_enable_backlight(dev);
-+}
-+
-+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
-+{
-+	struct drm_device *dev = intel_lvds->base.base.dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 ctl_reg, lvds_reg, stat_reg;
-+
-+	if (HAS_PCH_SPLIT(dev)) {
-+		ctl_reg = PCH_PP_CONTROL;
-+		lvds_reg = PCH_LVDS;
-+		stat_reg = PCH_PP_STATUS;
-+	} else {
-+		ctl_reg = PP_CONTROL;
-+		lvds_reg = LVDS;
-+		stat_reg = PP_STATUS;
-+	}
-+
-+	intel_panel_disable_backlight(dev);
-+
-+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-+	if (_intel_wait_for(dev,
-+	    (I915_READ(stat_reg) & PP_ON) == 0, 1000,
-+	    1, "915lvo"))
-+		DRM_ERROR("timed out waiting for panel to power off\n");
-+
-+	if (intel_lvds->pfit_control) {
-+		I915_WRITE(PFIT_CONTROL, 0);
-+		intel_lvds->pfit_dirty = true;
-+	}
-+
-+	I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
-+	POSTING_READ(lvds_reg);
-+}
-+
-+static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
-+{
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-+
-+	if (mode == DRM_MODE_DPMS_ON)
-+		intel_lvds_enable(intel_lvds);
-+	else
-+		intel_lvds_disable(intel_lvds);
-+
-+	/* XXX: We never power down the LVDS pairs. */
-+}
-+
-+static int intel_lvds_mode_valid(struct drm_connector *connector,
-+				 struct drm_display_mode *mode)
-+{
-+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
-+	struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
-+
-+	if (mode->hdisplay > fixed_mode->hdisplay)
-+		return MODE_PANEL;
-+	if (mode->vdisplay > fixed_mode->vdisplay)
-+		return MODE_PANEL;
-+
-+	return MODE_OK;
-+}
-+
-+static void
-+centre_horizontally(struct drm_display_mode *mode,
-+		    int width)
-+{
-+	u32 border, sync_pos, blank_width, sync_width;
-+
-+	/* keep the hsync and hblank widths constant */
-+	sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
-+	blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
-+	sync_pos = (blank_width - sync_width + 1) / 2;
-+
-+	border = (mode->hdisplay - width + 1) / 2;
-+	border += border & 1; /* make the border even */
-+
-+	mode->crtc_hdisplay = width;
-+	mode->crtc_hblank_start = width + border;
-+	mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
-+
-+	mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
-+	mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
-+}
-+
-+static void
-+centre_vertically(struct drm_display_mode *mode,
-+		  int height)
-+{
-+	u32 border, sync_pos, blank_width, sync_width;
-+
-+	/* keep the vsync and vblank widths constant */
-+	sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
-+	blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
-+	sync_pos = (blank_width - sync_width + 1) / 2;
-+
-+	border = (mode->vdisplay - height + 1) / 2;
-+
-+	mode->crtc_vdisplay = height;
-+	mode->crtc_vblank_start = height + border;
-+	mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
-+
-+	mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
-+	mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
-+}
-+
-+static inline u32 panel_fitter_scaling(u32 source, u32 target)
-+{
-+	/*
-+	 * Floating point operation is not supported. So the FACTOR
-+	 * is defined, which can avoid the floating point computation
-+	 * when calculating the panel ratio.
-+	 */
-+#define ACCURACY 12
-+#define FACTOR (1 << ACCURACY)
-+	u32 ratio = source * FACTOR / target;
-+	return (FACTOR * ratio + FACTOR/2) / FACTOR;
-+}
-+
-+static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
-+				  struct drm_display_mode *mode,
-+				  struct drm_display_mode *adjusted_mode)
-+{
-+	struct drm_device *dev = encoder->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-+	struct drm_encoder *tmp_encoder;
-+	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
-+	int pipe;
-+
-+	/* Should never happen!! */
-+	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
-+		DRM_ERROR("Can't support LVDS on pipe A\n");
-+		return false;
-+	}
-+
-+	/* Should never happen!! */
-+	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
-+		if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
-+			DRM_ERROR("Can't enable LVDS and another "
-+			       "encoder on the same pipe\n");
-+			return false;
-+		}
-+	}
-+
-+	/*
-+	 * We have timings from the BIOS for the panel, put them in
-+	 * to the adjusted mode.  The CRTC will be set up for this mode,
-+	 * with the panel scaling set up to source from the H/VDisplay
-+	 * of the original mode.
-+	 */
-+	intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
-+
-+	if (HAS_PCH_SPLIT(dev)) {
-+		intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
-+					mode, adjusted_mode);
-+		return true;
-+	}
-+
-+	/* Native modes don't need fitting */
-+	if (adjusted_mode->hdisplay == mode->hdisplay &&
-+	    adjusted_mode->vdisplay == mode->vdisplay)
-+		goto out;
-+
-+	/* 965+ wants fuzzy fitting */
-+	if (INTEL_INFO(dev)->gen >= 4)
-+		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
-+				 PFIT_FILTER_FUZZY);
-+
-+	/*
-+	 * Enable automatic panel scaling for non-native modes so that they fill
-+	 * the screen.  Should be enabled before the pipe is enabled, according
-+	 * to register description and PRM.
-+	 * Change the value here to see the borders for debugging
-+	 */
-+	for_each_pipe(pipe)
-+		I915_WRITE(BCLRPAT(pipe), 0);
-+
-+	switch (intel_lvds->fitting_mode) {
-+	case DRM_MODE_SCALE_CENTER:
-+		/*
-+		 * For centered modes, we have to calculate border widths &
-+		 * heights and modify the values programmed into the CRTC.
-+		 */
-+		centre_horizontally(adjusted_mode, mode->hdisplay);
-+		centre_vertically(adjusted_mode, mode->vdisplay);
-+		border = LVDS_BORDER_ENABLE;
-+		break;
-+
-+	case DRM_MODE_SCALE_ASPECT:
-+		/* Scale but preserve the aspect ratio */
-+		if (INTEL_INFO(dev)->gen >= 4) {
-+			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-+			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
-+
-+			/* 965+ is easy, it does everything in hw */
-+			if (scaled_width > scaled_height)
-+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
-+			else if (scaled_width < scaled_height)
-+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
-+			else if (adjusted_mode->hdisplay != mode->hdisplay)
-+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-+		} else {
-+			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-+			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
-+			/*
-+			 * For earlier chips we have to calculate the scaling
-+			 * ratio by hand and program it into the
-+			 * PFIT_PGM_RATIO register
-+			 */
-+			if (scaled_width > scaled_height) { /* pillar */
-+				centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
-+
-+				border = LVDS_BORDER_ENABLE;
-+				if (mode->vdisplay != adjusted_mode->vdisplay) {
-+					u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
-+					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-+							    bits << PFIT_VERT_SCALE_SHIFT);
-+					pfit_control |= (PFIT_ENABLE |
-+							 VERT_INTERP_BILINEAR |
-+							 HORIZ_INTERP_BILINEAR);
-+				}
-+			} else if (scaled_width < scaled_height) { /* letter */
-+				centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
-+
-+				border = LVDS_BORDER_ENABLE;
-+				if (mode->hdisplay != adjusted_mode->hdisplay) {
-+					u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
-+					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-+							    bits << PFIT_VERT_SCALE_SHIFT);
-+					pfit_control |= (PFIT_ENABLE |
-+							 VERT_INTERP_BILINEAR |
-+							 HORIZ_INTERP_BILINEAR);
-+				}
-+			} else
-+				/* Aspects match, Let hw scale both directions */
-+				pfit_control |= (PFIT_ENABLE |
-+						 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-+						 VERT_INTERP_BILINEAR |
-+						 HORIZ_INTERP_BILINEAR);
-+		}
-+		break;
-+
-+	case DRM_MODE_SCALE_FULLSCREEN:
-+		/*
-+		 * Full scaling, even if it changes the aspect ratio.
-+		 * Fortunately this is all done for us in hw.
-+		 */
-+		if (mode->vdisplay != adjusted_mode->vdisplay ||
-+		    mode->hdisplay != adjusted_mode->hdisplay) {
-+			pfit_control |= PFIT_ENABLE;
-+			if (INTEL_INFO(dev)->gen >= 4)
-+				pfit_control |= PFIT_SCALING_AUTO;
-+			else
-+				pfit_control |= (VERT_AUTO_SCALE |
-+						 VERT_INTERP_BILINEAR |
-+						 HORIZ_AUTO_SCALE |
-+						 HORIZ_INTERP_BILINEAR);
-+		}
-+		break;
-+
-+	default:
-+		break;
-+	}
-+
-+out:
-+	/* If not enabling scaling, be consistent and always use 0. */
-+	if ((pfit_control & PFIT_ENABLE) == 0) {
-+		pfit_control = 0;
-+		pfit_pgm_ratios = 0;
-+	}
-+
-+	/* Make sure pre-965 set dither correctly */
-+	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
-+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-+
-+	if (pfit_control != intel_lvds->pfit_control ||
-+	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
-+		intel_lvds->pfit_control = pfit_control;
-+		intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
-+		intel_lvds->pfit_dirty = true;
-+	}
-+	dev_priv->lvds_border_bits = border;
-+
-+	/*
-+	 * XXX: It would be nice to support lower refresh rates on the
-+	 * panels to reduce power consumption, and perhaps match the
-+	 * user's requested refresh rate.
-+	 */
-+
-+	return true;
-+}
-+
-+static void intel_lvds_prepare(struct drm_encoder *encoder)
-+{
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-+
-+	/*
-+	 * Prior to Ironlake, we must disable the pipe if we want to adjust
-+	 * the panel fitter. However at all other times we can just reset
-+	 * the registers regardless.
-+	 */
-+	if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
-+		intel_lvds_disable(intel_lvds);
-+}
-+
-+static void intel_lvds_commit(struct drm_encoder *encoder)
-+{
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-+
-+	/* Always do a full power on as we do not know what state
-+	 * we were left in.
-+	 */
-+	intel_lvds_enable(intel_lvds);
-+}
-+
-+static void intel_lvds_mode_set(struct drm_encoder *encoder,
-+				struct drm_display_mode *mode,
-+				struct drm_display_mode *adjusted_mode)
-+{
-+	/*
-+	 * The LVDS pin pair will already have been turned on in the
-+	 * intel_crtc_mode_set since it has a large impact on the DPLL
-+	 * settings.
-+	 */
-+}
-+
-+/**
-+ * Detect the LVDS connection.
-+ *
-+ * Since LVDS doesn't have hotlug, we use the lid as a proxy.  Open means
-+ * connected and closed means disconnected.  We also send hotplug events as
-+ * needed, using lid status notification from the input layer.
-+ */
-+static enum drm_connector_status
-+intel_lvds_detect(struct drm_connector *connector, bool force)
-+{
-+	struct drm_device *dev = connector->dev;
-+	enum drm_connector_status status;
-+
-+	status = intel_panel_detect(dev);
-+	if (status != connector_status_unknown)
-+		return status;
-+
-+	return connector_status_connected;
-+}
-+
-+/**
-+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
-+ */
-+static int intel_lvds_get_modes(struct drm_connector *connector)
-+{
-+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
-+	struct drm_device *dev = connector->dev;
-+	struct drm_display_mode *mode;
-+
-+	if (intel_lvds->edid)
-+		return drm_add_edid_modes(connector, intel_lvds->edid);
-+
-+	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
-+	if (mode == NULL)
-+		return 0;
-+
-+	drm_mode_probed_add(connector, mode);
-+	return 1;
-+}
-+
-+#ifdef NOTYET
-+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
-+{
-+	DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
-+	return 1;
-+}
-+
-+/* The GPU hangs up on these systems if modeset is performed on LID open */
-+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
-+	{
-+		.callback = intel_no_modeset_on_lid_dmi_callback,
-+		.ident = "Toshiba Tecra A11",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
-+		},
-+	},
-+
-+	{ }	/* terminating entry */
-+};
-+
-+/*
-+ * Lid events. Note the use of 'modeset_on_lid':
-+ *  - we set it on lid close, and reset it on open
-+ *  - we use it as a "only once" bit (ie we ignore
-+ *    duplicate events where it was already properly
-+ *    set/reset)
-+ *  - the suspend/resume paths will also set it to
-+ *    zero, since they restore the mode ("lid open").
-+ */
-+static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
-+			    void *unused)
-+{
-+	struct drm_i915_private *dev_priv =
-+		container_of(nb, struct drm_i915_private, lid_notifier);
-+	struct drm_device *dev = dev_priv->dev;
-+	struct drm_connector *connector = dev_priv->int_lvds_connector;
-+
-+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
-+		return NOTIFY_OK;
-+
-+	/*
-+	 * check and update the status of LVDS connector after receiving
-+	 * the LID nofication event.
-+	 */
-+	if (connector)
-+		connector->status = connector->funcs->detect(connector,
-+							     false);
-+
-+	/* Don't force modeset on machines where it causes a GPU lockup */
-+	if (dmi_check_system(intel_no_modeset_on_lid))
-+		return NOTIFY_OK;
-+	if (!acpi_lid_open()) {
-+		dev_priv->modeset_on_lid = 1;
-+		return NOTIFY_OK;
-+	}
-+
-+	if (!dev_priv->modeset_on_lid)
-+		return NOTIFY_OK;
-+
-+	dev_priv->modeset_on_lid = 0;
-+
-+	mutex_lock(&dev->mode_config.mutex);
-+	drm_helper_resume_force_mode(dev);
-+	mutex_unlock(&dev->mode_config.mutex);
-+
-+	return NOTIFY_OK;
-+}
-+#endif
-+
-+/**
-+ * intel_lvds_destroy - unregister and free LVDS structures
-+ * @connector: connector to free
-+ *
-+ * Unregister the DDC bus for this connector then free the driver private
-+ * structure.
-+ */
-+static void intel_lvds_destroy(struct drm_connector *connector)
-+{
-+	struct drm_device *dev = connector->dev;
-+#if 0
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+#endif
-+
-+	intel_panel_destroy_backlight(dev);
-+
-+#if 0
-+	if (dev_priv->lid_notifier.notifier_call)
-+		acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
-+#endif
-+#if 0
-+	drm_sysfs_connector_remove(connector);
-+#endif
-+	drm_connector_cleanup(connector);
-+	free(connector, DRM_MEM_KMS);
-+}
-+
-+static int intel_lvds_set_property(struct drm_connector *connector,
-+				   struct drm_property *property,
-+				   uint64_t value)
-+{
-+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
-+	struct drm_device *dev = connector->dev;
-+
-+	if (property == dev->mode_config.scaling_mode_property) {
-+		struct drm_crtc *crtc = intel_lvds->base.base.crtc;
-+
-+		if (value == DRM_MODE_SCALE_NONE) {
-+			DRM_DEBUG_KMS("no scaling not supported\n");
-+			return -EINVAL;
-+		}
-+
-+		if (intel_lvds->fitting_mode == value) {
-+			/* the LVDS scaling property is not changed */
-+			return 0;
-+		}
-+		intel_lvds->fitting_mode = value;
-+		if (crtc && crtc->enabled) {
-+			/*
-+			 * If the CRTC is enabled, the display will be changed
-+			 * according to the new panel fitting mode.
-+			 */
-+			drm_crtc_helper_set_mode(crtc, &crtc->mode,
-+				crtc->x, crtc->y, crtc->fb);
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
-+	.dpms = intel_lvds_dpms,
-+	.mode_fixup = intel_lvds_mode_fixup,
-+	.prepare = intel_lvds_prepare,
-+	.mode_set = intel_lvds_mode_set,
-+	.commit = intel_lvds_commit,
-+};
-+
-+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
-+	.get_modes = intel_lvds_get_modes,
-+	.mode_valid = intel_lvds_mode_valid,
-+	.best_encoder = intel_best_encoder,
-+};
-+
-+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-+	.dpms = drm_helper_connector_dpms,
-+	.detect = intel_lvds_detect,
-+	.fill_modes = drm_helper_probe_single_connector_modes,
-+	.set_property = intel_lvds_set_property,
-+	.destroy = intel_lvds_destroy,
-+};
-+
-+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
-+	.destroy = intel_encoder_destroy,
-+};
-+
-+#ifdef NOTYET
-+static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
-+{
-+	DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
-+	return 1;
-+}
-+
-+/* These systems claim to have LVDS, but really don't */
-+static const struct dmi_system_id intel_no_lvds[] = {
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "Apple Mac Mini (Core series)",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "Apple Mac Mini (Core 2 series)",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "MSI IM-945GSE-A",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "Dell Studio Hybrid",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "Dell OptiPlex FX170",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "AOpen Mini PC",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "AOpen Mini PC MP915",
-+		.matches = {
-+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
-+			DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "AOpen i915GMm-HFS",
-+		.matches = {
-+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
-+			DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "Aopen i945GTt-VFA",
-+		.matches = {
-+			DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "Clientron U800",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "Asus EeeBox PC EB1007",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
-+		},
-+	},
-+	{
-+		.callback = intel_no_lvds_dmi_callback,
-+		.ident = "Asus AT5NM10T-I",
-+		.matches = {
-+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+			DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
-+		},
-+	},
-+
-+	{ }	/* terminating entry */
-+};
-+#endif
-+
-+/**
-+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
-+ * @dev: drm device
-+ * @connector: LVDS connector
-+ *
-+ * Find the reduced downclock for LVDS in EDID.
-+ */
-+static void intel_find_lvds_downclock(struct drm_device *dev,
-+				      struct drm_display_mode *fixed_mode,
-+				      struct drm_connector *connector)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct drm_display_mode *scan;
-+	int temp_downclock;
-+
-+	temp_downclock = fixed_mode->clock;
-+	list_for_each_entry(scan, &connector->probed_modes, head) {
-+		/*
-+		 * If one mode has the same resolution with the fixed_panel
-+		 * mode while they have the different refresh rate, it means
-+		 * that the reduced downclock is found for the LVDS. In such
-+		 * case we can set the different FPx0/1 to dynamically select
-+		 * between low and high frequency.
-+		 */
-+		if (scan->hdisplay == fixed_mode->hdisplay &&
-+		    scan->hsync_start == fixed_mode->hsync_start &&
-+		    scan->hsync_end == fixed_mode->hsync_end &&
-+		    scan->htotal == fixed_mode->htotal &&
-+		    scan->vdisplay == fixed_mode->vdisplay &&
-+		    scan->vsync_start == fixed_mode->vsync_start &&
-+		    scan->vsync_end == fixed_mode->vsync_end &&
-+		    scan->vtotal == fixed_mode->vtotal) {
-+			if (scan->clock < temp_downclock) {
-+				/*
-+				 * The downclock is already found. But we
-+				 * expect to find the lower downclock.
-+				 */
-+				temp_downclock = scan->clock;
-+			}
-+		}
-+	}
-+	if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
-+		/* We found the downclock for LVDS. */
-+		dev_priv->lvds_downclock_avail = 1;
-+		dev_priv->lvds_downclock = temp_downclock;
-+		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
-+			      "Normal clock %dKhz, downclock %dKhz\n",
-+			      fixed_mode->clock, temp_downclock);
-+	}
-+}
-+
-+/*
-+ * Enumerate the child dev array parsed from VBT to check whether
-+ * the LVDS is present.
-+ * If it is present, return 1.
-+ * If it is not present, return false.
-+ * If no child dev is parsed from VBT, it assumes that the LVDS is present.
-+ */
-+static bool lvds_is_present_in_vbt(struct drm_device *dev,
-+				   u8 *i2c_pin)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int i;
-+
-+	if (!dev_priv->child_dev_num)
-+		return true;
-+
-+	for (i = 0; i < dev_priv->child_dev_num; i++) {
-+		struct child_device_config *child = dev_priv->child_dev + i;
-+
-+		/* If the device type is not LFP, continue.
-+		 * We have to check both the new identifiers as well as the
-+		 * old for compatibility with some BIOSes.
-+		 */
-+		if (child->device_type != DEVICE_TYPE_INT_LFP &&
-+		    child->device_type != DEVICE_TYPE_LFP)
-+			continue;
-+
-+		if (child->i2c_pin)
-+		    *i2c_pin = child->i2c_pin;
-+
-+		/* However, we cannot trust the BIOS writers to populate
-+		 * the VBT correctly.  Since LVDS requires additional
-+		 * information from AIM blocks, a non-zero addin offset is
-+		 * a good indicator that the LVDS is actually present.
-+		 */
-+		if (child->addin_offset)
-+			return true;
-+
-+		/* But even then some BIOS writers perform some black magic
-+		 * and instantiate the device without reference to any
-+		 * additional data.  Trust that if the VBT was written into
-+		 * the OpRegion then they have validated the LVDS's existence.
-+		 */
-+		if (dev_priv->opregion.vbt)
-+			return true;
-+	}
-+
-+	return false;
-+}
-+
-+/**
-+ * intel_lvds_init - setup LVDS connectors on this device
-+ * @dev: drm device
-+ *
-+ * Create the connector, register the LVDS DDC bus, and try to figure out what
-+ * modes we can display on the LVDS panel (if present).
-+ */
-+bool intel_lvds_init(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_lvds *intel_lvds;
-+	struct intel_encoder *intel_encoder;
-+	struct intel_connector *intel_connector;
-+	struct drm_connector *connector;
-+	struct drm_encoder *encoder;
-+	struct drm_display_mode *scan; /* *modes, *bios_mode; */
-+	struct drm_crtc *crtc;
-+	u32 lvds;
-+	int pipe;
-+	u8 pin;
-+
-+#if 1
-+	KIB_NOTYET();
-+#else
-+	/* Skip init on machines we know falsely report LVDS */
-+	if (dmi_check_system(intel_no_lvds))
-+		return false;
-+#endif
-+
-+	pin = GMBUS_PORT_PANEL;
-+	if (!lvds_is_present_in_vbt(dev, &pin)) {
-+		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
-+		return false;
-+	}
-+
-+	if (HAS_PCH_SPLIT(dev)) {
-+		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
-+			return false;
-+		if (dev_priv->edp.support) {
-+			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
-+			return false;
-+		}
-+	}
-+
-+	intel_lvds = malloc(sizeof(struct intel_lvds), DRM_MEM_KMS,
-+	    M_WAITOK | M_ZERO);
-+	intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
-+	    M_WAITOK | M_ZERO);
-+
-+	if (!HAS_PCH_SPLIT(dev)) {
-+		intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
-+	}
-+
-+	intel_encoder = &intel_lvds->base;
-+	encoder = &intel_encoder->base;
-+	connector = &intel_connector->base;
-+	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
-+			   DRM_MODE_CONNECTOR_LVDS);
-+
-+	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
-+			 DRM_MODE_ENCODER_LVDS);
-+
-+	intel_connector_attach_encoder(intel_connector, intel_encoder);
-+	intel_encoder->type = INTEL_OUTPUT_LVDS;
-+
-+	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
-+	if (HAS_PCH_SPLIT(dev))
-+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-+	else
-+		intel_encoder->crtc_mask = (1 << 1);
-+
-+	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
-+	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
-+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-+	connector->interlace_allowed = false;
-+	connector->doublescan_allowed = false;
-+
-+	/* create the scaling mode property */
-+	drm_mode_create_scaling_mode_property(dev);
-+	/*
-+	 * the initial panel fitting mode will be FULL_SCREEN.
-+	 */
-+
-+	drm_connector_attach_property(&intel_connector->base,
-+				      dev->mode_config.scaling_mode_property,
-+				      DRM_MODE_SCALE_ASPECT);
-+	intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
-+	/*
-+	 * LVDS discovery:
-+	 * 1) check for EDID on DDC
-+	 * 2) check for VBT data
-+	 * 3) check to see if LVDS is already on
-+	 *    if none of the above, no panel
-+	 * 4) make sure lid is open
-+	 *    if closed, act like it's not there for now
-+	 */
-+
-+	/*
-+	 * Attempt to get the fixed panel mode from DDC.  Assume that the
-+	 * preferred mode is the right one.
-+	 */
-+	intel_lvds->edid = drm_get_edid(connector, dev_priv->gmbus[pin]);
-+	if (intel_lvds->edid) {
-+		if (drm_add_edid_modes(connector,
-+				       intel_lvds->edid)) {
-+			drm_mode_connector_update_edid_property(connector,
-+								intel_lvds->edid);
-+		} else {
-+			free(intel_lvds->edid, DRM_MEM_KMS);
-+			intel_lvds->edid = NULL;
-+		}
-+	}
-+	if (!intel_lvds->edid) {
-+		/* Didn't get an EDID, so
-+		 * Set wide sync ranges so we get all modes
-+		 * handed to valid_mode for checking
-+		 */
-+		connector->display_info.min_vfreq = 0;
-+		connector->display_info.max_vfreq = 200;
-+		connector->display_info.min_hfreq = 0;
-+		connector->display_info.max_hfreq = 200;
-+	}
-+
-+	list_for_each_entry(scan, &connector->probed_modes, head) {
-+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-+			intel_lvds->fixed_mode =
-+				drm_mode_duplicate(dev, scan);
-+			intel_find_lvds_downclock(dev,
-+						  intel_lvds->fixed_mode,
-+						  connector);
-+			goto out;
-+		}
-+	}
-+
-+	/* Failed to get EDID, what about VBT? */
-+	if (dev_priv->lfp_lvds_vbt_mode) {
-+		intel_lvds->fixed_mode =
-+			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-+		if (intel_lvds->fixed_mode) {
-+			intel_lvds->fixed_mode->type |=
-+				DRM_MODE_TYPE_PREFERRED;
-+			goto out;
-+		}
-+	}
-+
-+	/*
-+	 * If we didn't get EDID, try checking if the panel is already turned
-+	 * on.  If so, assume that whatever is currently programmed is the
-+	 * correct mode.
-+	 */
-+
-+	/* Ironlake: FIXME if still fail, not try pipe mode now */
-+	if (HAS_PCH_SPLIT(dev))
-+		goto failed;
-+
-+	lvds = I915_READ(LVDS);
-+	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
-+	crtc = intel_get_crtc_for_pipe(dev, pipe);
-+
-+	if (crtc && (lvds & LVDS_PORT_EN)) {
-+		intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
-+		if (intel_lvds->fixed_mode) {
-+			intel_lvds->fixed_mode->type |=
-+				DRM_MODE_TYPE_PREFERRED;
-+			goto out;
-+		}
-+	}
-+
-+	/* If we still don't have a mode after all that, give up. */
-+	if (!intel_lvds->fixed_mode)
-+		goto failed;
-+
-+out:
-+	if (HAS_PCH_SPLIT(dev)) {
-+		u32 pwm;
-+
-+		pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
-+
-+		/* make sure PWM is enabled and locked to the LVDS pipe */
-+		pwm = I915_READ(BLC_PWM_CPU_CTL2);
-+		if (pipe == 0 && (pwm & PWM_PIPE_B))
-+			I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
-+		if (pipe)
-+			pwm |= PWM_PIPE_B;
-+		else
-+			pwm &= ~PWM_PIPE_B;
-+		I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
-+
-+		pwm = I915_READ(BLC_PWM_PCH_CTL1);
-+		pwm |= PWM_PCH_ENABLE;
-+		I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
-+		/*
-+		 * Unlock registers and just
-+		 * leave them unlocked
-+		 */
-+		I915_WRITE(PCH_PP_CONTROL,
-+			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-+	} else {
-+		/*
-+		 * Unlock registers and just
-+		 * leave them unlocked
-+		 */
-+		I915_WRITE(PP_CONTROL,
-+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-+	}
-+#ifdef NOTYET
-+	dev_priv->lid_notifier.notifier_call = intel_lid_notify;
-+	if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
-+		DRM_DEBUG_KMS("lid notifier registration failed\n");
-+		dev_priv->lid_notifier.notifier_call = NULL;
-+	}
-+#endif
-+	/* keep the LVDS connector */
-+	dev_priv->int_lvds_connector = connector;
-+#if 0
-+	drm_sysfs_connector_add(connector);
-+#endif
-+	intel_panel_setup_backlight(dev);
-+	return true;
-+
-+failed:
-+	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
-+	drm_connector_cleanup(connector);
-+	drm_encoder_cleanup(encoder);
-+	free(intel_lvds, DRM_MEM_KMS);
-+	free(intel_connector, DRM_MEM_KMS);
-+	return false;
-+}
-
-Property changes on: stable/9/sys/dev/drm/intel_lvds.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/drm_linux_list_sort.c
-===================================================================
-diff --git sys/dev/drm/drm_linux_list_sort.c sys/dev/drm/drm_linux_list_sort.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/drm_linux_list_sort.c	(working copy)
-@@ -0,0 +1,75 @@
-+/*
-+ * Copyright (c) 2011 The FreeBSD Foundation
-+ * All rights reserved.
-+ *
-+ * This software was developed by Konstantin Belousov under sponsorship from
-+ * the FreeBSD Foundation.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ *    notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ *    notice, this list of conditions and the following disclaimer in the
-+ *    documentation and/or other materials provided with the distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-+ * SUCH DAMAGE.
-+ */
-+
-+#include "dev/drm/drmP.h"
-+__FBSDID();
-+
-+struct drm_list_sort_thunk {
-+	int (*cmp)(void *, struct list_head *, struct list_head *);
-+	void *priv;
-+};
-+
-+static int
-+drm_le_cmp(void *priv, const void *d1, const void *d2)
-+{
-+	struct list_head *le1, *le2;
-+	struct drm_list_sort_thunk *thunk;
-+
-+	thunk = priv;
-+	le1 = __DECONST(struct list_head *, d1);
-+	le2 = __DECONST(struct list_head *, d2);
-+	return ((thunk->cmp)(thunk->priv, le1, le2));
-+}
-+
-+/*
-+ * Punt and use array sort.
-+ */
-+void
-+drm_list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
-+    struct list_head *a, struct list_head *b))
-+{
-+	struct drm_list_sort_thunk thunk;
-+	struct list_head **ar, *le;
-+	int count, i;
-+
-+	count = 0;
-+	list_for_each(le, head)
-+		count++;
-+	ar = malloc(sizeof(struct list_head *) * count, M_TEMP, M_WAITOK);
-+	i = 0;
-+	list_for_each(le, head)
-+		ar[i++] = le;
-+	thunk.cmp = cmp;
-+	thunk.priv = priv;
-+	qsort_r(ar, count, sizeof(struct list_head *), &thunk, drm_le_cmp);
-+	INIT_LIST_HEAD(head);
-+	for (i = 0; i < count; i++)
-+		list_add_tail(ar[i], head);
-+	free(ar, M_TEMP);
-+}
-
-Property changes on: stable/9/sys/dev/drm/drm_linux_list_sort.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/i915_drv.h
-===================================================================
-diff --git sys/dev/drm/i915_drv.h sys/dev/drm/i915_drv.h
---- sys/dev/drm/i915_drv.h	(revision 230124)
-+++ sys/dev/drm/i915_drv.h	(working copy)
-@@ -33,8 +33,11 @@
+diff --git a/sys/dev/drm/i915_drv.h b/sys/dev/drm/i915_drv.h
+index 3896732..1218e4c 100644
+--- sys/dev/drm/i915_drv.h
++++ sys/dev/drm/i915_drv.h
+@@ -33,8 +33,11 @@ __FBSDID("$FreeBSD$");
  #ifndef _I915_DRV_H_
  #define _I915_DRV_H_
  
@@ -12476,7 +25186,7 @@
  
  /* General customization:
   */
-@@ -45,13 +48,28 @@
+@@ -45,13 +48,28 @@ __FBSDID("$FreeBSD$");
  #define DRIVER_DESC		"Intel Graphics"
  #define DRIVER_DATE		"20080730"
  
@@ -12506,7 +25216,7 @@
  /* Interface history:
   *
   * 1.1: Original.
-@@ -74,17 +92,88 @@
+@@ -74,16 +92,87 @@ enum pipe {
  #define WATCH_INACTIVE	0
  #define WATCH_PWRITE	0
  
@@ -12524,7 +25234,7 @@
 +#define I915_GEM_PHYS_CURSOR_1 2
 +#define I915_GEM_PHYS_OVERLAY_REGS 3
 +#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
- 
++
 +struct drm_i915_gem_phys_object {
 +	int id;
 +	drm_dma_handle_t *handle;
@@ -12601,11 +25311,10 @@
 +	FBC_MULTIPLE_PIPES, /* more than one pipe active */
 +	FBC_MODULE_PARAM,
 +};
-+
+ 
  struct mem_block {
  	struct mem_block *next;
- 	struct mem_block *prev;
-@@ -103,17 +192,62 @@
+@@ -103,17 +192,68 @@ struct intel_opregion {
  	struct opregion_acpi *acpi;
  	struct opregion_swsci *swsci;
  	struct opregion_asle *asle;
@@ -12614,7 +25323,7 @@
 +	u32 *lid_state;
  };
 +#define OPREGION_SIZE            (8*1024)
- 
++
 +#define I915_FENCE_REG_NONE -1
 +#define I915_MAX_NUM_FENCES 16
 +/* 16 fences + sign bit for FENCE_REG_NONE */
@@ -12645,7 +25354,7 @@
 +
 +struct intel_fbdev;
 +struct intel_fbc_work;
-+
+ 
  typedef struct drm_i915_private {
  	struct drm_device *dev;
  
@@ -12660,7 +25369,13 @@
  	drm_local_map_t *sarea;
  	drm_local_map_t *mmio_map;
  
-+	uint32_t gt_fifo_count;
++	/** gt_fifo_count and the subsequent register write are synchronized
++	 * with dev->struct_mutex. */
++	unsigned gt_fifo_count;
++	/** forcewake_count is protected by gt_lock */
++	unsigned forcewake_count;
++	/** gt_lock is also taken in irq contexts. */
++	struct mtx gt_lock;
 +
  	drm_i915_sarea_t *sarea_priv;
 -	drm_i915_ring_buffer_t ring;
@@ -12670,7 +25385,7 @@
  
  	drm_dma_handle_t *status_page_dmah;
  	void *hw_status_page;
-@@ -123,35 +257,96 @@
+@@ -123,35 +263,96 @@ typedef struct drm_i915_private {
  	drm_local_map_t hws_map;
  	struct drm_gem_object *hws_obj;
  
@@ -12698,16 +25413,16 @@
 +	u32 gt_irq_mask;
 +	u32 pch_irq_mask;
 +	struct mtx irq_lock;
++
++	u32 hotplug_supported_mask;
  
-+	u32 hotplug_supported_mask;
-+
  	int tex_lru_log_granularity;
  	int allow_batchbuffer;
  	struct mem_block *agp_heap;
  	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
  	int vblank_pipe;
 +	int num_pipe;
- 
++
 +	/* For hangcheck timer */
 +#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
 +	int hangcheck_count;
@@ -12716,7 +25431,7 @@
 +	uint32_t last_acthd_blt;
 +	uint32_t last_instdone;
 +	uint32_t last_instdone1;
-+
+ 
  	struct intel_opregion opregion;
  
 +
@@ -12774,7 +25489,7 @@
  	u32 saveHWS;
  	u32 savePIPEACONF;
  	u32 savePIPEBCONF;
-@@ -168,6 +363,13 @@
+@@ -168,6 +369,13 @@ typedef struct drm_i915_private {
  	u32 saveVBLANK_A;
  	u32 saveVSYNC_A;
  	u32 saveBCLRPAT_A;
@@ -12788,7 +25503,7 @@
  	u32 savePIPEASTAT;
  	u32 saveDSPASTRIDE;
  	u32 saveDSPASIZE;
-@@ -176,8 +378,11 @@
+@@ -176,8 +384,11 @@ typedef struct drm_i915_private {
  	u32 saveDSPASURF;
  	u32 saveDSPATILEOFF;
  	u32 savePFIT_PGM_RATIOS;
@@ -12800,7 +25515,7 @@
  	u32 saveFPB0;
  	u32 saveFPB1;
  	u32 saveDPLL_B;
-@@ -189,6 +394,13 @@
+@@ -189,6 +400,13 @@ typedef struct drm_i915_private {
  	u32 saveVBLANK_B;
  	u32 saveVSYNC_B;
  	u32 saveBCLRPAT_B;
@@ -12814,7 +25529,7 @@
  	u32 savePIPEBSTAT;
  	u32 saveDSPBSTRIDE;
  	u32 saveDSPBSIZE;
-@@ -214,6 +426,7 @@
+@@ -214,6 +432,7 @@ typedef struct drm_i915_private {
  	u32 savePFIT_CONTROL;
  	u32 save_palette_a[256];
  	u32 save_palette_b[256];
@@ -12822,7 +25537,7 @@
  	u32 saveFBC_CFB_BASE;
  	u32 saveFBC_LL_BASE;
  	u32 saveFBC_CONTROL;
-@@ -221,9 +434,13 @@
+@@ -221,9 +440,13 @@ typedef struct drm_i915_private {
  	u32 saveIER;
  	u32 saveIIR;
  	u32 saveIMR;
@@ -12838,7 +25553,7 @@
  	u32 saveMI_ARB_STATE;
  	u32 saveSWF0[16];
  	u32 saveSWF1[16];
-@@ -235,14 +452,70 @@
+@@ -235,14 +458,70 @@ typedef struct drm_i915_private {
  	u8 saveAR[21];
  	u8 saveDACMASK;
  	u8 saveCR[37];
@@ -12892,12 +25607,12 @@
 +		/** List of all objects in gtt_space. Used to restore gtt
 +		 * mappings on resume */
 +		struct list_head gtt_list;
- 
++
 +		/** Usable portion of the GTT for GEM */
 +		unsigned long gtt_start;
 +		unsigned long gtt_mappable_end;
 +		unsigned long gtt_end;
-+
+ 
  		/**
  		 * List of objects currently involved in rendering from the
  		 * ringbuffer.
@@ -12909,7 +25624,7 @@
  		 * A reference is held on the buffer while on this list.
  		 */
  		struct list_head active_list;
-@@ -260,6 +533,8 @@
+@@ -260,6 +539,8 @@ typedef struct drm_i915_private {
  		 * LRU list of objects which are not in the ringbuffer and
  		 * are ready to unbind, but are still in the GTT.
  		 *
@@ -12918,7 +25633,7 @@
  		 * A reference is not held on the buffer while on this list,
  		 * as merely being GTT-bound shouldn't prevent its being
  		 * freed, and we'll pull it off the list in the free path.
-@@ -267,20 +542,37 @@
+@@ -267,11 +548,22 @@ typedef struct drm_i915_private {
  		struct list_head inactive_list;
  
  		/**
@@ -12934,7 +25649,7 @@
 +		/** LRU list of objects with fence regs on them. */
 +		struct list_head fence_list;
 +
- 		/**
++		/**
 +		 * List of objects currently pending being freed.
 +		 *
 +		 * These objects are no longer in use, but due to a signal
@@ -12942,10 +25657,10 @@
 +		 */
 +		struct list_head deferred_free_list;
 +
-+		/**
+ 		/**
  		 * We leave the user IRQ off as much as possible,
  		 * but this means that requests will finish and never
- 		 * be retired once the system goes idle. Set a timer to
+@@ -279,8 +571,14 @@ typedef struct drm_i915_private {
  		 * fire periodically while the ring is running. When it
  		 * fires, go retire requests.
  		 */
@@ -12962,7 +25677,7 @@
  		uint32_t next_gem_seqno;
  
  		/**
-@@ -316,9 +608,108 @@
+@@ -316,9 +614,106 @@ typedef struct drm_i915_private {
  		uint32_t bit_6_swizzle_x;
  		/** Bit 6 swizzling required for Y tiling */
  		uint32_t bit_6_swizzle_y;
@@ -13058,8 +25773,6 @@
 +
 +	struct drm_property *broadcast_rgb_property;
 +	struct drm_property *force_audio_property;
-+
-+	uint32_t forcewake_count;
  } drm_i915_private_t;
  
 +enum i915_cache_level {
@@ -13071,7 +25784,7 @@
  enum intel_chip_family {
  	CHIP_I8XX = 0x01,
  	CHIP_I9XX = 0x02,
-@@ -328,61 +719,148 @@
+@@ -328,31 +723,103 @@ enum intel_chip_family {
  
  /** driver private structure attached to each drm_gem_object */
  struct drm_i915_gem_object {
@@ -13121,8 +25834,8 @@
 +	 * Protected by dev->struct_mutex.
 +	 */
 +	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
- 
- 	/**
++
++	/**
 +	 * Advice: are the backing pages purgeable?
 +	 */
 +	unsigned int madv:2;
@@ -13180,11 +25893,10 @@
 +	LIST_ENTRY(drm_i915_gem_object) exec_node;
 +	unsigned long exec_handle;
 +	struct drm_i915_gem_exec_object2 *exec_entry;
-+
-+	/**
+ 
+ 	/**
  	 * Current offset of the object in GTT space.
- 	 *
- 	 * This is the same as gtt_space->start
+@@ -361,28 +828,43 @@ struct drm_i915_gem_object {
  	 */
  	uint32_t gtt_offset;
  
@@ -13197,21 +25909,21 @@
  	/** Breadcrumb of last rendering to the buffer. */
  	uint32_t last_rendering_seqno;
 +	struct intel_ring_buffer *ring;
- 
--	/** Current tiling mode for the object. */
--	uint32_t tiling_mode;
++
 +	/** Breadcrumb of last fenced GPU access to the buffer. */
 +	uint32_t last_fenced_seqno;
 +	struct intel_ring_buffer *last_fenced_ring;
  
--	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
--	uint32_t agp_type;
+-	/** Current tiling mode for the object. */
+-	uint32_t tiling_mode;
 +	/** Current tiling stride for the object, if it's tiled. */
 +	uint32_t stride;
  
+-	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
+-	uint32_t agp_type;
 +	/** Record of address bit 17 of each page at last unbind. */
 +	unsigned long *bit_17;
-+
+ 
  	/**
 -	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
 -	 * GEM_DOMAIN_CPU is not in the object's read domain.
@@ -13240,7 +25952,7 @@
  /**
   * Request queue structure.
   *
-@@ -394,28 +872,104 @@
+@@ -394,27 +876,103 @@ struct drm_i915_gem_object {
   * an emission time with seqnos for tracking how far ahead of the GPU we are.
   */
  struct drm_i915_gem_request {
@@ -13255,10 +25967,10 @@
  
 -	/** Cache domains that were flushed at the start of the request. */
 -	uint32_t flush_domains;
+-
 +	/** global list entry for this request */
-+	struct list_head list;
- 
--	struct list_head list;
+ 	struct list_head list;
++
 +	struct drm_i915_file_private *file_priv;
 +	/** file_priv list entry for this request */
 +	struct list_head client_list;
@@ -13338,7 +26050,7 @@
 +extern int i915_vbt_sdvo_panel_type;
 +extern int i915_enable_rc6;
 +extern int i915_enable_fbc;
- 
++
 +const struct intel_device_info *i915_get_device_id(int device);
 +
 +int i915_reset(struct drm_device *dev, u8 flags);
@@ -13347,17 +26059,16 @@
 +int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
 +    struct sysctl_oid *top);
 +void i915_sysctl_cleanup(struct drm_device *dev);
-+
+ 
  				/* i915_dma.c */
  extern void i915_kernel_lost_context(struct drm_device * dev);
- extern int i915_driver_load(struct drm_device *, unsigned long flags);
-@@ -432,37 +986,42 @@
+@@ -432,36 +990,41 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
  extern int i915_emit_box(struct drm_device *dev,
  			 struct drm_clip_rect __user *boxes,
  			 int i, int DR1, int DR4);
 +int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
 +    int DR1, int DR4);
- 
++
 +unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 +unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 +void i915_update_gfx_val(struct drm_i915_private *dev_priv);
@@ -13367,7 +26078,7 @@
 +bool i915_gpu_lower(void);
 +bool i915_gpu_busy(void);
 +bool i915_gpu_turbo_disable(void);
-+
+ 
  /* i915_irq.c */
  extern int i915_irq_emit(struct drm_device *dev, void *data,
  			 struct drm_file *file_priv);
@@ -13398,18 +26109,17 @@
  
 -void
 -i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+-
+-void
+-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 +void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 +void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
  
--void
--i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 +void i915_destroy_error_state(struct drm_device *dev);
  
--
  /* i915_mem.c */
  extern int i915_mem_alloc(struct drm_device *dev, void *data,
- 			  struct drm_file *file_priv);
-@@ -475,8 +1034,9 @@
+@@ -475,8 +1038,9 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
  extern void i915_mem_takedown(struct mem_block **heap);
  extern void i915_mem_release(struct drm_device * dev,
  			     struct drm_file *file_priv, struct mem_block *heap);
@@ -13420,7 +26130,7 @@
  int i915_gem_init_ioctl(struct drm_device *dev, void *data,
  			struct drm_file *file_priv);
  int i915_gem_create_ioctl(struct drm_device *dev, void *data,
-@@ -487,12 +1047,16 @@
+@@ -487,12 +1051,16 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
  			  struct drm_file *file_priv);
  int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  			struct drm_file *file_priv);
@@ -13437,7 +26147,7 @@
  int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
  		       struct drm_file *file_priv);
  int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-@@ -501,6 +1065,8 @@
+@@ -501,6 +1069,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  			struct drm_file *file_priv);
  int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  			    struct drm_file *file_priv);
@@ -13446,7 +26156,7 @@
  int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
  			   struct drm_file *file_priv);
  int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
-@@ -509,99 +1075,207 @@
+@@ -509,98 +1079,206 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
  			struct drm_file *file_priv);
  int i915_gem_get_tiling(struct drm_device *dev, void *data,
  			struct drm_file *file_priv);
@@ -13505,7 +26215,7 @@
 +void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 +int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 +    enum i915_cache_level cache_level);
- 
++
 +void i915_gem_free_all_phys_object(struct drm_device *dev);
 +void i915_gem_detach_phys_object(struct drm_device *dev,
 +    struct drm_i915_gem_object *obj);
@@ -13518,7 +26228,7 @@
 +     uint32_t handle, uint64_t *offset);
 +int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
 +     uint32_t handle);
-+
+ 
  /* i915_gem_tiling.c */
  void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 +void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
@@ -13628,16 +26338,10 @@
 +	(((dev_priv)->info->gen >= 6) && \
 +	 ((reg) < 0x40000) &&		 \
 +	 ((reg) != FORCEWAKE))
- 
--#define I915_READ(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
--#define I915_WRITE(reg,val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
--#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
--#define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
--#define I915_READ8(reg)		DRM_READ8(dev_priv->mmio_map, (reg))
--#define I915_WRITE8(reg,val)	DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
++
 +#define __i915_read(x, y) \
 +	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
- 
++
 +__i915_read(8, 8)
 +__i915_read(16, 16)
 +__i915_read(32, 32)
@@ -13668,16 +26372,21 @@
 +
 +#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
 +#define I915_READ64(reg)	i915_read64(dev_priv, (reg))
-+
+ 
+-#define I915_READ(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
+-#define I915_WRITE(reg,val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+-#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
+-#define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+-#define I915_READ8(reg)		DRM_READ8(dev_priv->mmio_map, (reg))
+-#define I915_WRITE8(reg,val)	DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
 +#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 +#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
-+
+ 
  #define I915_VERBOSE 0
  
 -#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
 -                        volatile char *virt;
-+#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
- 
+-
 -#define BEGIN_LP_RING(n) do {				\
 -	if (I915_VERBOSE)				\
 -		DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
@@ -13688,8 +26397,7 @@
 -	ringmask = dev_priv->ring.tail_mask;		\
 -	virt = dev_priv->ring.virtual_start;		\
 -} while (0)
-+#define BEGIN_LP_RING(n) \
-+	intel_ring_begin(LP_RING(dev_priv), (n))
++#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
  
 -#define OUT_RING(n) do {					\
 -	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
@@ -13697,39 +26405,48 @@
 -        outcount++;						\
 -	outring += 4;						\
 -	outring &= ringmask;					\
+-} while (0)
++#define BEGIN_LP_RING(n) \
++	intel_ring_begin(LP_RING(dev_priv), (n))
++
 +#define OUT_RING(x) \
 +	intel_ring_emit(LP_RING(dev_priv), x)
-+
+ 
+-#define ADVANCE_LP_RING() do {						\
+-	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
+-	dev_priv->ring.tail = outring;					\
+-	dev_priv->ring.space -= outcount * 4;				\
+-	I915_WRITE(PRB0_TAIL, outring);			\
+-} while(0)
 +#define ADVANCE_LP_RING() \
 +	intel_ring_advance(LP_RING(dev_priv))
 +
 +#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
 +	if (LP_RING(dev->dev_private)->obj == NULL)			\
 +		LOCK_TEST_WITH_RETURN(dev, file);			\
- } while (0)
++} while (0)
  
--#define ADVANCE_LP_RING() do {						\
--	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
--	dev_priv->ring.tail = outring;					\
--	dev_priv->ring.space -= outcount * 4;				\
--	I915_WRITE(PRB0_TAIL, outring);			\
--} while(0)
--
  /**
   * Reads a dword out of the status page, which is written to from the command
-  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
-@@ -622,19 +1296,30 @@
+@@ -622,19 +1300,30 @@ typedef boolean_t bool;
  #define I915_GEM_HWS_INDEX		0x20
  #define I915_BREADCRUMB_INDEX		0x21
  
 -extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-+#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
- 
+-
 -#define IS_I830(dev) ((dev)->pci_device == 0x3577)
 -#define IS_845G(dev) ((dev)->pci_device == 0x2562)
 -#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
 -#define IS_I855(dev) ((dev)->pci_device == 0x3582)
 -#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+-
+-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
+-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
+-		        (dev)->pci_device == 0x27AE)
++#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
++
 +#define IS_I830(dev)		((dev)->pci_device == 0x3577)
 +#define IS_845G(dev)		((dev)->pci_device == 0x2562)
 +#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
@@ -13750,17 +26467,12 @@
 +#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 +#define	IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
 +#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
- 
--#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
--#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
--#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
--#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
--		        (dev)->pci_device == 0x27AE)
++
 +/* XXXKIB LEGACY */
  #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
  		       (dev)->pci_device == 0x2982 || \
  		       (dev)->pci_device == 0x2992 || \
-@@ -649,32 +1334,69 @@
+@@ -649,32 +1338,69 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
  
  #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
  
@@ -13784,10 +26496,7 @@
  #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
  		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
 +/* XXXKIB LEGACY END */
- 
--#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
--			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
--			IS_IGD(dev))
++
 +#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
 +#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
 +#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
@@ -13795,11 +26504,14 @@
 +#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 +#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
  
--#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
+-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+-			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
+-			IS_IGD(dev))
 +#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
 +#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 +#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
  
+-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
 +#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
 +#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
 +
@@ -13827,7 +26539,7 @@
 +#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 +#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 +#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-+
+ 
  #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
  
 +static inline bool
@@ -13847,2407 +26559,11 @@
 +}
 +
  #endif
-Index: sys/dev/drm/i915_gem_evict.c
-===================================================================
-diff --git sys/dev/drm/i915_gem_evict.c sys/dev/drm/i915_gem_evict.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/i915_gem_evict.c	(working copy)
-@@ -0,0 +1,227 @@
-+/*
-+ * Copyright © 2008-2010 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ *    Eric Anholt <eric at anholt.net>
-+ *    Chris Wilson <chris at chris-wilson.co.uuk>
-+ *
-+ */
-+
-+#include "dev/drm/drmP.h"
-+#include "dev/drm/drm.h"
-+#include "dev/drm/i915_drm.h"
-+#include "dev/drm/i915_drv.h"
-+
-+static bool
-+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
-+{
-+	list_add(&obj->exec_list, unwind);
-+	drm_gem_object_reference(&obj->base);
-+	return drm_mm_scan_add_block(obj->gtt_space);
-+}
-+
-+int
-+i915_gem_evict_something(struct drm_device *dev, int min_size,
-+			 unsigned alignment, bool mappable)
-+{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct list_head eviction_list, unwind_list;
-+	struct drm_i915_gem_object *obj;
-+	int ret = 0;
-+
-+	i915_gem_retire_requests(dev);
-+
-+	/* Re-check for free space after retiring requests */
-+	if (mappable) {
-+		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
-+						min_size, alignment, 0,
-+						dev_priv->mm.gtt_mappable_end,
-+						0))
-+			return 0;
-+	} else {
-+		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
-+				       min_size, alignment, 0))
-+			return 0;
-+	}
-+
-+	CTR4(KTR_DRM, "evict_something %p %d %u %d", dev, min_size,
-+	    alignment, mappable);
-+
-+	/*
-+	 * The goal is to evict objects and amalgamate space in LRU order.
-+	 * The oldest idle objects reside on the inactive list, which is in
-+	 * retirement order. The next objects to retire are those on the (per
-+	 * ring) active list that do not have an outstanding flush. Once the
-+	 * hardware reports completion (the seqno is updated after the
-+	 * batchbuffer has been finished) the clean buffer objects would
-+	 * be retired to the inactive list. Any dirty objects would be added
-+	 * to the tail of the flushing list. So after processing the clean
-+	 * active objects we need to emit a MI_FLUSH to retire the flushing
-+	 * list, hence the retirement order of the flushing list is in
-+	 * advance of the dirty objects on the active lists.
-+	 *
-+	 * The retirement sequence is thus:
-+	 *   1. Inactive objects (already retired)
-+	 *   2. Clean active objects
-+	 *   3. Flushing list
-+	 *   4. Dirty active objects.
-+	 *
-+	 * On each list, the oldest objects lie at the HEAD with the freshest
-+	 * object on the TAIL.
-+	 */
-+
-+	INIT_LIST_HEAD(&unwind_list);
-+	if (mappable)
-+		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
-+					    alignment, 0,
-+					    dev_priv->mm.gtt_mappable_end);
-+	else
-+		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
-+
-+	/* First see if there is a large enough contiguous idle region... */
-+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
-+		if (mark_free(obj, &unwind_list))
-+			goto found;
-+	}
-+
-+	/* Now merge in the soon-to-be-expired objects... */
-+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-+		/* Does the object require an outstanding flush? */
-+		if (obj->base.write_domain || obj->pin_count)
-+			continue;
-+
-+		if (mark_free(obj, &unwind_list))
-+			goto found;
-+	}
-+
-+	/* Finally add anything with a pending flush (in order of retirement) */
-+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-+		if (obj->pin_count)
-+			continue;
-+
-+		if (mark_free(obj, &unwind_list))
-+			goto found;
-+	}
-+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-+		if (!obj->base.write_domain || obj->pin_count)
-+			continue;
-+
-+		if (mark_free(obj, &unwind_list))
-+			goto found;
-+	}
-+
-+	/* Nothing found, clean up and bail out! */
-+	while (!list_empty(&unwind_list)) {
-+		obj = list_first_entry(&unwind_list,
-+				       struct drm_i915_gem_object,
-+				       exec_list);
-+
-+		ret = drm_mm_scan_remove_block(obj->gtt_space);
-+		KASSERT(ret == 0, ("drm_mm_scan_remove_block failed %d", ret));
-+
-+		list_del_init(&obj->exec_list);
-+		drm_gem_object_unreference(&obj->base);
-+	}
-+
-+	/* We expect the caller to unpin, evict all and try again, or give up.
-+	 * So calling i915_gem_evict_everything() is unnecessary.
-+	 */
-+	return -ENOSPC;
-+
-+found:
-+	/* drm_mm doesn't allow any other other operations while
-+	 * scanning, therefore store to be evicted objects on a
-+	 * temporary list. */
-+	INIT_LIST_HEAD(&eviction_list);
-+	while (!list_empty(&unwind_list)) {
-+		obj = list_first_entry(&unwind_list,
-+				       struct drm_i915_gem_object,
-+				       exec_list);
-+		if (drm_mm_scan_remove_block(obj->gtt_space)) {
-+			list_move(&obj->exec_list, &eviction_list);
-+			continue;
-+		}
-+		list_del_init(&obj->exec_list);
-+		drm_gem_object_unreference(&obj->base);
-+	}
-+
-+	/* Unbinding will emit any required flushes */
-+	while (!list_empty(&eviction_list)) {
-+		obj = list_first_entry(&eviction_list,
-+				       struct drm_i915_gem_object,
-+				       exec_list);
-+		if (ret == 0)
-+			ret = i915_gem_object_unbind(obj);
-+
-+		list_del_init(&obj->exec_list);
-+		drm_gem_object_unreference(&obj->base);
-+	}
-+
-+	return ret;
-+}
-+
-+int
-+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
-+{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	int ret;
-+	bool lists_empty;
-+
-+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-+		       list_empty(&dev_priv->mm.flushing_list) &&
-+		       list_empty(&dev_priv->mm.active_list));
-+	if (lists_empty)
-+		return -ENOSPC;
-+
-+	CTR2(KTR_DRM, "evict_everything %p %d", dev, purgeable_only);
-+
-+	/* Flush everything (on to the inactive lists) and evict */
-+	ret = i915_gpu_idle(dev);
-+	if (ret)
-+		return ret;
-+
-+	KASSERT(list_empty(&dev_priv->mm.flushing_list),
-+	    ("flush list not empty"));
-+
-+	return i915_gem_evict_inactive(dev, purgeable_only);
-+}
-+
-+/** Unbinds all inactive objects. */
-+int
-+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
-+{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct drm_i915_gem_object *obj, *next;
-+
-+	CTR2(KTR_DRM, "evict_inactive %p %d", dev, purgeable_only);
-+
-+	list_for_each_entry_safe(obj, next,
-+				 &dev_priv->mm.inactive_list, mm_list) {
-+		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-+			int ret = i915_gem_object_unbind(obj);
-+			if (ret)
-+				return ret;
-+		}
-+	}
-+
-+	return 0;
-+}
-
-Property changes on: stable/9/sys/dev/drm/i915_gem_evict.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
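Another illustrative aside, not part of the patch: i915_gem_evict_something() above runs in two phases. It first walks the inactive, active and flushing lists in retirement order, marking candidates via mark_free()/drm_mm_scan_add_block() until enough contiguous space would be freed, and then either rolls the unwind list back (returning -ENOSPC) or unbinds exactly the objects the scan selected. The userspace sketch below models that scan-then-commit-or-rollback shape with a plain array; the object names, sizes and the pinned flag are invented for the example.

    /* Toy model of the two-phase eviction scan (illustration only). */
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        const char *name;
        int size;
        bool pinned;   /* cannot be evicted */
        bool marked;   /* candidate collected on the "unwind" list */
    };

    /* Phase 1: walk objects in retirement (LRU) order, marking candidates
     * until the accumulated space would satisfy the request. */
    static bool scan_for_space(struct obj *objs, int n, int need, int *accum)
    {
        for (int i = 0; i < n; i++) {
            if (objs[i].pinned)
                continue;
            objs[i].marked = true;
            *accum += objs[i].size;
            if (*accum >= need)
                return true;   /* "goto found" in i915_gem_evict_something() */
        }
        return false;
    }

    int main(void)
    {
        struct obj objs[] = {
            { "inactive-a", 64,  false, false },
            { "inactive-b", 32,  false, false },
            { "active-c",   128, true,  false },   /* pinned, skipped */
            { "flushing-d", 256, false, false },
        };
        int n = sizeof(objs) / sizeof(objs[0]);
        int need = 300, accum = 0;

        if (!scan_for_space(objs, n, need, &accum)) {
            /* Phase 2a: nothing suitable, so unwind every mark and give up
             * (the real code drops its references and returns -ENOSPC). */
            for (int i = 0; i < n; i++)
                objs[i].marked = false;
            printf("no space: caller must evict everything or give up\n");
            return 1;
        }

        /* Phase 2b: commit, unbinding exactly the objects the scan selected. */
        for (int i = 0; i < n; i++)
            if (objs[i].marked)
                printf("evicting %s (%d)\n", objs[i].name, objs[i].size);
        return 0;
    }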
-Index: sys/dev/drm/drm_fourcc.h
-===================================================================
-diff --git sys/dev/drm/drm_fourcc.h sys/dev/drm/drm_fourcc.h
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/drm_fourcc.h	(working copy)
-@@ -0,0 +1,137 @@
-+/*
-+ * Copyright 2011 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef DRM_FOURCC_H
-+#define DRM_FOURCC_H
-+
-+#include <sys/types.h>
-+
-+#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
-+				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
-+
-+#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
-+
-+/* color index */
-+#define DRM_FORMAT_C8		fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
-+
-+/* 8 bpp RGB */
-+#define DRM_FORMAT_RGB332	fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
-+#define DRM_FORMAT_BGR233	fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
-+
-+/* 16 bpp RGB */
-+#define DRM_FORMAT_XRGB4444	fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
-+#define DRM_FORMAT_XBGR4444	fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
-+#define DRM_FORMAT_RGBX4444	fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
-+#define DRM_FORMAT_BGRX4444	fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
-+
-+#define DRM_FORMAT_ARGB4444	fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
-+#define DRM_FORMAT_ABGR4444	fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
-+#define DRM_FORMAT_RGBA4444	fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
-+#define DRM_FORMAT_BGRA4444	fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
-+
-+#define DRM_FORMAT_XRGB1555	fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
-+#define DRM_FORMAT_XBGR1555	fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
-+#define DRM_FORMAT_RGBX5551	fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
-+#define DRM_FORMAT_BGRX5551	fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
-+
-+#define DRM_FORMAT_ARGB1555	fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
-+#define DRM_FORMAT_ABGR1555	fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
-+#define DRM_FORMAT_RGBA5551	fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
-+#define DRM_FORMAT_BGRA5551	fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
-+
-+#define DRM_FORMAT_RGB565	fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
-+#define DRM_FORMAT_BGR565	fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
-+
-+/* 24 bpp RGB */
-+#define DRM_FORMAT_RGB888	fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
-+#define DRM_FORMAT_BGR888	fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
-+
-+/* 32 bpp RGB */
-+#define DRM_FORMAT_XRGB8888	fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
-+#define DRM_FORMAT_XBGR8888	fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
-+#define DRM_FORMAT_RGBX8888	fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
-+#define DRM_FORMAT_BGRX8888	fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
-+
-+#define DRM_FORMAT_ARGB8888	fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
-+#define DRM_FORMAT_ABGR8888	fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
-+#define DRM_FORMAT_RGBA8888	fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
-+#define DRM_FORMAT_BGRA8888	fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
-+
-+#define DRM_FORMAT_XRGB2101010	fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
-+#define DRM_FORMAT_XBGR2101010	fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
-+#define DRM_FORMAT_RGBX1010102	fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
-+#define DRM_FORMAT_BGRX1010102	fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
-+
-+#define DRM_FORMAT_ARGB2101010	fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
-+#define DRM_FORMAT_ABGR2101010	fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
-+#define DRM_FORMAT_RGBA1010102	fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
-+#define DRM_FORMAT_BGRA1010102	fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
-+
-+/* packed YCbCr */
-+#define DRM_FORMAT_YUYV		fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
-+#define DRM_FORMAT_YVYU		fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
-+#define DRM_FORMAT_UYVY		fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
-+#define DRM_FORMAT_VYUY		fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
-+
-+#define DRM_FORMAT_AYUV		fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
-+
-+/*
-+ * 2 plane YCbCr
-+ * index 0 = Y plane, [7:0] Y
-+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
-+ * or
-+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
-+ */
-+#define DRM_FORMAT_NV12		fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
-+#define DRM_FORMAT_NV21		fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
-+#define DRM_FORMAT_NV16		fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
-+#define DRM_FORMAT_NV61		fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
-+
-+/* 2 non contiguous plane YCbCr */
-+#define DRM_FORMAT_NV12M	fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
-+#define DRM_FORMAT_NV12MT	fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
-+
-+/*
-+ * 3 plane YCbCr
-+ * index 0: Y plane, [7:0] Y
-+ * index 1: Cb plane, [7:0] Cb
-+ * index 2: Cr plane, [7:0] Cr
-+ * or
-+ * index 1: Cr plane, [7:0] Cr
-+ * index 2: Cb plane, [7:0] Cb
-+ */
-+#define DRM_FORMAT_YUV410	fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
-+#define DRM_FORMAT_YVU410	fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
-+#define DRM_FORMAT_YUV411	fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
-+#define DRM_FORMAT_YVU411	fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
-+#define DRM_FORMAT_YUV420	fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
-+#define DRM_FORMAT_YVU420	fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
-+#define DRM_FORMAT_YUV422	fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
-+#define DRM_FORMAT_YVU422	fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
-+#define DRM_FORMAT_YUV444	fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
-+#define DRM_FORMAT_YVU444	fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
-+
-+/* 3 non contiguous plane YCbCr */
-+#define DRM_FORMAT_YUV420M	fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
-+
-+#endif /* DRM_FOURCC_H */
-
-Property changes on: stable/9/sys/dev/drm/drm_fourcc.h
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
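A short aside on the new header, not part of the patch: drm_fourcc.h identifies every pixel format by a fourcc, four ASCII characters packed little-endian into a uint32_t by fourcc_code(). The standalone program below just demonstrates the packing; DRM_FORMAT_XRGB8888 ('X', 'R', '2', '4') comes out as 0x34325258.

    /* Demonstration of the fourcc packing used by drm_fourcc.h. */
    #include <stdint.h>
    #include <stdio.h>

    #define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                                     ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

    #define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4')

    int main(void)
    {
        /* Prints 0x34325258: the bytes 'X' 'R' '2' '4' in little-endian order. */
        printf("XR24 -> 0x%08x\n", (unsigned)DRM_FORMAT_XRGB8888);
        return 0;
    }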
-Index: sys/dev/drm/drm_crtc_helper.c
-===================================================================
-diff --git sys/dev/drm/drm_crtc_helper.c sys/dev/drm/drm_crtc_helper.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/drm_crtc_helper.c	(working copy)
-@@ -0,0 +1,1038 @@
-+/*
-+ * Copyright (c) 2006-2008 Intel Corporation
-+ * Copyright (c) 2007 Dave Airlie <airlied at linux.ie>
-+ *
-+ * DRM core CRTC related functions
-+ *
-+ * Permission to use, copy, modify, distribute, and sell this software and its
-+ * documentation for any purpose is hereby granted without fee, provided that
-+ * the above copyright notice appear in all copies and that both that copyright
-+ * notice and this permission notice appear in supporting documentation, and
-+ * that the name of the copyright holders not be used in advertising or
-+ * publicity pertaining to distribution of the software without specific,
-+ * written prior permission.  The copyright holders make no representations
-+ * about the suitability of this software for any purpose.  It is provided "as
-+ * is" without express or implied warranty.
-+ *
-+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
-+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
-+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-+ * OF THIS SOFTWARE.
-+ *
-+ * Authors:
-+ *      Keith Packard
-+ *	Eric Anholt <eric at anholt.net>
-+ *      Dave Airlie <airlied at linux.ie>
-+ *      Jesse Barnes <jesse.barnes at intel.com>
-+ */
-+
-+#include <sys/param.h>
-+#include <sys/systm.h>
-+#include "dev/drm/drmP.h"
-+#include "dev/drm/drm_crtc.h"
-+#include "dev/drm/drm_fourcc.h"
-+#include "dev/drm/drm_crtc_helper.h"
-+#include "dev/drm/drm_fb_helper.h"
-+
-+bool
-+drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
-+    struct drm_cmdline_mode *cmdline_mode)
-+{
-+	char *tun_var_name, *tun_mode;
-+	static const char tun_prefix[] = "drm_mode.";
-+	bool res;
-+
-+	res = false;
-+	tun_var_name = malloc(sizeof(tun_prefix) +
-+	    strlen(drm_get_connector_name(connector)), M_TEMP, M_WAITOK);
-+	strcpy(tun_var_name, tun_prefix);
-+	strcat(tun_var_name, drm_get_connector_name(connector));
-+	tun_mode = getenv(tun_var_name);
-+	if (tun_mode != NULL) {
-+		res = drm_mode_parse_command_line_for_connector(tun_mode,
-+		    connector, cmdline_mode);
-+		freeenv(tun_mode);
-+	}
-+	free(tun_var_name, M_TEMP);
-+	return (res);
-+}
-+
-+static bool drm_kms_helper_poll = true;
-+
-+static void drm_mode_validate_flag(struct drm_connector *connector,
-+				   int flags)
-+{
-+	struct drm_display_mode *mode, *t;
-+
-+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
-+		return;
-+
-+	list_for_each_entry_safe(mode, t, &connector->modes, head) {
-+		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
-+				!(flags & DRM_MODE_FLAG_INTERLACE))
-+			mode->status = MODE_NO_INTERLACE;
-+		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
-+				!(flags & DRM_MODE_FLAG_DBLSCAN))
-+			mode->status = MODE_NO_DBLESCAN;
-+	}
-+
-+	return;
-+}
-+
-+/**
-+ * drm_helper_probe_single_connector_modes - get complete set of display modes
-+ * @dev: DRM device
-+ * @maxX: max width for modes
-+ * @maxY: max height for modes
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Based on @dev's mode_config layout, scan all the connectors and try to detect
-+ * modes on them.  Modes will first be added to the connector's probed_modes
-+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
-+ * put into the normal modes list.
-+ *
-+ * Intended to be used either at bootup time or when major configuration
-+ * changes have occurred.
-+ *
-+ * FIXME: take into account monitor limits
-+ *
-+ * RETURNS:
-+ * Number of modes found on @connector.
-+ */
-+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
-+					    uint32_t maxX, uint32_t maxY)
-+{
-+	struct drm_device *dev = connector->dev;
-+	struct drm_display_mode *mode, *t;
-+	struct drm_connector_helper_funcs *connector_funcs =
-+		connector->helper_private;
-+	struct drm_cmdline_mode cmdline_mode;
-+	int count = 0;
-+	int mode_flags = 0;
-+
-+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
-+			drm_get_connector_name(connector));
-+	/* set all modes to the unverified state */
-+	list_for_each_entry_safe(mode, t, &connector->modes, head)
-+		mode->status = MODE_UNVERIFIED;
-+
-+	if (connector->force) {
-+		if (connector->force == DRM_FORCE_ON)
-+			connector->status = connector_status_connected;
-+		else
-+			connector->status = connector_status_disconnected;
-+		if (connector->funcs->force)
-+			connector->funcs->force(connector);
-+	} else {
-+		connector->status = connector->funcs->detect(connector, true);
-+		drm_kms_helper_poll_enable(dev);
-+	}
-+
-+	if (connector->status == connector_status_disconnected) {
-+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
-+			connector->base.id, drm_get_connector_name(connector));
-+		drm_mode_connector_update_edid_property(connector, NULL);
-+		goto prune;
-+	}
-+
-+	count = (*connector_funcs->get_modes)(connector);
-+	if (count == 0 && drm_fetch_cmdline_mode_from_kenv(connector,
-+	    &cmdline_mode)) {
-+		mode = drm_mode_create_from_cmdline_mode(dev,
-+		    &cmdline_mode);
-+		if (mode != NULL) {
-+			DRM_DEBUG_KMS(
-+	"[CONNECTOR:%d:%s] found manual override ",
-+			    connector->base.id,
-+			    drm_get_connector_name(connector));
-+			drm_mode_debug_printmodeline(mode);
-+			drm_mode_probed_add(connector, mode);
-+			count++;
-+		} else {
-+			DRM_ERROR(
-+	"[CONNECTOR:%d:%s] manual override mode: parse error\n",
-+			    connector->base.id,
-+			    drm_get_connector_name(connector));
-+		}
-+	}
-+	if (count == 0 && connector->status == connector_status_connected)
-+		count = drm_add_modes_noedid(connector, 1024, 768);
-+	if (count == 0)
-+		goto prune;
-+
-+	drm_mode_connector_list_update(connector);
-+
-+	if (maxX && maxY)
-+		drm_mode_validate_size(dev, &connector->modes, maxX,
-+				       maxY, 0);
-+
-+	if (connector->interlace_allowed)
-+		mode_flags |= DRM_MODE_FLAG_INTERLACE;
-+	if (connector->doublescan_allowed)
-+		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
-+	drm_mode_validate_flag(connector, mode_flags);
-+
-+	list_for_each_entry_safe(mode, t, &connector->modes, head) {
-+		if (mode->status == MODE_OK)
-+			mode->status = connector_funcs->mode_valid(connector,
-+								   mode);
-+	}
-+
-+prune:
-+	drm_mode_prune_invalid(dev, &connector->modes, true);
-+
-+	if (list_empty(&connector->modes))
-+		return 0;
-+
-+	drm_mode_sort(&connector->modes);
-+
-+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
-+			drm_get_connector_name(connector));
-+	list_for_each_entry_safe(mode, t, &connector->modes, head) {
-+		mode->vrefresh = drm_mode_vrefresh(mode);
-+
-+		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-+		drm_mode_debug_printmodeline(mode);
-+	}
-+
-+	return count;
-+}
-+
-+/**
-+ * drm_helper_encoder_in_use - check if a given encoder is in use
-+ * @encoder: encoder to check
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Walk @encoders's DRM device's mode_config and see if it's in use.
-+ *
-+ * RETURNS:
-+ * True if @encoder is part of the mode_config, false otherwise.
-+ */
-+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
-+{
-+	struct drm_connector *connector;
-+	struct drm_device *dev = encoder->dev;
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-+		if (connector->encoder == encoder)
-+			return true;
-+	return false;
-+}
-+
-+/**
-+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
-+ * @crtc: CRTC to check
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Walk @crtc's DRM device's mode_config and see if it's in use.
-+ *
-+ * RETURNS:
-+ * True if @crtc is part of the mode_config, false otherwise.
-+ */
-+bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
-+{
-+	struct drm_encoder *encoder;
-+	struct drm_device *dev = crtc->dev;
-+	/* FIXME: Locking around list access? */
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-+		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
-+			return true;
-+	return false;
-+}
-+
-+static void
-+drm_encoder_disable(struct drm_encoder *encoder)
-+{
-+	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-+
-+	if (encoder_funcs->disable)
-+		(*encoder_funcs->disable)(encoder);
-+	else
-+		(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
-+}
-+
-+/**
-+ * drm_helper_disable_unused_functions - disable unused objects
-+ * @dev: DRM device
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled
-+ * by calling its dpms function, which should power it off.
-+ */
-+void drm_helper_disable_unused_functions(struct drm_device *dev)
-+{
-+	struct drm_encoder *encoder;
-+	struct drm_connector *connector;
-+	struct drm_crtc *crtc;
-+
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+		if (!connector->encoder)
-+			continue;
-+		if (connector->status == connector_status_disconnected)
-+			connector->encoder = NULL;
-+	}
-+
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+		if (!drm_helper_encoder_in_use(encoder)) {
-+			drm_encoder_disable(encoder);
-+			/* disconnector encoder from any connector */
-+			encoder->crtc = NULL;
-+		}
-+	}
-+
-+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-+		crtc->enabled = drm_helper_crtc_in_use(crtc);
-+		if (!crtc->enabled) {
-+			if (crtc_funcs->disable)
-+				(*crtc_funcs->disable)(crtc);
-+			else
-+				(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
-+			crtc->fb = NULL;
-+		}
-+	}
-+}
-+
-+/**
-+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
-+ * @encoder: encoder to test
-+ * @crtc: crtc to test
-+ *
-+ * Return false if @encoder can't be driven by @crtc, true otherwise.
-+ */
-+static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
-+				struct drm_crtc *crtc)
-+{
-+	struct drm_device *dev;
-+	struct drm_crtc *tmp;
-+	int crtc_mask = 1;
-+
-+	if (crtc == NULL)
-+		printf("checking null crtc?\n");
-+
-+	dev = crtc->dev;
-+
-+	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
-+		if (tmp == crtc)
-+			break;
-+		crtc_mask <<= 1;
-+	}
-+
-+	if (encoder->possible_crtcs & crtc_mask)
-+		return true;
-+	return false;
-+}
-+
-+/*
-+ * Check the CRTC we're going to map each output to vs. its current
-+ * CRTC.  If they don't match, we have to disable the output and the CRTC
-+ * since the driver will have to re-route things.
-+ */
-+static void
-+drm_crtc_prepare_encoders(struct drm_device *dev)
-+{
-+	struct drm_encoder_helper_funcs *encoder_funcs;
-+	struct drm_encoder *encoder;
-+
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+		encoder_funcs = encoder->helper_private;
-+		/* Disable unused encoders */
-+		if (encoder->crtc == NULL)
-+			drm_encoder_disable(encoder);
-+		/* Disable encoders whose CRTC is about to change */
-+		if (encoder_funcs->get_crtc &&
-+		    encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
-+			drm_encoder_disable(encoder);
-+	}
-+}
-+
-+/**
-+ * drm_crtc_set_mode - set a mode
-+ * @crtc: CRTC to program
-+ * @mode: mode to use
-+ * @x: width of mode
-+ * @y: height of mode
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
-+ * to fixup or reject the mode prior to trying to set it.
-+ *
-+ * RETURNS:
-+ * True if the mode was set successfully, or false otherwise.
-+ */
-+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
-+			      struct drm_display_mode *mode,
-+			      int x, int y,
-+			      struct drm_framebuffer *old_fb)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
-+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-+	struct drm_encoder_helper_funcs *encoder_funcs;
-+	int saved_x, saved_y;
-+	struct drm_encoder *encoder;
-+	bool ret = true;
-+
-+	crtc->enabled = drm_helper_crtc_in_use(crtc);
-+	if (!crtc->enabled)
-+		return true;
-+
-+	adjusted_mode = drm_mode_duplicate(dev, mode);
-+
-+	saved_hwmode = crtc->hwmode;
-+	saved_mode = crtc->mode;
-+	saved_x = crtc->x;
-+	saved_y = crtc->y;
-+
-+	/* Update crtc values up front so the driver can rely on them for mode
-+	 * setting.
-+	 */
-+	crtc->mode = *mode;
-+	crtc->x = x;
-+	crtc->y = y;
-+
-+	/* Pass our mode to the connectors and the CRTC to give them a chance to
-+	 * adjust it according to limitations or connector properties, and also
-+	 * a chance to reject the mode entirely.
-+	 */
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+
-+		if (encoder->crtc != crtc)
-+			continue;
-+		encoder_funcs = encoder->helper_private;
-+		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
-+						      adjusted_mode))) {
-+			goto done;
-+		}
-+	}
-+
-+	if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
-+		goto done;
-+	}
-+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
-+
-+	/* Prepare the encoders and CRTCs before setting the mode. */
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+
-+		if (encoder->crtc != crtc)
-+			continue;
-+		encoder_funcs = encoder->helper_private;
-+		/* Disable the encoders as the first thing we do. */
-+		encoder_funcs->prepare(encoder);
-+	}
-+
-+	drm_crtc_prepare_encoders(dev);
-+
-+	crtc_funcs->prepare(crtc);
-+
-+	/* Set up the DPLL and any encoders state that needs to adjust or depend
-+	 * on the DPLL.
-+	 */
-+	ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
-+	if (!ret)
-+	    goto done;
-+
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+
-+		if (encoder->crtc != crtc)
-+			continue;
-+
-+		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
-+			encoder->base.id, drm_get_encoder_name(encoder),
-+			mode->base.id, mode->name);
-+		encoder_funcs = encoder->helper_private;
-+		encoder_funcs->mode_set(encoder, mode, adjusted_mode);
-+	}
-+
-+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
-+	crtc_funcs->commit(crtc);
-+
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+
-+		if (encoder->crtc != crtc)
-+			continue;
-+
-+		encoder_funcs = encoder->helper_private;
-+		encoder_funcs->commit(encoder);
-+
-+	}
-+
-+	/* Store real post-adjustment hardware mode. */
-+	crtc->hwmode = *adjusted_mode;
-+
-+	/* Calculate and store various constants which
-+	 * are later needed by vblank and swap-completion
-+	 * timestamping. They are derived from true hwmode.
-+	 */
-+	drm_calc_timestamping_constants(crtc);
-+
-+	/* FIXME: add subpixel order */
-+done:
-+	drm_mode_destroy(dev, adjusted_mode);
-+	if (!ret) {
-+		crtc->hwmode = saved_hwmode;
-+		crtc->mode = saved_mode;
-+		crtc->x = saved_x;
-+		crtc->y = saved_y;
-+	}
-+
-+	return ret;
-+}
-+
-+static int
-+drm_crtc_helper_disable(struct drm_crtc *crtc)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_connector *connector;
-+	struct drm_encoder *encoder;
-+
-+	/* Decouple all encoders and their attached connectors from this crtc */
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+		if (encoder->crtc != crtc)
-+			continue;
-+
-+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+			if (connector->encoder != encoder)
-+				continue;
-+
-+			connector->encoder = NULL;
-+		}
-+	}
-+
-+	drm_helper_disable_unused_functions(dev);
-+	return 0;
-+}
-+
-+/**
-+ * drm_crtc_helper_set_config - set a new config from userspace
-+ * @crtc: CRTC to setup
-+ * @crtc_info: user provided configuration
-+ * @new_mode: new mode to set
-+ * @connector_set: set of connectors for the new config
-+ * @fb: new framebuffer
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Setup a new configuration, provided by the user in @crtc_info, and enable
-+ * it.
-+ *
-+ * RETURNS:
-+ * Zero. (FIXME)
-+ */
-+int drm_crtc_helper_set_config(struct drm_mode_set *set)
-+{
-+	struct drm_device *dev;
-+	struct drm_crtc *save_crtcs, *new_crtc, *crtc;
-+	struct drm_encoder *save_encoders, *new_encoder, *encoder;
-+	struct drm_framebuffer *old_fb = NULL;
-+	bool mode_changed = false; /* if true do a full mode set */
-+	bool fb_changed = false; /* if true and !mode_changed just do a flip */
-+	struct drm_connector *save_connectors, *connector;
-+	int count = 0, ro, fail = 0;
-+	struct drm_crtc_helper_funcs *crtc_funcs;
-+	struct drm_mode_set save_set;
-+	int ret = 0;
-+	int i;
-+
-+	DRM_DEBUG_KMS("\n");
-+
-+	if (!set)
-+		return -EINVAL;
-+
-+	if (!set->crtc)
-+		return -EINVAL;
-+
-+	if (!set->crtc->helper_private)
-+		return -EINVAL;
-+
-+	crtc_funcs = set->crtc->helper_private;
-+
-+	if (!set->mode)
-+		set->fb = NULL;
-+
-+	if (set->fb) {
-+		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
-+				set->crtc->base.id, set->fb->base.id,
-+				(int)set->num_connectors, set->x, set->y);
-+	} else {
-+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-+		return drm_crtc_helper_disable(set->crtc);
-+	}
-+
-+	dev = set->crtc->dev;
-+
-+	/* Allocate space for the backup of all (non-pointer) crtc, encoder and
-+	 * connector data. */
-+	save_crtcs = malloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc),
-+	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
-+	save_encoders = malloc(dev->mode_config.num_encoder *
-+	    sizeof(struct drm_encoder), DRM_MEM_KMS, M_WAITOK | M_ZERO);
-+	save_connectors = malloc(dev->mode_config.num_connector *
-+	    sizeof(struct drm_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
-+
-+	/* Copy data. Note that driver private data is not affected.
-+	 * Should anything bad happen only the expected state is
-+	 * restored, not the drivers personal bookkeeping.
-+	 */
-+	count = 0;
-+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+		save_crtcs[count++] = *crtc;
-+	}
-+
-+	count = 0;
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+		save_encoders[count++] = *encoder;
-+	}
-+
-+	count = 0;
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+		save_connectors[count++] = *connector;
-+	}
-+
-+	save_set.crtc = set->crtc;
-+	save_set.mode = &set->crtc->mode;
-+	save_set.x = set->crtc->x;
-+	save_set.y = set->crtc->y;
-+	save_set.fb = set->crtc->fb;
-+
-+	/* We should be able to check here if the fb has the same properties
-+	 * and then just flip_or_move it */
-+	if (set->crtc->fb != set->fb) {
-+		/* If we have no fb then treat it as a full mode set */
-+		if (set->crtc->fb == NULL) {
-+			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
-+			mode_changed = true;
-+		} else if (set->fb == NULL) {
-+			mode_changed = true;
-+		} else
-+			fb_changed = true;
-+	}
-+
-+	if (set->x != set->crtc->x || set->y != set->crtc->y)
-+		fb_changed = true;
-+
-+	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
-+		DRM_DEBUG_KMS("modes are different, full mode set\n");
-+		drm_mode_debug_printmodeline(&set->crtc->mode);
-+		drm_mode_debug_printmodeline(set->mode);
-+		mode_changed = true;
-+	}
-+
-+	/* a) traverse passed in connector list and get encoders for them */
-+	count = 0;
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+		struct drm_connector_helper_funcs *connector_funcs =
-+			connector->helper_private;
-+		new_encoder = connector->encoder;
-+		for (ro = 0; ro < set->num_connectors; ro++) {
-+			if (set->connectors[ro] == connector) {
-+				new_encoder = connector_funcs->best_encoder(connector);
-+				/* if we can't get an encoder for a connector
-+				   we are setting now - then fail */
-+				if (new_encoder == NULL)
-+					/* don't break so fail path works correct */
-+					fail = 1;
-+				break;
-+			}
-+		}
-+
-+		if (new_encoder != connector->encoder) {
-+			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
-+			mode_changed = true;
-+			/* If the encoder is reused for another connector, then
-+			 * the appropriate crtc will be set later.
-+			 */
-+			if (connector->encoder)
-+				connector->encoder->crtc = NULL;
-+			connector->encoder = new_encoder;
-+		}
-+	}
-+
-+	if (fail) {
-+		ret = -EINVAL;
-+		goto fail;
-+	}
-+
-+	count = 0;
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+		if (!connector->encoder)
-+			continue;
-+
-+		if (connector->encoder->crtc == set->crtc)
-+			new_crtc = NULL;
-+		else
-+			new_crtc = connector->encoder->crtc;
-+
-+		for (ro = 0; ro < set->num_connectors; ro++) {
-+			if (set->connectors[ro] == connector)
-+				new_crtc = set->crtc;
-+		}
-+
-+		/* Make sure the new CRTC will work with the encoder */
-+		if (new_crtc &&
-+		    !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
-+			ret = -EINVAL;
-+			goto fail;
-+		}
-+		if (new_crtc != connector->encoder->crtc) {
-+			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
-+			mode_changed = true;
-+			connector->encoder->crtc = new_crtc;
-+		}
-+		if (new_crtc) {
-+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
-+				connector->base.id, drm_get_connector_name(connector),
-+				new_crtc->base.id);
-+		} else {
-+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
-+				connector->base.id, drm_get_connector_name(connector));
-+		}
-+	}
-+
-+	/* mode_set_base is not a required function */
-+	if (fb_changed && !crtc_funcs->mode_set_base)
-+		mode_changed = true;
-+
-+	if (mode_changed) {
-+		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
-+		if (set->crtc->enabled) {
-+			DRM_DEBUG_KMS("attempting to set mode from"
-+					" userspace\n");
-+			drm_mode_debug_printmodeline(set->mode);
-+			old_fb = set->crtc->fb;
-+			set->crtc->fb = set->fb;
-+			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
-+						      set->x, set->y,
-+						      old_fb)) {
-+				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
-+					  set->crtc->base.id);
-+				set->crtc->fb = old_fb;
-+				ret = -EINVAL;
-+				goto fail;
-+			}
-+			DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-+			for (i = 0; i < set->num_connectors; i++) {
-+				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-+					      drm_get_connector_name(set->connectors[i]));
-+				set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
-+			}
-+		}
-+		drm_helper_disable_unused_functions(dev);
-+	} else if (fb_changed) {
-+		set->crtc->x = set->x;
-+		set->crtc->y = set->y;
-+
-+		old_fb = set->crtc->fb;
-+		if (set->crtc->fb != set->fb)
-+			set->crtc->fb = set->fb;
-+		ret = crtc_funcs->mode_set_base(set->crtc,
-+						set->x, set->y, old_fb);
-+		if (ret != 0) {
-+			set->crtc->fb = old_fb;
-+			goto fail;
-+		}
-+	}
-+
-+	free(save_connectors, DRM_MEM_KMS);
-+	free(save_encoders, DRM_MEM_KMS);
-+	free(save_crtcs, DRM_MEM_KMS);
-+	return 0;
-+
-+fail:
-+	/* Restore all previous data. */
-+	count = 0;
-+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+		*crtc = save_crtcs[count++];
-+	}
-+
-+	count = 0;
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+		*encoder = save_encoders[count++];
-+	}
-+
-+	count = 0;
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+		*connector = save_connectors[count++];
-+	}
-+
-+	/* Try to restore the config */
-+	if (mode_changed &&
-+	    !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
-+				      save_set.y, save_set.fb))
-+		DRM_ERROR("failed to restore config after modeset failure\n");
-+
-+	free(save_connectors, DRM_MEM_KMS);
-+	free(save_encoders, DRM_MEM_KMS);
-+	free(save_crtcs, DRM_MEM_KMS);
-+	return ret;
-+}
-+
-+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
-+{
-+	int dpms = DRM_MODE_DPMS_OFF;
-+	struct drm_connector *connector;
-+	struct drm_device *dev = encoder->dev;
-+
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-+		if (connector->encoder == encoder)
-+			if (connector->dpms < dpms)
-+				dpms = connector->dpms;
-+	return dpms;
-+}
-+
-+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
-+{
-+	int dpms = DRM_MODE_DPMS_OFF;
-+	struct drm_connector *connector;
-+	struct drm_device *dev = crtc->dev;
-+
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-+		if (connector->encoder && connector->encoder->crtc == crtc)
-+			if (connector->dpms < dpms)
-+				dpms = connector->dpms;
-+	return dpms;
-+}
-+
-+/**
-+ * drm_helper_connector_dpms
-+ * @connector affected connector
-+ * @mode DPMS mode
-+ *
-+ * Calls the low-level connector DPMS function, then
-+ * calls appropriate encoder and crtc DPMS functions as well
-+ */
-+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
-+{
-+	struct drm_encoder *encoder = connector->encoder;
-+	struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
-+	int old_dpms;
-+
-+	if (mode == connector->dpms)
-+		return;
-+
-+	old_dpms = connector->dpms;
-+	connector->dpms = mode;
-+
-+	/* from off to on, do crtc then encoder */
-+	if (mode < old_dpms) {
-+		if (crtc) {
-+			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-+			if (crtc_funcs->dpms)
-+				(*crtc_funcs->dpms) (crtc,
-+						     drm_helper_choose_crtc_dpms(crtc));
-+		}
-+		if (encoder) {
-+			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-+			if (encoder_funcs->dpms)
-+				(*encoder_funcs->dpms) (encoder,
-+							drm_helper_choose_encoder_dpms(encoder));
-+		}
-+	}
-+
-+	/* from on to off, do encoder then crtc */
-+	if (mode > old_dpms) {
-+		if (encoder) {
-+			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-+			if (encoder_funcs->dpms)
-+				(*encoder_funcs->dpms) (encoder,
-+							drm_helper_choose_encoder_dpms(encoder));
-+		}
-+		if (crtc) {
-+			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-+			if (crtc_funcs->dpms)
-+				(*crtc_funcs->dpms) (crtc,
-+						     drm_helper_choose_crtc_dpms(crtc));
-+		}
-+	}
-+
-+	return;
-+}
-+
-+int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-+				   struct drm_mode_fb_cmd2 *mode_cmd)
-+{
-+	int i;
-+
-+	fb->width = mode_cmd->width;
-+	fb->height = mode_cmd->height;
-+	for (i = 0; i < 4; i++) {
-+		fb->pitches[i] = mode_cmd->pitches[i];
-+		fb->offsets[i] = mode_cmd->offsets[i];
-+	}
-+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
-+				    &fb->bits_per_pixel);
-+	fb->pixel_format = mode_cmd->pixel_format;
-+
-+	return 0;
-+}
-+
-+int drm_helper_resume_force_mode(struct drm_device *dev)
-+{
-+	struct drm_crtc *crtc;
-+	struct drm_encoder *encoder;
-+	struct drm_encoder_helper_funcs *encoder_funcs;
-+	struct drm_crtc_helper_funcs *crtc_funcs;
-+	int ret;
-+
-+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+
-+		if (!crtc->enabled)
-+			continue;
-+
-+		ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
-+					       crtc->x, crtc->y, crtc->fb);
-+
-+		if (!ret)
-+			DRM_ERROR("failed to set mode on crtc %p\n", crtc);
-+
-+		/* Turn off outputs that were already powered off */
-+		if (drm_helper_choose_crtc_dpms(crtc)) {
-+			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+
-+				if(encoder->crtc != crtc)
-+					continue;
-+
-+				encoder_funcs = encoder->helper_private;
-+				if (encoder_funcs->dpms)
-+					(*encoder_funcs->dpms) (encoder,
-+					    drm_helper_choose_encoder_dpms(encoder));
-+			}
-+
-+			crtc_funcs = crtc->helper_private;
-+			if (crtc_funcs->dpms)
-+				(*crtc_funcs->dpms) (crtc,
-+				    drm_helper_choose_crtc_dpms(crtc));
-+		}
-+	}
-+	/* disable the unused connectors while restoring the modesetting */
-+	drm_helper_disable_unused_functions(dev);
-+	return 0;
-+}
-+
-+#define DRM_OUTPUT_POLL_PERIOD (10 * hz)
-+static void output_poll_execute(void *ctx, int pending)
-+{
-+	struct drm_device *dev;
-+	struct drm_connector *connector;
-+	enum drm_connector_status old_status;
-+	bool repoll = false, changed = false;
-+
-+	if (!drm_kms_helper_poll)
-+		return;
-+
-+	dev = ctx;
-+
-+	sx_xlock(&dev->mode_config.mutex);
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+
-+		/* if this is HPD or polled don't check it -
-+		   TV out for instance */
-+		if (!connector->polled)
-+			continue;
-+
-+		else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
-+		    DRM_CONNECTOR_POLL_DISCONNECT))
-+			repoll = true;
-+
-+		old_status = connector->status;
-+		/* if we are connected and don't want to poll for disconnect
-+		   skip it */
-+		if (old_status == connector_status_connected &&
-+		    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
-+		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
-+			continue;
-+
-+		connector->status = connector->funcs->detect(connector, false);
-+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
-+			      connector->base.id,
-+			      drm_get_connector_name(connector),
-+			      old_status, connector->status);
-+		if (old_status != connector->status)
-+			changed = true;
-+	}
-+
-+	sx_xunlock(&dev->mode_config.mutex);
-+
-+	if (changed) {
-+#if 0
-+		/* send a uevent + call fbdev */
-+		drm_sysfs_hotplug_event(dev);
-+#endif
-+		if (dev->mode_config.funcs->output_poll_changed)
-+			dev->mode_config.funcs->output_poll_changed(dev);
-+	}
-+
-+	if (repoll) {
-+		taskqueue_enqueue_timeout(taskqueue_thread,
-+		    &dev->mode_config.output_poll_task,
-+		    DRM_OUTPUT_POLL_PERIOD);
-+	}
-+}
-+
-+void drm_kms_helper_poll_disable(struct drm_device *dev)
-+{
-+	if (!dev->mode_config.poll_enabled)
-+		return;
-+	taskqueue_cancel_timeout(taskqueue_thread,
-+	    &dev->mode_config.output_poll_task, NULL);
-+}
-+
-+void drm_kms_helper_poll_enable(struct drm_device *dev)
-+{
-+	bool poll = false;
-+	struct drm_connector *connector;
-+
-+	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
-+		return;
-+
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+		if (connector->polled)
-+			poll = true;
-+	}
-+
-+	if (poll) {
-+		taskqueue_enqueue_timeout(taskqueue_thread,
-+		    &dev->mode_config.output_poll_task, DRM_OUTPUT_POLL_PERIOD);
-+	}
-+}
-+
-+void drm_kms_helper_poll_init(struct drm_device *dev)
-+{
-+
-+	TIMEOUT_TASK_INIT(taskqueue_thread, &dev->mode_config.output_poll_task,
-+	    0, output_poll_execute, dev);
-+	dev->mode_config.poll_enabled = true;
-+
-+	drm_kms_helper_poll_enable(dev);
-+}
-+
-+void drm_kms_helper_poll_fini(struct drm_device *dev)
-+{
-+	drm_kms_helper_poll_disable(dev);
-+}
-+
-+void drm_helper_hpd_irq_event(struct drm_device *dev)
-+{
-+	if (!dev->mode_config.poll_enabled)
-+		return;
-+
-+	/* kill timer and schedule immediate execution, this doesn't block */
-+	taskqueue_cancel_timeout(taskqueue_thread,
-+	    &dev->mode_config.output_poll_task, NULL);
-+	if (drm_kms_helper_poll)
-+		taskqueue_enqueue_timeout(taskqueue_thread,
-+		    &dev->mode_config.output_poll_task, 0);
-+}
-
-Property changes on: stable/9/sys/dev/drm/drm_crtc_helper.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
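
For context, a minimal sketch of how a KMS driver is expected to drive the polling and DPMS helpers added above. It assumes the stock DRM_CONNECTOR_POLL_* and DRM_MODE_DPMS_* constants from the DRM headers; the example_* functions and include paths are illustrative only and are not part of this patch.

	#include "dev/drm/drmP.h"
	#include "dev/drm/drm_crtc.h"
	#include "dev/drm/drm_crtc_helper.h"

	static void
	example_enable_output_polling(struct drm_device *dev,
	    struct drm_connector *connector)
	{
		/* Ask the helper to re-detect this connector every
		 * DRM_OUTPUT_POLL_PERIOD (10 * hz) via taskqueue_thread. */
		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
		    DRM_CONNECTOR_POLL_DISCONNECT;

		/* Creates the timeout task and schedules the first poll. */
		drm_kms_helper_poll_init(dev);
	}

	static void
	example_blank_connector(struct drm_connector *connector)
	{
		/* The helper powers down the encoder first, then the CRTC. */
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
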
-Index: sys/dev/drm/drm_crtc_helper.h
-===================================================================
-diff --git sys/dev/drm/drm_crtc_helper.h sys/dev/drm/drm_crtc_helper.h
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/drm_crtc_helper.h	(working copy)
-@@ -0,0 +1,144 @@
-+/*
-+ * Copyright © 2006 Keith Packard
-+ * Copyright © 2007-2008 Dave Airlie
-+ * Copyright © 2007-2008 Intel Corporation
-+ *   Jesse Barnes <jesse.barnes at intel.com>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+/*
-+ * The DRM mode setting helper functions are common code for drivers to use if
-+ * they wish.  Drivers are not forced to use this code in their
-+ * implementations but it would be useful if the code they do use at least
-+ * provides a consistent interface and operation to userspace
-+ */
-+
-+#ifndef __DRM_CRTC_HELPER_H__
-+#define __DRM_CRTC_HELPER_H__
-+
-+enum mode_set_atomic {
-+	LEAVE_ATOMIC_MODE_SET,
-+	ENTER_ATOMIC_MODE_SET,
-+};
-+
-+struct drm_crtc_helper_funcs {
-+	/*
-+	 * Control power levels on the CRTC.  If the mode passed in is
-+	 * unsupported, the provider must use the next lowest power level.
-+	 */
-+	void (*dpms)(struct drm_crtc *crtc, int mode);
-+	void (*prepare)(struct drm_crtc *crtc);
-+	void (*commit)(struct drm_crtc *crtc);
-+
-+	/* Provider can fixup or change mode timings before modeset occurs */
-+	bool (*mode_fixup)(struct drm_crtc *crtc,
-+			   struct drm_display_mode *mode,
-+			   struct drm_display_mode *adjusted_mode);
-+	/* Actually set the mode */
-+	int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
-+			struct drm_display_mode *adjusted_mode, int x, int y,
-+			struct drm_framebuffer *old_fb);
-+
-+	/* Move the crtc on the current fb to the given position *optional* */
-+	int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
-+			     struct drm_framebuffer *old_fb);
-+	int (*mode_set_base_atomic)(struct drm_crtc *crtc,
-+				    struct drm_framebuffer *fb, int x, int y,
-+				    enum mode_set_atomic);
-+
-+	/* reload the current crtc LUT */
-+	void (*load_lut)(struct drm_crtc *crtc);
-+
-+	/* disable crtc when not in use - more explicit than dpms off */
-+	void (*disable)(struct drm_crtc *crtc);
-+};
-+
-+struct drm_encoder_helper_funcs {
-+	void (*dpms)(struct drm_encoder *encoder, int mode);
-+	void (*save)(struct drm_encoder *encoder);
-+	void (*restore)(struct drm_encoder *encoder);
-+
-+	bool (*mode_fixup)(struct drm_encoder *encoder,
-+			   struct drm_display_mode *mode,
-+			   struct drm_display_mode *adjusted_mode);
-+	void (*prepare)(struct drm_encoder *encoder);
-+	void (*commit)(struct drm_encoder *encoder);
-+	void (*mode_set)(struct drm_encoder *encoder,
-+			 struct drm_display_mode *mode,
-+			 struct drm_display_mode *adjusted_mode);
-+	struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
-+	/* detect for DAC style encoders */
-+	enum drm_connector_status (*detect)(struct drm_encoder *encoder,
-+					    struct drm_connector *connector);
-+	/* disable encoder when not in use - more explicit than dpms off */
-+	void (*disable)(struct drm_encoder *encoder);
-+};
-+
-+struct drm_connector_helper_funcs {
-+	int (*get_modes)(struct drm_connector *connector);
-+	int (*mode_valid)(struct drm_connector *connector,
-+			  struct drm_display_mode *mode);
-+	struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
-+};
-+
-+extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
-+extern void drm_helper_disable_unused_functions(struct drm_device *dev);
-+extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
-+extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
-+				     struct drm_display_mode *mode,
-+				     int x, int y,
-+				     struct drm_framebuffer *old_fb);
-+extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
-+extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
-+
-+extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
-+
-+extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-+					  struct drm_mode_fb_cmd2 *mode_cmd);
-+
-+static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
-+				       const struct drm_crtc_helper_funcs *funcs)
-+{
-+	crtc->helper_private = __DECONST(void *, funcs);
-+}
-+
-+static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
-+					  const struct drm_encoder_helper_funcs *funcs)
-+{
-+	encoder->helper_private = __DECONST(void *, funcs);
-+}
-+
-+static inline void drm_connector_helper_add(struct drm_connector *connector,
-+					    const struct drm_connector_helper_funcs *funcs)
-+{
-+	connector->helper_private = __DECONST(void *, funcs);
-+}
-+
-+extern int drm_helper_resume_force_mode(struct drm_device *dev);
-+extern void drm_kms_helper_poll_init(struct drm_device *dev);
-+extern void drm_kms_helper_poll_fini(struct drm_device *dev);
-+extern void drm_helper_hpd_irq_event(struct drm_device *dev);
-+
-+extern void drm_kms_helper_poll_disable(struct drm_device *dev);
-+extern void drm_kms_helper_poll_enable(struct drm_device *dev);
-+
-+extern bool drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
-+    struct drm_cmdline_mode *cmdline_mode);
-+#endif
-
-Property changes on: stable/9/sys/dev/drm/drm_crtc_helper.h
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
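
A driver consumes this header by filling one of the *_helper_funcs tables and attaching it with the inline drm_*_helper_add() wrappers above, which stash the pointer in helper_private. A minimal sketch follows, with hypothetical example_crtc_* callbacks standing in for real driver code.

	static void example_crtc_dpms(struct drm_crtc *crtc, int mode);
	static bool example_crtc_mode_fixup(struct drm_crtc *crtc,
	    struct drm_display_mode *mode,
	    struct drm_display_mode *adjusted_mode);
	static int example_crtc_mode_set(struct drm_crtc *crtc,
	    struct drm_display_mode *mode,
	    struct drm_display_mode *adjusted_mode, int x, int y,
	    struct drm_framebuffer *old_fb);
	static void example_crtc_prepare(struct drm_crtc *crtc);
	static void example_crtc_commit(struct drm_crtc *crtc);

	static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
		.dpms		= example_crtc_dpms,
		.mode_fixup	= example_crtc_mode_fixup,
		.mode_set	= example_crtc_mode_set,
		.prepare	= example_crtc_prepare,
		.commit		= example_crtc_commit,
	};

	static void
	example_crtc_init(struct drm_crtc *crtc)
	{
		/* drm_crtc_helper_add() __DECONSTs the table into
		 * crtc->helper_private for the generic helpers to call back. */
		drm_crtc_helper_add(crtc, &example_crtc_helper_funcs);
	}
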
-Index: sys/dev/drm/drm_dp_helper.h
-===================================================================
-diff --git sys/dev/drm/drm_dp_helper.h sys/dev/drm/drm_dp_helper.h
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/drm_dp_helper.h	(working copy)
-@@ -0,0 +1,248 @@
-+/*
-+ * Copyright © 2008 Keith Packard
-+ *
-+ * Permission to use, copy, modify, distribute, and sell this software and its
-+ * documentation for any purpose is hereby granted without fee, provided that
-+ * the above copyright notice appear in all copies and that both that copyright
-+ * notice and this permission notice appear in supporting documentation, and
-+ * that the name of the copyright holders not be used in advertising or
-+ * publicity pertaining to distribution of the software without specific,
-+ * written prior permission.  The copyright holders make no representations
-+ * about the suitability of this software for any purpose.  It is provided "as
-+ * is" without express or implied warranty.
-+ *
-+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
-+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
-+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-+ * OF THIS SOFTWARE.
-+ */
-+
-+#ifndef _DRM_DP_HELPER_H_
-+#define _DRM_DP_HELPER_H_
-+
-+/* From the VESA DisplayPort spec */
-+
-+#define AUX_NATIVE_WRITE	0x8
-+#define AUX_NATIVE_READ		0x9
-+#define AUX_I2C_WRITE		0x0
-+#define AUX_I2C_READ		0x1
-+#define AUX_I2C_STATUS		0x2
-+#define AUX_I2C_MOT		0x4
-+
-+#define AUX_NATIVE_REPLY_ACK	(0x0 << 4)
-+#define AUX_NATIVE_REPLY_NACK	(0x1 << 4)
-+#define AUX_NATIVE_REPLY_DEFER	(0x2 << 4)
-+#define AUX_NATIVE_REPLY_MASK	(0x3 << 4)
-+
-+#define AUX_I2C_REPLY_ACK	(0x0 << 6)
-+#define AUX_I2C_REPLY_NACK	(0x1 << 6)
-+#define AUX_I2C_REPLY_DEFER	(0x2 << 6)
-+#define AUX_I2C_REPLY_MASK	(0x3 << 6)
-+
-+/* AUX CH addresses */
-+/* DPCD */
-+#define DP_DPCD_REV                         0x000
-+
-+#define DP_MAX_LINK_RATE                    0x001
-+
-+#define DP_MAX_LANE_COUNT                   0x002
-+# define DP_MAX_LANE_COUNT_MASK		    0x1f
-+# define DP_TPS3_SUPPORTED		    (1 << 6)
-+# define DP_ENHANCED_FRAME_CAP		    (1 << 7)
-+
-+#define DP_MAX_DOWNSPREAD                   0x003
-+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)
-+
-+#define DP_NORP                             0x004
-+
-+#define DP_DOWNSTREAMPORT_PRESENT           0x005
-+# define DP_DWN_STRM_PORT_PRESENT           (1 << 0)
-+# define DP_DWN_STRM_PORT_TYPE_MASK         0x06
-+/* 00b = DisplayPort */
-+/* 01b = Analog */
-+/* 10b = TMDS or HDMI */
-+/* 11b = Other */
-+# define DP_FORMAT_CONVERSION               (1 << 3)
-+
-+#define DP_MAIN_LINK_CHANNEL_CODING         0x006
-+
-+#define DP_TRAINING_AUX_RD_INTERVAL         0x00e
-+
-+#define DP_PSR_SUPPORT                      0x070
-+# define DP_PSR_IS_SUPPORTED                1
-+#define DP_PSR_CAPS                         0x071
-+# define DP_PSR_NO_TRAIN_ON_EXIT            1
-+# define DP_PSR_SETUP_TIME_330              (0 << 1)
-+# define DP_PSR_SETUP_TIME_275              (1 << 1)
-+# define DP_PSR_SETUP_TIME_220              (2 << 1)
-+# define DP_PSR_SETUP_TIME_165              (3 << 1)
-+# define DP_PSR_SETUP_TIME_110              (4 << 1)
-+# define DP_PSR_SETUP_TIME_55               (5 << 1)
-+# define DP_PSR_SETUP_TIME_0                (6 << 1)
-+# define DP_PSR_SETUP_TIME_MASK             (7 << 1)
-+# define DP_PSR_SETUP_TIME_SHIFT            1
-+
-+/* link configuration */
-+#define	DP_LINK_BW_SET		            0x100
-+# define DP_LINK_BW_1_62		    0x06
-+# define DP_LINK_BW_2_7			    0x0a
-+# define DP_LINK_BW_5_4			    0x14
-+
-+#define DP_LANE_COUNT_SET	            0x101
-+# define DP_LANE_COUNT_MASK		    0x0f
-+# define DP_LANE_COUNT_ENHANCED_FRAME_EN    (1 << 7)
-+
-+#define DP_TRAINING_PATTERN_SET	            0x102
-+# define DP_TRAINING_PATTERN_DISABLE	    0
-+# define DP_TRAINING_PATTERN_1		    1
-+# define DP_TRAINING_PATTERN_2		    2
-+# define DP_TRAINING_PATTERN_3		    3
-+# define DP_TRAINING_PATTERN_MASK	    0x3
-+
-+# define DP_LINK_QUAL_PATTERN_DISABLE	    (0 << 2)
-+# define DP_LINK_QUAL_PATTERN_D10_2	    (1 << 2)
-+# define DP_LINK_QUAL_PATTERN_ERROR_RATE    (2 << 2)
-+# define DP_LINK_QUAL_PATTERN_PRBS7	    (3 << 2)
-+# define DP_LINK_QUAL_PATTERN_MASK	    (3 << 2)
-+
-+# define DP_RECOVERED_CLOCK_OUT_EN	    (1 << 4)
-+# define DP_LINK_SCRAMBLING_DISABLE	    (1 << 5)
-+
-+# define DP_SYMBOL_ERROR_COUNT_BOTH	    (0 << 6)
-+# define DP_SYMBOL_ERROR_COUNT_DISPARITY    (1 << 6)
-+# define DP_SYMBOL_ERROR_COUNT_SYMBOL	    (2 << 6)
-+# define DP_SYMBOL_ERROR_COUNT_MASK	    (3 << 6)
-+
-+#define DP_TRAINING_LANE0_SET		    0x103
-+#define DP_TRAINING_LANE1_SET		    0x104
-+#define DP_TRAINING_LANE2_SET		    0x105
-+#define DP_TRAINING_LANE3_SET		    0x106
-+
-+# define DP_TRAIN_VOLTAGE_SWING_MASK	    0x3
-+# define DP_TRAIN_VOLTAGE_SWING_SHIFT	    0
-+# define DP_TRAIN_MAX_SWING_REACHED	    (1 << 2)
-+# define DP_TRAIN_VOLTAGE_SWING_400	    (0 << 0)
-+# define DP_TRAIN_VOLTAGE_SWING_600	    (1 << 0)
-+# define DP_TRAIN_VOLTAGE_SWING_800	    (2 << 0)
-+# define DP_TRAIN_VOLTAGE_SWING_1200	    (3 << 0)
-+
-+# define DP_TRAIN_PRE_EMPHASIS_MASK	    (3 << 3)
-+# define DP_TRAIN_PRE_EMPHASIS_0	    (0 << 3)
-+# define DP_TRAIN_PRE_EMPHASIS_3_5	    (1 << 3)
-+# define DP_TRAIN_PRE_EMPHASIS_6	    (2 << 3)
-+# define DP_TRAIN_PRE_EMPHASIS_9_5	    (3 << 3)
-+
-+# define DP_TRAIN_PRE_EMPHASIS_SHIFT	    3
-+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED  (1 << 5)
-+
-+#define DP_DOWNSPREAD_CTRL		    0x107
-+# define DP_SPREAD_AMP_0_5		    (1 << 4)
-+
-+#define DP_MAIN_LINK_CHANNEL_CODING_SET	    0x108
-+# define DP_SET_ANSI_8B10B		    (1 << 0)
-+
-+#define DP_PSR_EN_CFG			    0x170
-+# define DP_PSR_ENABLE			    (1 << 0)
-+# define DP_PSR_MAIN_LINK_ACTIVE	    (1 << 1)
-+# define DP_PSR_CRC_VERIFICATION	    (1 << 2)
-+# define DP_PSR_FRAME_CAPTURE		    (1 << 3)
-+
-+#define DP_DEVICE_SERVICE_IRQ_VECTOR	    0x201
-+# define DP_REMOTE_CONTROL_COMMAND_PENDING  (1 << 0)
-+# define DP_AUTOMATED_TEST_REQUEST	    (1 << 1)
-+# define DP_CP_IRQ			    (1 << 2)
-+# define DP_SINK_SPECIFIC_IRQ		    (1 << 6)
-+
-+#define DP_LANE0_1_STATUS		    0x202
-+#define DP_LANE2_3_STATUS		    0x203
-+# define DP_LANE_CR_DONE		    (1 << 0)
-+# define DP_LANE_CHANNEL_EQ_DONE	    (1 << 1)
-+# define DP_LANE_SYMBOL_LOCKED		    (1 << 2)
-+
-+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE |		\
-+			    DP_LANE_CHANNEL_EQ_DONE |	\
-+			    DP_LANE_SYMBOL_LOCKED)
-+
-+#define DP_LANE_ALIGN_STATUS_UPDATED	    0x204
-+
-+#define DP_INTERLANE_ALIGN_DONE		    (1 << 0)
-+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED   (1 << 6)
-+#define DP_LINK_STATUS_UPDATED		    (1 << 7)
-+
-+#define DP_SINK_STATUS			    0x205
-+
-+#define DP_RECEIVE_PORT_0_STATUS	    (1 << 0)
-+#define DP_RECEIVE_PORT_1_STATUS	    (1 << 1)
-+
-+#define DP_ADJUST_REQUEST_LANE0_1	    0x206
-+#define DP_ADJUST_REQUEST_LANE2_3	    0x207
-+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK  0x03
-+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
-+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK   0x0c
-+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT  2
-+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK  0x30
-+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
-+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK   0xc0
-+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT  6
-+
-+#define DP_TEST_REQUEST			    0x218
-+# define DP_TEST_LINK_TRAINING		    (1 << 0)
-+# define DP_TEST_LINK_PATTERN		    (1 << 1)
-+# define DP_TEST_LINK_EDID_READ		    (1 << 2)
-+# define DP_TEST_LINK_PHY_TEST_PATTERN	    (1 << 3) /* DPCD >= 1.1 */
-+
-+#define DP_TEST_LINK_RATE		    0x219
-+# define DP_LINK_RATE_162		    (0x6)
-+# define DP_LINK_RATE_27		    (0xa)
-+
-+#define DP_TEST_LANE_COUNT		    0x220
-+
-+#define DP_TEST_PATTERN			    0x221
-+
-+#define DP_TEST_RESPONSE		    0x260
-+# define DP_TEST_ACK			    (1 << 0)
-+# define DP_TEST_NAK			    (1 << 1)
-+# define DP_TEST_EDID_CHECKSUM_WRITE	    (1 << 2)
-+
-+#define DP_SET_POWER                        0x600
-+# define DP_SET_POWER_D0                    0x1
-+# define DP_SET_POWER_D3                    0x2
-+
-+#define DP_PSR_ERROR_STATUS                 0x2006
-+# define DP_PSR_LINK_CRC_ERROR              (1 << 0)
-+# define DP_PSR_RFB_STORAGE_ERROR           (1 << 1)
-+
-+#define DP_PSR_ESI                          0x2007
-+# define DP_PSR_CAPS_CHANGE                 (1 << 0)
-+
-+#define DP_PSR_STATUS                       0x2008
-+# define DP_PSR_SINK_INACTIVE               0
-+# define DP_PSR_SINK_ACTIVE_SRC_SYNCED      1
-+# define DP_PSR_SINK_ACTIVE_RFB             2
-+# define DP_PSR_SINK_ACTIVE_SINK_SYNCED     3
-+# define DP_PSR_SINK_ACTIVE_RESYNC          4
-+# define DP_PSR_SINK_INTERNAL_ERROR         7
-+# define DP_PSR_SINK_STATE_MASK             0x07
-+
-+#define MODE_I2C_START	1
-+#define MODE_I2C_WRITE	2
-+#define MODE_I2C_READ	4
-+#define MODE_I2C_STOP	8
-+
-+struct iic_dp_aux_data {
-+	bool running;
-+	u16 address;
-+	void *priv;
-+	int (*aux_ch)(device_t adapter, int mode, uint8_t write_byte,
-+	    uint8_t *read_byte);
-+	device_t port;
-+};
-+
-+int iic_dp_aux_add_bus(device_t dev, const char *name,
-+    int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
-+    void *priv, device_t *bus, device_t *adapter);
-+
-+#endif /* _DRM_DP_HELPER_H_ */
-
-Property changes on: stable/9/sys/dev/drm/drm_dp_helper.h
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
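
The AUX_NATIVE_REPLY_* masks above encode the reply byte of a native AUX transaction. A short sketch of how a caller of the aux_ch hook might classify that byte, using errno-style codes from <sys/errno.h>; the example function is illustrative and not part of this patch.

	#include <sys/types.h>
	#include <sys/errno.h>

	#include "dev/drm/drm_dp_helper.h"

	/* Map a native AUX reply byte onto an errno-style result. */
	static int
	example_classify_native_reply(uint8_t reply)
	{
		switch (reply & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			return (0);		/* transfer accepted by the sink */
		case AUX_NATIVE_REPLY_DEFER:
			return (EAGAIN);	/* sink busy, retry the transaction */
		case AUX_NATIVE_REPLY_NACK:
		default:
			return (EIO);
		}
	}
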
-Index: sys/dev/drm/drm_gem.c
-===================================================================
-diff --git sys/dev/drm/drm_gem.c sys/dev/drm/drm_gem.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/drm_gem.c	(working copy)
-@@ -0,0 +1,487 @@
-+/*-
-+ * Copyright (c) 2011 The FreeBSD Foundation
-+ * All rights reserved.
-+ *
-+ * This software was developed by Konstantin Belousov under sponsorship from
-+ * the FreeBSD Foundation.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ *    notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ *    notice, this list of conditions and the following disclaimer in the
-+ *    documentation and/or other materials provided with the distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-+ * SUCH DAMAGE.
-+ */
-+
-+#include <sys/cdefs.h>
-+__FBSDID("$FreeBSD$");
-+
-+#include "opt_vm.h"
-+
-+#include <sys/param.h>
-+#include <sys/systm.h>
-+#include <sys/limits.h>
-+#include <sys/lock.h>
-+#include <sys/mutex.h>
-+
-+#include <vm/vm.h>
-+#include <vm/vm_page.h>
-+
-+#include "dev/drm/drmP.h"
-+#include "dev/drm/drm.h"
-+#include "dev/drm/drm_sarea.h"
-+
-+/*
-+ * We make up offsets for buffer objects so we can recognize them at
-+ * mmap time.
-+ */
-+
-+/* pgoff in mmap is an unsigned long, so we need to make sure that
-+ * the faked up offset will fit
-+ */
-+
-+#if ULONG_MAX == UINT64_MAX
-+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-+#else
-+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
-+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
-+#endif
-+
-+int
-+drm_gem_init(struct drm_device *dev)
-+{
-+	struct drm_gem_mm *mm;
-+
-+	drm_gem_names_init(&dev->object_names);
-+	mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_WAITOK);
-+	dev->mm_private = mm;
-+	if (drm_ht_create(&mm->offset_hash, 19) != 0) {
-+		free(mm, DRM_MEM_DRIVER);
-+		return (ENOMEM);
-+	}
-+	mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
-+	return (0);
-+}
-+
-+void
-+drm_gem_destroy(struct drm_device *dev)
-+{
-+	struct drm_gem_mm *mm;
-+
-+	mm = dev->mm_private;
-+	dev->mm_private = NULL;
-+	drm_ht_remove(&mm->offset_hash);
-+	delete_unrhdr(mm->idxunr);
-+	free(mm, DRM_MEM_DRIVER);
-+	drm_gem_names_fini(&dev->object_names);
-+}
-+
-+int
-+drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
-+    size_t size)
-+{
-+
-+	KASSERT((size & (PAGE_SIZE - 1)) == 0,
-+	    ("Bad size %ju", (uintmax_t)size));
-+
-+	obj->dev = dev;
-+	obj->vm_obj = vm_pager_allocate(OBJT_DEFAULT, NULL, size,
-+	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
-+
-+	obj->refcount = 1;
-+	obj->handle_count = 0;
-+	obj->size = size;
-+
-+	return (0);
-+}
-+
-+int
-+drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj,
-+    size_t size)
-+{
-+
-+	MPASS((size & (PAGE_SIZE - 1)) == 0);
-+
-+	obj->dev = dev;
-+	obj->vm_obj = NULL;
-+
-+	obj->refcount = 1;
-+	atomic_set(&obj->handle_count, 0);
-+	obj->size = size;
-+
-+	return (0);
-+}
-+
-+
-+struct drm_gem_object *
-+drm_gem_object_alloc(struct drm_device *dev, size_t size)
-+{
-+	struct drm_gem_object *obj;
-+
-+	obj = malloc(sizeof(*obj), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
-+	if (drm_gem_object_init(dev, obj, size) != 0)
-+		goto free;
-+
-+	if (dev->driver->gem_init_object != NULL &&
-+	    dev->driver->gem_init_object(obj) != 0)
-+		goto dealloc;
-+	return (obj);
-+dealloc:
-+	vm_object_deallocate(obj->vm_obj);
-+free:
-+	free(obj, DRM_MEM_DRIVER);
-+	return (NULL);
-+}
-+
-+void
-+drm_gem_object_free(struct drm_gem_object *obj)
-+{
-+	struct drm_device *dev;
-+
-+	dev = obj->dev;
-+	DRM_LOCK_ASSERT(dev);
-+	if (dev->driver->gem_free_object != NULL)
-+		dev->driver->gem_free_object(obj);
-+}
-+
-+void
-+drm_gem_object_reference(struct drm_gem_object *obj)
-+{
-+
-+	KASSERT(obj->refcount > 0, ("Dangling obj %p", obj));
-+	refcount_acquire(&obj->refcount);
-+}
-+
-+void
-+drm_gem_object_unreference(struct drm_gem_object *obj)
-+{
-+
-+	if (obj == NULL)
-+		return;
-+	if (refcount_release(&obj->refcount))
-+		drm_gem_object_free(obj);
-+}
-+
-+void
-+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-+{
-+	struct drm_device *dev;
-+
-+	if (obj == NULL)
-+		return;
-+	dev = obj->dev;
-+	DRM_LOCK();
-+	drm_gem_object_unreference(obj);
-+	DRM_UNLOCK();
-+}
-+
-+void
-+drm_gem_object_handle_reference(struct drm_gem_object *obj)
-+{
-+
-+	drm_gem_object_reference(obj);
-+	atomic_add_rel_int(&obj->handle_count, 1);
-+}
-+
-+void
-+drm_gem_object_handle_free(struct drm_gem_object *obj)
-+{
-+	struct drm_device *dev;
-+	struct drm_gem_object *obj1;
-+
-+	dev = obj->dev;
-+	if (obj->name != 0) {
-+		obj1 = drm_gem_names_remove(&dev->object_names, obj->name);
-+		obj->name = 0;
-+		drm_gem_object_unreference(obj1);
-+	}
-+}
-+
-+void
-+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-+{
-+
-+	if (obj == NULL ||
-+	    atomic_load_acq_int(&obj->handle_count) == 0)
-+		return;
-+
-+	if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
-+		drm_gem_object_handle_free(obj);
-+	drm_gem_object_unreference(obj);
-+}
-+
-+void
-+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
-+{
-+
-+	if (obj == NULL ||
-+	    atomic_load_acq_int(&obj->handle_count) == 0)
-+		return;
-+
-+	if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
-+		drm_gem_object_handle_free(obj);
-+	drm_gem_object_unreference_unlocked(obj);
-+}
-+
-+int
-+drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj,
-+    uint32_t *handle)
-+{
-+	int error;
-+
-+	error = drm_gem_name_create(&file_priv->object_names, obj, handle);
-+	if (error != 0)
-+		return (error);
-+	drm_gem_object_handle_reference(obj);
-+	return (0);
-+}
-+
-+int
-+drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle)
-+{
-+	struct drm_gem_object *obj;
-+
-+	obj = drm_gem_names_remove(&file_priv->object_names, handle);
-+	if (obj == NULL)
-+		return (EINVAL);
-+	drm_gem_object_handle_unreference_unlocked(obj);
-+	return (0);
-+}
-+
-+void
-+drm_gem_object_release(struct drm_gem_object *obj)
-+{
-+
-+	/*
-+	 * obj->vm_obj can be NULL for private gem objects.
-+	 */
-+	vm_object_deallocate(obj->vm_obj);
-+}
-+
-+int
-+drm_gem_open_ioctl(struct drm_device *dev, void *data,
-+    struct drm_file *file_priv)
-+{
-+	struct drm_gem_open *args;
-+	struct drm_gem_object *obj;
-+	int ret;
-+	uint32_t handle;
-+
-+	if (!drm_core_check_feature(dev, DRIVER_GEM))
-+		return (ENODEV);
-+	args = data;
-+
-+	obj = drm_gem_name_ref(&dev->object_names, args->name,
-+	    (void (*)(void *))drm_gem_object_reference);
-+	if (obj == NULL)
-+		return (ENOENT);
-+	handle = 0;
-+	ret = drm_gem_handle_create(file_priv, obj, &handle);
-+	drm_gem_object_unreference_unlocked(obj);
-+	if (ret != 0)
-+		return (ret);
-+	
-+	args->handle = handle;
-+	args->size = obj->size;
-+
-+	return (0);
-+}
-+
-+void
-+drm_gem_open(struct drm_device *dev, struct drm_file *file_priv)
-+{
-+
-+	drm_gem_names_init(&file_priv->object_names);
-+}
-+
-+static int
-+drm_gem_object_release_handle(uint32_t name, void *ptr, void *arg)
-+{
-+	struct drm_gem_object *obj;
-+
-+	obj = ptr;
-+	drm_gem_object_handle_unreference(obj);
-+	return (0);
-+}
-+
-+void
-+drm_gem_release(struct drm_device *dev, struct drm_file *file_priv)
-+{
-+
-+	drm_gem_names_foreach(&file_priv->object_names,
-+	    drm_gem_object_release_handle, NULL);
-+	drm_gem_names_fini(&file_priv->object_names);
-+}
-+
-+int
-+drm_gem_close_ioctl(struct drm_device *dev, void *data,
-+    struct drm_file *file_priv)
-+{
-+	struct drm_gem_close *args;
-+
-+	if (!drm_core_check_feature(dev, DRIVER_GEM))
-+		return (ENODEV);
-+	args = data;
-+
-+	return (drm_gem_handle_delete(file_priv, args->handle));
-+}
-+
-+int
-+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-+    struct drm_file *file_priv)
-+{
-+	struct drm_gem_flink *args;
-+	struct drm_gem_object *obj;
-+	int error;
-+
-+	if (!drm_core_check_feature(dev, DRIVER_GEM))
-+		return (ENODEV);
-+	args = data;
-+
-+	obj = drm_gem_name_ref(&file_priv->object_names, args->handle,
-+	    (void (*)(void *))drm_gem_object_reference);
-+	if (obj == NULL)
-+		return (ENOENT);
-+	error = drm_gem_name_create(&dev->object_names, obj, &obj->name);
-+	if (error != 0) {
-+		if (error == EALREADY)
-+			error = 0;
-+		drm_gem_object_unreference_unlocked(obj);
-+	}
-+	if (error == 0)
-+		args->name = obj->name;
-+	return (error);
-+}
-+
-+struct drm_gem_object *
-+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *file_priv,
-+    uint32_t handle)
-+{
-+	struct drm_gem_object *obj;
-+
-+	obj = drm_gem_name_ref(&file_priv->object_names, handle,
-+	    (void (*)(void *))drm_gem_object_reference);
-+	return (obj);
-+}
-+
-+static struct drm_gem_object *
-+drm_gem_object_from_offset(struct drm_device *dev, vm_ooffset_t offset)
-+{
-+	struct drm_gem_object *obj;
-+	struct drm_gem_mm *mm;
-+	struct drm_hash_item *map_list;
-+
-+	if ((offset & DRM_GEM_MAPPING_MASK) != DRM_GEM_MAPPING_KEY)
-+		return (NULL);
-+	offset &= ~DRM_GEM_MAPPING_KEY;
-+	mm = dev->mm_private;
-+	if (drm_ht_find_item(&mm->offset_hash, DRM_GEM_MAPPING_IDX(offset),
-+	    &map_list) != 0) {
-+		DRM_DEBUG("drm_gem_object_from_offset: offset 0x%jx obj not found\n",
-+		    (uintmax_t)offset);
-+		return (NULL);
-+	}
-+	obj = member2struct(drm_gem_object, map_list, map_list);
-+	return (obj);
-+}
-+
-+int
-+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
-+{
-+	struct drm_device *dev;
-+	struct drm_gem_mm *mm;
-+	int ret;
-+
-+	if (obj->on_map)
-+		return (0);
-+	dev = obj->dev;
-+	mm = dev->mm_private;
-+	ret = 0;
-+
-+	obj->map_list.key = alloc_unr(mm->idxunr);
-+	ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
-+	if (ret != 0) {
-+		DRM_ERROR("failed to add to map hash\n");
-+		free_unr(mm->idxunr, obj->map_list.key);
-+		return (ret);
-+	}
-+	obj->on_map = true;
-+	return (0);
-+}
-+
-+void
-+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
-+{
-+	struct drm_hash_item *list;
-+	struct drm_gem_mm *mm;
-+
-+	if (!obj->on_map)
-+		return;
-+	mm = obj->dev->mm_private;
-+	list = &obj->map_list;
-+
-+	drm_ht_remove_item(&mm->offset_hash, list);
-+	free_unr(mm->idxunr, list->key);
-+	obj->on_map = false;
-+}
-+
-+int
-+drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
-+    struct vm_object **obj_res, int nprot)
-+{
-+	struct drm_device *dev;
-+	struct drm_gem_object *gem_obj;
-+	struct vm_object *vm_obj;
-+
-+	dev = drm_get_device_from_kdev(kdev);
-+	if ((dev->driver->driver_features & DRIVER_GEM) == 0)
-+		return (ENODEV);
-+	DRM_LOCK();
-+	gem_obj = drm_gem_object_from_offset(dev, *offset);
-+	if (gem_obj == NULL) {
-+		DRM_UNLOCK();
-+		return (ENODEV);
-+	}
-+	drm_gem_object_reference(gem_obj);
-+	DRM_UNLOCK();
-+	vm_obj = cdev_pager_allocate(gem_obj, OBJT_MGTDEVICE,
-+	    dev->driver->gem_pager_ops, size, nprot,
-+	    DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
-+	if (vm_obj == NULL) {
-+		drm_gem_object_unreference_unlocked(gem_obj);
-+		return (EINVAL);
-+	}
-+	*offset = DRM_GEM_MAPPING_MAPOFF(*offset);
-+	*obj_res = vm_obj;
-+	return (0);
-+}
-+
-+void
-+drm_gem_pager_dtr(void *handle)
-+{
-+	struct drm_gem_object *obj;
-+	struct drm_device *dev;
-+
-+	obj = handle;
-+	dev = obj->dev;
-+
-+	DRM_LOCK();
-+	drm_gem_free_mmap_offset(obj);
-+	drm_gem_object_unreference(obj);
-+	DRM_UNLOCK();
-+}
-
-Property changes on: stable/9/sys/dev/drm/drm_gem.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
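
The typical lifetime of a GEM object created on behalf of userspace, using only the functions defined above: allocate, publish a per-file handle (which takes its own reference), then drop the allocation reference so the handle becomes the sole owner. A minimal sketch; the surrounding ioctl and error unwinding are hypothetical.

	static int
	example_create_bo(struct drm_device *dev, struct drm_file *file_priv,
	    uint32_t *handlep)
	{
		struct drm_gem_object *obj;
		int error;

		/* Size must be a multiple of PAGE_SIZE (see drm_gem_object_init). */
		obj = drm_gem_object_alloc(dev, PAGE_SIZE);
		if (obj == NULL)
			return (ENOMEM);

		error = drm_gem_handle_create(file_priv, obj, handlep);

		/* The handle now holds a reference of its own; drop ours. */
		drm_gem_object_unreference_unlocked(obj);
		return (error);
	}
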
-Index: sys/dev/drm/i915_gem.c
-===================================================================
-diff --git sys/dev/drm/i915_gem.c sys/dev/drm/i915_gem.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/i915_gem.c	(working copy)
+diff --git a/sys/dev/drm/i915_gem.c b/sys/dev/drm/i915_gem.c
+new file mode 100644
+index 0000000..9846938
+--- /dev/null
++++ sys/dev/drm/i915_gem.c
 @@ -0,0 +1,3642 @@
 +/*-
 + * Copyright © 2008 Intel Corporation
@@ -19891,89 +30207,14 @@
 +	dev_priv = dev->dev_private;
 +	EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
 +}
-
-Property changes on: stable/9/sys/dev/drm/i915_gem.c
-___________________________________________________________________
-Added: svn:mime-type
-## -0,0 +1 ##
-+text/plain
-Added: svn:keywords
-## -0,0 +1 ##
-+FreeBSD=%H
-Added: svn:eol-style
-## -0,0 +1 ##
-+native
-Index: sys/dev/drm/mga_drv.c
-===================================================================
-diff --git sys/dev/drm/mga_drv.c sys/dev/drm/mga_drv.c
---- sys/dev/drm/mga_drv.c	(revision 230124)
-+++ sys/dev/drm/mga_drv.c	(working copy)
-@@ -117,6 +117,8 @@
- 	dev->driver->major		= DRIVER_MAJOR;
- 	dev->driver->minor		= DRIVER_MINOR;
- 	dev->driver->patchlevel		= DRIVER_PATCHLEVEL;
-+
-+	drm_compat_locking_init(dev);
- }
- 
- static int
-Index: sys/dev/drm/drm_pciids.h
-===================================================================
-diff --git sys/dev/drm/drm_pciids.h sys/dev/drm/drm_pciids.h
---- sys/dev/drm/drm_pciids.h	(revision 230124)
-+++ sys/dev/drm/drm_pciids.h	(working copy)
-@@ -533,6 +533,7 @@
- 	{0x8086, 0x3577, CHIP_I8XX, "Intel i830M GMCH"}, \
- 	{0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
- 	{0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
-+	{0x8086, 0x358e, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
- 	{0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
- 	{0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
- 	{0x8086, 0x258a, CHIP_I9XX|CHIP_I915, "Intel E7221 (i915)"}, \
-@@ -544,18 +545,29 @@
- 	{0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
- 	{0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
- 	{0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
-+	{0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
-+	{0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
-+	{0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
- 	{0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
- 	{0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
--	{0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
--	{0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
--	{0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
- 	{0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Mobile Intel® GM45 Express Chipset"}, \
- 	{0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel Eaglelake"}, \
--	{0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \
--	{0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \
- 	{0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45/Q43"}, \
- 	{0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45/G43"}, \
- 	{0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \
-+	{0x8086, 0x2e42, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
-+	{0x8086, 0x2e92, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
-+	{0x8086, 0x0042, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
-+	{0x8086, 0x0046, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
-+	{0x8086, 0x0102, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
-+	{0x8086, 0x0112, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
-+	{0x8086, 0x0122, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
-+	{0x8086, 0x0106, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
-+	{0x8086, 0x0116, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
-+	{0x8086, 0x0126, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
-+	{0x8086, 0x010A, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
-+	{0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \
-+	{0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \
- 	{0, 0, 0, NULL}
- 
- #define imagine_PCI_IDS \
-Index: sys/dev/drm/intel_fb.c
-===================================================================
-diff --git sys/dev/drm/intel_fb.c sys/dev/drm/intel_fb.c
-new file mode 10644
---- /dev/null	(revision 0)
-+++ sys/dev/drm/intel_fb.c	(working copy)
-@@ -0,0 +1,267 @@
+diff --git a/sys/dev/drm/i915_gem_evict.c b/sys/dev/drm/i915_gem_evict.c
+new file mode 100644
+index 0000000..c6631a9
+--- /dev/null
++++ sys/dev/drm/i915_gem_evict.c
+@@ -0,0 +1,227 @@
 +/*
-+ * Copyright © 2007 David Airlie
++ * Copyright © 2008-2010 Intel Corporation
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
@@ -19991,580 +30232,4630 @@
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
 + *
 + * Authors:
-+ *     David Airlie
++ *    Eric Anholt <eric at anholt.net>
++ *    Chris Wilson <chris at chris-wilson.co.uk>
++ *
 + */
 +
 +#include "dev/drm/drmP.h"
 +#include "dev/drm/drm.h"
-+#include "dev/drm/drm_crtc.h"
-+#include "dev/drm/drm_fb_helper.h"
 +#include "dev/drm/i915_drm.h"
 +#include "dev/drm/i915_drv.h"
-+#include "dev/drm/intel_drv.h"
 +
-+static int intelfb_create(struct intel_fbdev *ifbdev,
-+			  struct drm_fb_helper_surface_size *sizes)
++static bool
++mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 +{
-+	struct drm_device *dev = ifbdev->helper.dev;
-+#if 0
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct fb_info *info;
-+#endif
-+	struct drm_framebuffer *fb;
-+	struct drm_mode_fb_cmd2 mode_cmd;
++	list_add(&obj->exec_list, unwind);
++	drm_gem_object_reference(&obj->base);
++	return drm_mm_scan_add_block(obj->gtt_space);
++}
++
++int
++i915_gem_evict_something(struct drm_device *dev, int min_size,
++			 unsigned alignment, bool mappable)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct list_head eviction_list, unwind_list;
 +	struct drm_i915_gem_object *obj;
-+	int size, ret;
++	int ret = 0;
 +
-+	/* we don't do packed 24bpp */
-+	if (sizes->surface_bpp == 24)
-+		sizes->surface_bpp = 32;
++	i915_gem_retire_requests(dev);
 +
-+	mode_cmd.width = sizes->surface_width;
-+	mode_cmd.height = sizes->surface_height;
++	/* Re-check for free space after retiring requests */
++	if (mappable) {
++		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
++						min_size, alignment, 0,
++						dev_priv->mm.gtt_mappable_end,
++						0))
++			return 0;
++	} else {
++		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
++				       min_size, alignment, 0))
++			return 0;
++	}
 +
-+	mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) /
-+							 8), 64);
-+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-+							  sizes->surface_depth);
++	CTR4(KTR_DRM, "evict_something %p %d %u %d", dev, min_size,
++	    alignment, mappable);
 +
-+	size = mode_cmd.pitches[0] * mode_cmd.height;
-+	size = roundup2(size, PAGE_SIZE);
-+	obj = i915_gem_alloc_object(dev, size);
-+	if (!obj) {
-+		DRM_ERROR("failed to allocate framebuffer\n");
-+		ret = -ENOMEM;
-+		goto out;
++	/*
++	 * The goal is to evict objects and amalgamate space in LRU order.
++	 * The oldest idle objects reside on the inactive list, which is in
++	 * retirement order. The next objects to retire are those on the (per
++	 * ring) active list that do not have an outstanding flush. Once the
++	 * hardware reports completion (the seqno is updated after the
++	 * batchbuffer has been finished) the clean buffer objects would
++	 * be retired to the inactive list. Any dirty objects would be added
++	 * to the tail of the flushing list. So after processing the clean
++	 * active objects we need to emit a MI_FLUSH to retire the flushing
++	 * list, hence the retirement order of the flushing list is in
++	 * advance of the dirty objects on the active lists.
++	 *
++	 * The retirement sequence is thus:
++	 *   1. Inactive objects (already retired)
++	 *   2. Clean active objects
++	 *   3. Flushing list
++	 *   4. Dirty active objects.
++	 *
++	 * On each list, the oldest objects lie at the HEAD with the freshest
++	 * object on the TAIL.
++	 */
++
++	INIT_LIST_HEAD(&unwind_list);
++	if (mappable)
++		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
++					    alignment, 0,
++					    dev_priv->mm.gtt_mappable_end);
++	else
++		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
++
++	/* First see if there is a large enough contiguous idle region... */
++	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
++		if (mark_free(obj, &unwind_list))
++			goto found;
 +	}
 +
-+	DRM_LOCK();
++	/* Now merge in the soon-to-be-expired objects... */
++	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
++		/* Does the object require an outstanding flush? */
++		if (obj->base.write_domain || obj->pin_count)
++			continue;
 +
-+	/* Flush everything out, we'll be doing GTT only from now on */
-+	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
-+	if (ret) {
-+		DRM_ERROR("failed to pin fb: %d\n", ret);
-+		goto out_unref;
++		if (mark_free(obj, &unwind_list))
++			goto found;
 +	}
 +
-+#if 0
-+	info = framebuffer_alloc(0, device);
-+	if (!info) {
-+		ret = -ENOMEM;
-+		goto out_unpin;
++	/* Finally add anything with a pending flush (in order of retirement) */
++	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
++		if (obj->pin_count)
++			continue;
++
++		if (mark_free(obj, &unwind_list))
++			goto found;
 +	}
++	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
++		if (!obj->base.write_domain || obj->pin_count)
++			continue;
 +
-+	info->par = ifbdev;
-+#endif
++		if (mark_free(obj, &unwind_list))
++			goto found;
++	}
 +
-+	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
++	/* Nothing found, clean up and bail out! */
++	while (!list_empty(&unwind_list)) {
++		obj = list_first_entry(&unwind_list,
++				       struct drm_i915_gem_object,
++				       exec_list);
++
++		ret = drm_mm_scan_remove_block(obj->gtt_space);
++		KASSERT(ret == 0, ("drm_mm_scan_remove_block failed %d", ret));
++
++		list_del_init(&obj->exec_list);
++		drm_gem_object_unreference(&obj->base);
++	}
++
++	/* We expect the caller to unpin, evict all and try again, or give up.
++	 * So calling i915_gem_evict_everything() is unnecessary.
++	 */
++	return -ENOSPC;
++
++found:
++	/* drm_mm doesn't allow any other operations while
++	 * scanning, therefore store the objects to be evicted on a
++	 * temporary list. */
++	INIT_LIST_HEAD(&eviction_list);
++	while (!list_empty(&unwind_list)) {
++		obj = list_first_entry(&unwind_list,
++				       struct drm_i915_gem_object,
++				       exec_list);
++		if (drm_mm_scan_remove_block(obj->gtt_space)) {
++			list_move(&obj->exec_list, &eviction_list);
++			continue;
++		}
++		list_del_init(&obj->exec_list);
++		drm_gem_object_unreference(&obj->base);
++	}
++
++	/* Unbinding will emit any required flushes */
++	while (!list_empty(&eviction_list)) {
++		obj = list_first_entry(&eviction_list,
++				       struct drm_i915_gem_object,
++				       exec_list);
++		if (ret == 0)
++			ret = i915_gem_object_unbind(obj);
++
++		list_del_init(&obj->exec_list);
++		drm_gem_object_unreference(&obj->base);
++	}
++
++	return ret;
++}
++
++int
++i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++	bool lists_empty;
++
++	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
++		       list_empty(&dev_priv->mm.flushing_list) &&
++		       list_empty(&dev_priv->mm.active_list));
++	if (lists_empty)
++		return -ENOSPC;
++
++	CTR2(KTR_DRM, "evict_everything %p %d", dev, purgeable_only);
++
++	/* Flush everything (on to the inactive lists) and evict */
++	ret = i915_gpu_idle(dev);
 +	if (ret)
-+		goto out_unpin;
++		return ret;
 +
-+	fb = &ifbdev->ifb.base;
++	KASSERT(list_empty(&dev_priv->mm.flushing_list),
++	    ("flush list not empty"));
 +
-+	ifbdev->helper.fb = fb;
-+#if 0
-+	ifbdev->helper.fbdev = info;
++	return i915_gem_evict_inactive(dev, purgeable_only);
++}
 +
-+	strcpy(info->fix.id, "inteldrmfb");
++/** Unbinds all inactive objects. */
++int
++i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_gem_object *obj, *next;
 +
-+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
-+	info->fbops = &intelfb_ops;
++	CTR2(KTR_DRM, "evict_inactive %p %d", dev, purgeable_only);
 +
-+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-+	if (ret) {
-+		ret = -ENOMEM;
-+		goto out_unpin;
++	list_for_each_entry_safe(obj, next,
++				 &dev_priv->mm.inactive_list, mm_list) {
++		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
++			int ret = i915_gem_object_unbind(obj);
++			if (ret)
++				return ret;
++		}
 +	}
-+	/* setup aperture base/size for vesafb takeover */
-+	info->apertures = alloc_apertures(1);
-+	if (!info->apertures) {
-+		ret = -ENOMEM;
-+		goto out_unpin;
++
++	return 0;
++}
+diff --git a/sys/dev/drm/i915_gem_execbuffer.c b/sys/dev/drm/i915_gem_execbuffer.c
+new file mode 100644
+index 0000000..574fe2d
+--- /dev/null
++++ sys/dev/drm/i915_gem_execbuffer.c
+@@ -0,0 +1,1479 @@
++/*
++ * Copyright © 2008,2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric at anholt.net>
++ *    Chris Wilson <chris at chris-wilson.co.uk>
++ *
++ */
++
++#include "dev/drm/drmP.h"
++#include "dev/drm/drm.h"
++#include "dev/drm/i915_drm.h"
++#include "dev/drm/i915_drv.h"
++#include "dev/drm/intel_drv.h"
++#include <sys/limits.h>
++#include <sys/sf_buf.h>
++
++struct change_domains {
++	uint32_t invalidate_domains;
++	uint32_t flush_domains;
++	uint32_t flush_rings;
++	uint32_t flips;
++};
++
++/*
++ * Set the next domain for the specified object. This
++ * may not actually perform the necessary flushing/invaliding though,
++ * as that may want to be batched with other set_domain operations
++ *
++ * This is (we hope) the only really tricky part of gem. The goal
++ * is fairly simple -- track which caches hold bits of the object
++ * and make sure they remain coherent. A few concrete examples may
++ * help to explain how it works. For shorthand, we use the notation
++ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
++ * a pair of read and write domain masks.
++ *
++ * Case 1: the batch buffer
++ *
++ *	1. Allocated
++ *	2. Written by CPU
++ *	3. Mapped to GTT
++ *	4. Read by GPU
++ *	5. Unmapped from GTT
++ *	6. Freed
++ *
++ *	Let's take these a step at a time
++ *
++ *	1. Allocated
++ *		Pages allocated from the kernel may still have
++ *		cache contents, so we set them to (CPU, CPU) always.
++ *	2. Written by CPU (using pwrite)
++ *		The pwrite function calls set_domain (CPU, CPU) and
++ *		this function does nothing (as nothing changes)
++ *	3. Mapped by GTT
++ *		This function asserts that the object is not
++ *		currently in any GPU-based read or write domains
++ *	4. Read by GPU
++ *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
++ *		As write_domain is zero, this function adds in the
++ *		current read domains (CPU+COMMAND, 0).
++ *		flush_domains is set to CPU.
++ *		invalidate_domains is set to COMMAND
++ *		clflush is run to get data out of the CPU caches
++ *		then i915_dev_set_domain calls i915_gem_flush to
++ *		emit an MI_FLUSH and drm_agp_chipset_flush
++ *	5. Unmapped from GTT
++ *		i915_gem_object_unbind calls set_domain (CPU, CPU)
++ *		flush_domains and invalidate_domains end up both zero
++ *		so no flushing/invalidating happens
++ *	6. Freed
++ *		yay, done
++ *
++ * Case 2: The shared render buffer
++ *
++ *	1. Allocated
++ *	2. Mapped to GTT
++ *	3. Read/written by GPU
++ *	4. set_domain to (CPU,CPU)
++ *	5. Read/written by CPU
++ *	6. Read/written by GPU
++ *
++ *	1. Allocated
++ *		Same as last example, (CPU, CPU)
++ *	2. Mapped to GTT
++ *		Nothing changes (assertions find that it is not in the GPU)
++ *	3. Read/written by GPU
++ *		execbuffer calls set_domain (RENDER, RENDER)
++ *		flush_domains gets CPU
++ *		invalidate_domains gets GPU
++ *		clflush (obj)
++ *		MI_FLUSH and drm_agp_chipset_flush
++ *	4. set_domain (CPU, CPU)
++ *		flush_domains gets GPU
++ *		invalidate_domains gets CPU
++ *		wait_rendering (obj) to make sure all drawing is complete.
++ *		This will include an MI_FLUSH to get the data from GPU
++ *		to memory
++ *		clflush (obj) to invalidate the CPU cache
++ *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
++ *	5. Read/written by CPU
++ *		cache lines are loaded and dirtied
++ *	6. Read written by GPU
++ *		Same as last GPU access
++ *
++ * Case 3: The constant buffer
++ *
++ *	1. Allocated
++ *	2. Written by CPU
++ *	3. Read by GPU
++ *	4. Updated (written) by CPU again
++ *	5. Read by GPU
++ *
++ *	1. Allocated
++ *		(CPU, CPU)
++ *	2. Written by CPU
++ *		(CPU, CPU)
++ *	3. Read by GPU
++ *		(CPU+RENDER, 0)
++ *		flush_domains = CPU
++ *		invalidate_domains = RENDER
++ *		clflush (obj)
++ *		MI_FLUSH
++ *		drm_agp_chipset_flush
++ *	4. Updated (written) by CPU again
++ *		(CPU, CPU)
++ *		flush_domains = 0 (no previous write domain)
++ *		invalidate_domains = 0 (no new read domains)
++ *	5. Read by GPU
++ *		(CPU+RENDER, 0)
++ *		flush_domains = CPU
++ *		invalidate_domains = RENDER
++ *		clflush (obj)
++ *		MI_FLUSH
++ *		drm_agp_chipset_flush
++ */
++static void
++i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
++				  struct intel_ring_buffer *ring,
++				  struct change_domains *cd)
++{
++	uint32_t invalidate_domains = 0, flush_domains = 0;
++
++	/*
++	 * If the object isn't moving to a new write domain,
++	 * let the object stay in multiple read domains
++	 */
++	if (obj->base.pending_write_domain == 0)
++		obj->base.pending_read_domains |= obj->base.read_domains;
++
++	/*
++	 * Flush the current write domain if
++	 * the new read domains don't match. Invalidate
++	 * any read domains which differ from the old
++	 * write domain
++	 */
++	if (obj->base.write_domain &&
++	    (((obj->base.write_domain != obj->base.pending_read_domains ||
++	       obj->ring != ring)) ||
++	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
++		flush_domains |= obj->base.write_domain;
++		invalidate_domains |=
++			obj->base.pending_read_domains & ~obj->base.write_domain;
 +	}
-+	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-+	info->apertures->ranges[0].size =
-+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
++	/*
++	 * Invalidate any read caches which may have
++	 * stale data. That is, any new read domains.
++	 */
++	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
++	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
++		i915_gem_clflush_object(obj);
 +
-+	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
-+	info->fix.smem_len = size;
++	if (obj->base.pending_write_domain)
++		cd->flips |= atomic_read(&obj->pending_flip);
 +
-+	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
-+	if (!info->screen_base) {
-+		ret = -ENOSPC;
-+		goto out_unpin;
++	/* The actual obj->write_domain will be updated with
++	 * pending_write_domain after we emit the accumulated flush for all
++	 * of our domain changes in execbuffers (which clears objects'
++	 * write_domains).  So if we have a current write domain that we
++	 * aren't changing, set pending_write_domain to that.
++	 */
++	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
++		obj->base.pending_write_domain = obj->base.write_domain;
++
++	cd->invalidate_domains |= invalidate_domains;
++	cd->flush_domains |= flush_domains;
++	if (flush_domains & I915_GEM_GPU_DOMAINS)
++		cd->flush_rings |= obj->ring->id;
++	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
++		cd->flush_rings |= ring->id;
++}
++
++struct eb_objects {
++	u_long hashmask;
++	LIST_HEAD(, drm_i915_gem_object) *buckets;
++};
++
++static struct eb_objects *
++eb_create(int size)
++{
++	struct eb_objects *eb;
++
++	eb = malloc(sizeof(*eb), DRM_I915_GEM, M_WAITOK | M_ZERO);
++	eb->buckets = hashinit(size, DRM_I915_GEM, &eb->hashmask);
++	return (eb);
++}
++
++static void
++eb_reset(struct eb_objects *eb)
++{
++	int i;
++
++	for (i = 0; i <= eb->hashmask; i++)
++		LIST_INIT(&eb->buckets[i]);
++}
++
++static void
++eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
++{
++
++	LIST_INSERT_HEAD(&eb->buckets[obj->exec_handle & eb->hashmask],
++	    obj, exec_node);
++}
++
++static struct drm_i915_gem_object *
++eb_get_object(struct eb_objects *eb, unsigned long handle)
++{
++	struct drm_i915_gem_object *obj;
++
++	LIST_FOREACH(obj, &eb->buckets[handle & eb->hashmask], exec_node) {
++		if (obj->exec_handle == handle)
++			return (obj);
 +	}
-+	info->screen_size = size;
++	return (NULL);
++}
 +
-+//	memset(info->screen_base, 0, size);
++static void
++eb_destroy(struct eb_objects *eb)
++{
 +
-+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
-+	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
++	free(eb->buckets, DRM_I915_GEM);
++	free(eb, DRM_I915_GEM);
++}
 +
-+	info->pixmap.size = 64*1024;
-+	info->pixmap.buf_align = 8;
-+	info->pixmap.access_align = 32;
-+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
-+	info->pixmap.scan_align = 1;
++static int
++i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
++				   struct eb_objects *eb,
++				   struct drm_i915_gem_relocation_entry *reloc)
++{
++	struct drm_device *dev = obj->base.dev;
++	struct drm_gem_object *target_obj;
++	uint32_t target_offset;
++	int ret = -EINVAL;
++
++	/* we already hold a reference to all valid objects */
++	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
++	if (unlikely(target_obj == NULL))
++		return -ENOENT;
++
++	target_offset = to_intel_bo(target_obj)->gtt_offset;
++
++#if WATCH_RELOC
++	DRM_INFO("%s: obj %p offset %08x target %d "
++		 "read %08x write %08x gtt %08x "
++		 "presumed %08x delta %08x\n",
++		 __func__,
++		 obj,
++		 (int) reloc->offset,
++		 (int) reloc->target_handle,
++		 (int) reloc->read_domains,
++		 (int) reloc->write_domain,
++		 (int) target_offset,
++		 (int) reloc->presumed_offset,
++		 reloc->delta);
 +#endif
 +
-+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
-+		      fb->width, fb->height,
-+		      obj->gtt_offset, obj);
++	/* The target buffer should have appeared before us in the
++	 * exec_object list, so it should have a GTT space bound by now.
++	 */
++	if (unlikely(target_offset == 0)) {
++		DRM_ERROR("No GTT space found for object %d\n",
++			  reloc->target_handle);
++		return ret;
++	}
 +
-+	DRM_UNLOCK();
-+#if 1
-+	KIB_NOTYET();
-+#else
-+	vga_switcheroo_client_fb_set(dev->pdev, info);
-+#endif
++	/* Validate that the target is in a valid r/w GPU domain */
++	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
++		DRM_ERROR("reloc with multiple write domains: "
++			  "obj %p target %d offset %d "
++			  "read %08x write %08x",
++			  obj, reloc->target_handle,
++			  (int) reloc->offset,
++			  reloc->read_domains,
++			  reloc->write_domain);
++		return ret;
++	}
++	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
++		DRM_ERROR("reloc with read/write CPU domains: "
++			  "obj %p target %d offset %d "
++			  "read %08x write %08x",
++			  obj, reloc->target_handle,
++			  (int) reloc->offset,
++			  reloc->read_domains,
++			  reloc->write_domain);
++		return ret;
++	}
++	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
++		     reloc->write_domain != target_obj->pending_write_domain)) {
++		DRM_ERROR("Write domain conflict: "
++			  "obj %p target %d offset %d "
++			  "new %08x old %08x\n",
++			  obj, reloc->target_handle,
++			  (int) reloc->offset,
++			  reloc->write_domain,
++			  target_obj->pending_write_domain);
++		return ret;
++	}
++
++	target_obj->pending_read_domains |= reloc->read_domains;
++	target_obj->pending_write_domain |= reloc->write_domain;
++
++	/* If the relocation already has the right value in it, no
++	 * more work needs to be done.
++	 */
++	if (target_offset == reloc->presumed_offset)
++		return 0;
++
++	/* Check that the relocation address is valid... */
++	if (unlikely(reloc->offset > obj->base.size - 4)) {
++		DRM_ERROR("Relocation beyond object bounds: "
++			  "obj %p target %d offset %d size %d.\n",
++			  obj, reloc->target_handle,
++			  (int) reloc->offset,
++			  (int) obj->base.size);
++		return ret;
++	}
++	if (unlikely(reloc->offset & 3)) {
++		DRM_ERROR("Relocation not 4-byte aligned: "
++			  "obj %p target %d offset %d.\n",
++			  obj, reloc->target_handle,
++			  (int) reloc->offset);
++		return ret;
++	}
++
++	reloc->delta += target_offset;
++	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
++		uint32_t page_offset = reloc->offset & PAGE_MASK;
++		char *vaddr;
++		struct sf_buf *sf;
++
++		sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)],
++		    SFB_NOWAIT);
++		if (sf == NULL)
++			return (-ENOMEM);
++		vaddr = (void *)sf_buf_kva(sf);
++		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
++		sf_buf_free(sf);
++	} else {
++		uint32_t *reloc_entry;
++		char *reloc_page;
++
++		/* We can't wait for rendering with pagefaults disabled */
++		if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
++			return (-EFAULT);
++		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
++		if (ret)
++			return ret;
++
++		/*
++		 * Map the page containing the relocation we're going
++		 * to perform.
++		 */
++		reloc->offset += obj->gtt_offset;
++		reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
++		    ~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
++		reloc_entry = (uint32_t *)(reloc_page + (reloc->offset &
++		    PAGE_MASK));
++		*(volatile uint32_t *)reloc_entry = reloc->delta;
++		pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
++	}
++
++	/* and update the user's relocation entry */
++	reloc->presumed_offset = target_offset;
++
 +	return 0;
++}
 +
-+out_unpin:
-+	i915_gem_object_unpin(obj);
-+out_unref:
-+	drm_gem_object_unreference(&obj->base);
-+	DRM_UNLOCK();
-+out:
-+	return ret;
++static int
++i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
++    struct eb_objects *eb)
++{
++	struct drm_i915_gem_relocation_entry *user_relocs;
++	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
++	struct drm_i915_gem_relocation_entry reloc;
++	int i, ret;
++
++	user_relocs = (void *)(uintptr_t)entry->relocs_ptr;
++	for (i = 0; i < entry->relocation_count; i++) {
++		ret = -copyin_nofault(user_relocs + i, &reloc, sizeof(reloc));
++		if (ret != 0)
++			return (ret);
++
++		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
++		if (ret != 0)
++			return (ret);
++
++		ret = -copyout_nofault(&reloc.presumed_offset,
++		    &user_relocs[i].presumed_offset,
++		    sizeof(reloc.presumed_offset));
++		if (ret != 0)
++			return (ret);
++	}
++
++	return (0);
 +}
 +
-+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
-+					  struct drm_fb_helper_surface_size *sizes)
++static int
++i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
++    struct eb_objects *eb, struct drm_i915_gem_relocation_entry *relocs)
 +{
-+	struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
-+	int new_fb = 0;
-+	int ret;
++	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
++	int i, ret;
 +
-+	if (!helper->fb) {
-+		ret = intelfb_create(ifbdev, sizes);
++	for (i = 0; i < entry->relocation_count; i++) {
++		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
 +		if (ret)
 +			return ret;
-+		new_fb = 1;
 +	}
-+	return new_fb;
++
++	return 0;
 +}
 +
-+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
-+	.gamma_set = intel_crtc_fb_gamma_set,
-+	.gamma_get = intel_crtc_fb_gamma_get,
-+	.fb_probe = intel_fb_find_or_create_single,
-+};
++static int
++i915_gem_execbuffer_relocate(struct drm_device *dev,
++			     struct eb_objects *eb,
++			     struct list_head *objects)
++{
++	struct drm_i915_gem_object *obj;
++	int ret, pflags;
 +
-+static void intel_fbdev_destroy(struct drm_device *dev,
-+				struct intel_fbdev *ifbdev)
++	/* Try to move as many of the relocation targets off the active list
++	 * to avoid unnecessary fallbacks to the slow path, as we cannot wait
++	 * for the retirement with pagefaults disabled.
++	 */
++	i915_gem_retire_requests(dev);
++
++	ret = 0;
++	pflags = vm_fault_disable_pagefaults();
++	/* This is the fast path and we cannot handle a pagefault whilst
++	 * holding the device lock lest the user pass in the relocations
++	 * contained within a mmaped bo. In such a case the page
++	 * fault handler would call i915_gem_fault() and we would try to
++	 * acquire the device lock again. Obviously this is bad.
++	 */
++
++	list_for_each_entry(obj, objects, exec_list) {
++		ret = i915_gem_execbuffer_relocate_object(obj, eb);
++		if (ret != 0)
++			break;
++	}
++	vm_fault_enable_pagefaults(pflags);
++	return (ret);
++}
++
++static int
++i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
++			    struct drm_file *file,
++			    struct list_head *objects)
 +{
-+#if 0
-+	struct fb_info *info;
-+#endif
-+	struct intel_framebuffer *ifb = &ifbdev->ifb;
++	struct drm_i915_gem_object *obj;
++	int ret, retry;
++	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
++	struct list_head ordered_objects;
 +
-+#if 0
-+	if (ifbdev->helper.fbdev) {
-+		info = ifbdev->helper.fbdev;
-+		unregister_framebuffer(info);
-+		iounmap(info->screen_base);
-+		if (info->cmap.len)
-+			fb_dealloc_cmap(&info->cmap);
-+		framebuffer_release(info);
++	INIT_LIST_HEAD(&ordered_objects);
++	while (!list_empty(objects)) {
++		struct drm_i915_gem_exec_object2 *entry;
++		bool need_fence, need_mappable;
++
++		obj = list_first_entry(objects,
++				       struct drm_i915_gem_object,
++				       exec_list);
++		entry = obj->exec_entry;
++
++		need_fence =
++			has_fenced_gpu_access &&
++			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
++			obj->tiling_mode != I915_TILING_NONE;
++		need_mappable =
++			entry->relocation_count ? true : need_fence;
++
++		if (need_mappable)
++			list_move(&obj->exec_list, &ordered_objects);
++		else
++			list_move_tail(&obj->exec_list, &ordered_objects);
++
++		obj->base.pending_read_domains = 0;
++		obj->base.pending_write_domain = 0;
 +	}
-+#endif
++	list_splice(&ordered_objects, objects);
 +
-+	drm_fb_helper_fini(&ifbdev->helper);
++	/* Attempt to pin all of the buffers into the GTT.
++	 * This is done in 3 phases:
++	 *
++	 * 1a. Unbind all objects that do not match the GTT constraints for
++	 *     the execbuffer (fenceable, mappable, alignment etc).
++	 * 1b. Increment pin count for already bound objects and obtain
++	 *     a fence register if required.
++	 * 2.  Bind new objects.
++	 * 3.  Decrement pin count.
++	 *
++	 * This avoids unnecessary unbinding of later objects in order to make
++	 * room for the earlier objects *unless* we need to defragment.
++	 */
++	retry = 0;
++	do {
++		ret = 0;
 +
-+	drm_framebuffer_cleanup(&ifb->base);
-+	if (ifb->obj) {
-+		drm_gem_object_unreference_unlocked(&ifb->obj->base);
-+		ifb->obj = NULL;
++		/* Unbind any ill-fitting objects or pin. */
++		list_for_each_entry(obj, objects, exec_list) {
++			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
++			bool need_fence, need_mappable;
++
++			if (!obj->gtt_space)
++				continue;
++
++			need_fence =
++				has_fenced_gpu_access &&
++				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
++				obj->tiling_mode != I915_TILING_NONE;
++			need_mappable =
++				entry->relocation_count ? true : need_fence;
++
++			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
++			    (need_mappable && !obj->map_and_fenceable))
++				ret = i915_gem_object_unbind(obj);
++			else
++				ret = i915_gem_object_pin(obj,
++							  entry->alignment,
++							  need_mappable);
++			if (ret)
++				goto err;
++
++			entry++;
++		}
++
++		/* Bind fresh objects */
++		list_for_each_entry(obj, objects, exec_list) {
++			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
++			bool need_fence;
++
++			need_fence =
++				has_fenced_gpu_access &&
++				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
++				obj->tiling_mode != I915_TILING_NONE;
++
++			if (!obj->gtt_space) {
++				bool need_mappable =
++					entry->relocation_count ? true : need_fence;
++
++				ret = i915_gem_object_pin(obj,
++							  entry->alignment,
++							  need_mappable);
++				if (ret)
++					break;
++			}
++
++			if (has_fenced_gpu_access) {
++				if (need_fence) {
++					ret = i915_gem_object_get_fence(obj, ring);
++					if (ret)
++						break;
++				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
++					   obj->tiling_mode == I915_TILING_NONE) {
++					/* XXX pipelined! */
++					ret = i915_gem_object_put_fence(obj);
++					if (ret)
++						break;
++				}
++				obj->pending_fenced_gpu_access = need_fence;
++			}
++
++			entry->offset = obj->gtt_offset;
++		}
++
++		/* Decrement pin count for bound objects */
++		list_for_each_entry(obj, objects, exec_list) {
++			if (obj->gtt_space)
++				i915_gem_object_unpin(obj);
++		}
++
++		if (ret != -ENOSPC || retry > 1)
++			return ret;
++
++		/* First attempt, just clear anything that is purgeable.
++		 * Second attempt, clear the entire GTT.
++		 */
++		ret = i915_gem_evict_everything(ring->dev, retry == 0);
++		if (ret)
++			return ret;
++
++		retry++;
++	} while (1);
++
++err:
++	obj = list_entry(obj->exec_list.prev,
++			 struct drm_i915_gem_object,
++			 exec_list);
++	while (objects != &obj->exec_list) {
++		if (obj->gtt_space)
++			i915_gem_object_unpin(obj);
++
++		obj = list_entry(obj->exec_list.prev,
++				 struct drm_i915_gem_object,
++				 exec_list);
 +	}
++
++	return ret;
 +}
 +
-+int intel_fbdev_init(struct drm_device *dev)
++static int
++i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
++    struct drm_file *file, struct intel_ring_buffer *ring,
++    struct list_head *objects, struct eb_objects *eb,
++    struct drm_i915_gem_exec_object2 *exec, int count)
 +{
-+	struct intel_fbdev *ifbdev;
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	int ret;
++	struct drm_i915_gem_relocation_entry *reloc;
++	struct drm_i915_gem_object *obj;
++	int *reloc_offset;
++	int i, total, ret;
 +
-+	ifbdev = malloc(sizeof(struct intel_fbdev), DRM_MEM_KMS,
++	/* We may process another execbuffer during the unlock... */
++	while (!list_empty(objects)) {
++		obj = list_first_entry(objects,
++				       struct drm_i915_gem_object,
++				       exec_list);
++		list_del_init(&obj->exec_list);
++		drm_gem_object_unreference(&obj->base);
++	}
++
++	DRM_UNLOCK();
++
++	total = 0;
++	for (i = 0; i < count; i++)
++		total += exec[i].relocation_count;
++
++	reloc_offset = malloc(count * sizeof(*reloc_offset), DRM_I915_GEM,
 +	    M_WAITOK | M_ZERO);
++	reloc = malloc(total * sizeof(*reloc), DRM_I915_GEM, M_WAITOK | M_ZERO);
 +
-+	dev_priv->fbdev = ifbdev;
-+	ifbdev->helper.funcs = &intel_fb_helper_funcs;
++	total = 0;
++	for (i = 0; i < count; i++) {
++		struct drm_i915_gem_relocation_entry *user_relocs;
 +
-+	ret = drm_fb_helper_init(dev, &ifbdev->helper,
-+				 dev_priv->num_pipe,
-+				 INTELFB_CONN_LIMIT);
++		user_relocs = (void *)(uintptr_t)exec[i].relocs_ptr;
++		ret = -copyin(user_relocs, reloc + total,
++		    exec[i].relocation_count * sizeof(*reloc));
++		if (ret != 0) {
++			DRM_LOCK();
++			goto err;
++		}
++
++		reloc_offset[i] = total;
++		total += exec[i].relocation_count;
++	}
++
++	ret = i915_mutex_lock_interruptible(dev);
 +	if (ret) {
-+		free(ifbdev, DRM_MEM_KMS);
-+		return ret;
++		DRM_LOCK();
++		goto err;
 +	}
 +
-+	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
-+	drm_fb_helper_initial_config(&ifbdev->helper, 32);
++	/* reacquire the objects */
++	eb_reset(eb);
++	for (i = 0; i < count; i++) {
++		struct drm_i915_gem_object *obj;
++
++		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
++		    exec[i].handle));
++		if (&obj->base == NULL) {
++			DRM_ERROR("Invalid object handle %d at index %d\n",
++				   exec[i].handle, i);
++			ret = -ENOENT;
++			goto err;
++		}
++
++		list_add_tail(&obj->exec_list, objects);
++		obj->exec_handle = exec[i].handle;
++		obj->exec_entry = &exec[i];
++		eb_add_object(eb, obj);
++	}
++
++	ret = i915_gem_execbuffer_reserve(ring, file, objects);
++	if (ret)
++		goto err;
++
++	list_for_each_entry(obj, objects, exec_list) {
++		int offset = obj->exec_entry - exec;
++		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
++		    reloc + reloc_offset[offset]);
++		if (ret)
++			goto err;
++	}
++
++	/* Leave the user relocations as they are; this is the painfully slow path,
++	 * and we want to avoid the complication of dropping the lock whilst
++	 * having buffers reserved in the aperture and so causing spurious
++	 * ENOSPC for random operations.
++	 */
++
++err:
++	free(reloc, DRM_I915_GEM);
++	free(reloc_offset, DRM_I915_GEM);
++	return ret;
++}
++
++static int
++i915_gem_execbuffer_flush(struct drm_device *dev,
++			  uint32_t invalidate_domains,
++			  uint32_t flush_domains,
++			  uint32_t flush_rings)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int i, ret;
++
++	if (flush_domains & I915_GEM_DOMAIN_CPU)
++		intel_gtt_chipset_flush();
++
++	if (flush_domains & I915_GEM_DOMAIN_GTT)
++		wmb();
++
++	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
++		for (i = 0; i < I915_NUM_RINGS; i++)
++			if (flush_rings & (1 << i)) {
++				ret = i915_gem_flush_ring(&dev_priv->rings[i],
++				    invalidate_domains, flush_domains);
++				if (ret)
++					return ret;
++			}
++	}
++
 +	return 0;
 +}
 +
-+void intel_fbdev_fini(struct drm_device *dev)
++static bool
++intel_enable_semaphores(struct drm_device *dev)
 +{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	if (!dev_priv->fbdev)
++	if (INTEL_INFO(dev)->gen < 6)
++		return 0;
++
++	if (i915_semaphores >= 0)
++		return i915_semaphores;
++
++	/* Enable semaphores on SNB when IO remapping is off */
++	if (INTEL_INFO(dev)->gen == 6)
++		return !intel_iommu_enabled;
++
++	return 1;
++}
++
++static int
++i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
++			       struct intel_ring_buffer *to)
++{
++	struct intel_ring_buffer *from = obj->ring;
++	u32 seqno;
++	int ret, idx;
++
++	if (from == NULL || to == from)
++		return 0;
++
++	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
++	if (!intel_enable_semaphores(obj->base.dev))
++		return i915_gem_object_wait_rendering(obj);
++
++	idx = intel_ring_sync_index(from, to);
++
++	seqno = obj->last_rendering_seqno;
++	if (seqno <= from->sync_seqno[idx])
++		return 0;
++
++	if (seqno == from->outstanding_lazy_request) {
++		struct drm_i915_gem_request *request;
++
++		request = malloc(sizeof(*request), DRM_I915_GEM,
++		    M_WAITOK | M_ZERO);
++		ret = i915_add_request(from, NULL, request);
++		if (ret) {
++			free(request, DRM_I915_GEM);
++			return ret;
++		}
++
++		seqno = request->seqno;
++	}
++
++	from->sync_seqno[idx] = seqno;
++
++	return to->sync_to(to, from, seqno - 1);
++}
++
++static int
++i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
++{
++	u32 plane, flip_mask;
++	int ret;
++
++	/* Check for any pending flips. As we only maintain a flip queue depth
++	 * of 1, we can simply insert a WAIT for the next display flip prior
++	 * to executing the batch and avoid stalling the CPU.
++	 */
++
++	for (plane = 0; flips >> plane; plane++) {
++		if (((flips >> plane) & 1) == 0)
++			continue;
++
++		if (plane)
++			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
++		else
++			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
++
++		ret = intel_ring_begin(ring, 2);
++		if (ret)
++			return ret;
++
++		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
++		intel_ring_emit(ring, MI_NOOP);
++		intel_ring_advance(ring);
++	}
++
++	return 0;
++}
++
++static int
++i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
++				struct list_head *objects)
++{
++	struct drm_i915_gem_object *obj;
++	struct change_domains cd;
++	int ret;
++
++	memset(&cd, 0, sizeof(cd));
++	list_for_each_entry(obj, objects, exec_list)
++		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
++
++	if (cd.invalidate_domains | cd.flush_domains) {
++#if WATCH_EXEC
++		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
++			  __func__,
++			 cd.invalidate_domains,
++			 cd.flush_domains);
++#endif
++		ret = i915_gem_execbuffer_flush(ring->dev,
++						cd.invalidate_domains,
++						cd.flush_domains,
++						cd.flush_rings);
++		if (ret)
++			return ret;
++	}
++
++	if (cd.flips) {
++		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
++		if (ret)
++			return ret;
++	}
++
++	list_for_each_entry(obj, objects, exec_list) {
++		ret = i915_gem_execbuffer_sync_rings(obj, ring);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static bool
++i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
++{
++	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
++}
++
++static int
++validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
++    vm_page_t ***map)
++{
++	vm_page_t *ma;
++	int i, length, page_count;
++
++	/* XXXKIB various limits checking is missing there */
++	*map = malloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO);
++	for (i = 0; i < count; i++) {
++		/* First check for malicious input causing overflow */
++		if (exec[i].relocation_count >
++		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
++			return -EINVAL;
++
++		length = exec[i].relocation_count *
++		    sizeof(struct drm_i915_gem_relocation_entry);
++		if (length == 0) {
++			(*map)[i] = NULL;
++			continue;
++		}
++		/*
++		 * Since both start and end of the relocation region
++		 * may not be aligned on a page boundary, be
++		 * conservative and request a page slot for each
++		 * partial page.  Thus +2.
++		 */
++		page_count = howmany(length, PAGE_SIZE) + 2;
++		ma = (*map)[i] = malloc(page_count * sizeof(vm_page_t),
++		    DRM_I915_GEM, M_WAITOK | M_ZERO);
++		if (vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
++		    exec[i].relocs_ptr, length, VM_PROT_READ | VM_PROT_WRITE,
++		    ma, page_count) == -1) {
++			free(ma, DRM_I915_GEM);
++			(*map)[i] = NULL;
++			return (-EFAULT);
++		}
++	}
++
++	return 0;
++}
++
++static void
++i915_gem_execbuffer_move_to_active(struct list_head *objects,
++				   struct intel_ring_buffer *ring,
++				   u32 seqno)
++{
++	struct drm_i915_gem_object *obj;
++	uint32_t old_read, old_write;
++
++	list_for_each_entry(obj, objects, exec_list) {
++		old_read = obj->base.read_domains;
++		old_write = obj->base.write_domain;
++
++		obj->base.read_domains = obj->base.pending_read_domains;
++		obj->base.write_domain = obj->base.pending_write_domain;
++		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
++
++		i915_gem_object_move_to_active(obj, ring, seqno);
++		if (obj->base.write_domain) {
++			obj->dirty = 1;
++			obj->pending_gpu_write = true;
++			list_move_tail(&obj->gpu_write_list,
++				       &ring->gpu_write_list);
++			intel_mark_busy(ring->dev, obj);
++		}
++		CTR3(KTR_DRM, "object_change_domain move_to_active %p %x %x",
++		    obj, old_read, old_write);
++	}
++}
++
++int i915_gem_sync_exec_requests;
++
++static void
++i915_gem_execbuffer_retire_commands(struct drm_device *dev,
++				    struct drm_file *file,
++				    struct intel_ring_buffer *ring)
++{
++	struct drm_i915_gem_request *request;
++	u32 invalidate;
++
++	/*
++	 * Ensure that the commands in the batch buffer are
++	 * finished before the interrupt fires.
++	 *
++	 * The sampler always gets flushed on i965 (sigh).
++	 */
++	invalidate = I915_GEM_DOMAIN_COMMAND;
++	if (INTEL_INFO(dev)->gen >= 4)
++		invalidate |= I915_GEM_DOMAIN_SAMPLER;
++	if (ring->flush(ring, invalidate, 0)) {
++		i915_gem_next_request_seqno(ring);
 +		return;
++	}
 +
-+	intel_fbdev_destroy(dev, dev_priv->fbdev);
-+	free(dev_priv->fbdev, DRM_MEM_KMS);
-+	dev_priv->fbdev = NULL;
++	/* Add a breadcrumb for the completion of the batch buffer */
++	request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
++	if (request == NULL || i915_add_request(ring, file, request)) {
++		i915_gem_next_request_seqno(ring);
++		free(request, DRM_I915_GEM);
++	} else if (i915_gem_sync_exec_requests)
++		i915_wait_request(ring, request->seqno);
 +}
 +
-+void intel_fb_output_poll_changed(struct drm_device *dev)
++static void
++i915_gem_fix_mi_batchbuffer_end(struct drm_i915_gem_object *batch_obj,
++    uint32_t batch_start_offset, uint32_t batch_len)
 +{
++	char *mkva;
++	uint64_t po_r, po_w;
++	uint32_t cmd;
++
++	po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset +
++	    batch_start_offset + batch_len;
++	if (batch_len > 0)
++		po_r -= 4;
++	mkva = pmap_mapdev_attr(trunc_page(po_r), 2 * PAGE_SIZE,
++	    PAT_WRITE_COMBINING);
++	cmd = *(uint32_t *)(mkva + (po_r & PAGE_MASK));
++
++	if (cmd != MI_BATCH_BUFFER_END) {
++		/*
++		 * batch_len != 0 due to the check at the start of
++		 * i915_gem_do_execbuffer
++		 */
++		if (batch_obj->base.size > batch_start_offset + batch_len) {
++			po_w = po_r + 4;
++/* DRM_ERROR("batchbuffer does not end by MI_BATCH_BUFFER_END !\n"); */
++		} else {
++			po_w = po_r;
++DRM_ERROR("batchbuffer does not end by MI_BATCH_BUFFER_END, overwriting last bo cmd !\n");
++		}
++
++		*(uint32_t *)(mkva + (po_w & PAGE_MASK)) = MI_BATCH_BUFFER_END;
++	}
++
++	pmap_unmapdev((vm_offset_t)mkva, 2 * PAGE_SIZE);
++}
++
++int i915_fix_mi_batchbuffer_end = 1;
++
++static int
++i915_reset_gen7_sol_offsets(struct drm_device *dev,
++			    struct intel_ring_buffer *ring)
++{
 +	drm_i915_private_t *dev_priv = dev->dev_private;
-+	drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
++	int ret, i;
++
++	if (!IS_GEN7(dev) || ring != &dev_priv->rings[RCS])
++		return 0;
++
++	ret = intel_ring_begin(ring, 4 * 3);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < 4; i++) {
++		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
++		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
++		intel_ring_emit(ring, 0);
++	}
++
++	intel_ring_advance(ring);
++
++	return 0;
 +}
 +
-+void intel_fb_restore_mode(struct drm_device *dev)
++static int
++i915_gem_do_execbuffer(struct drm_device *dev, void *data,
++		       struct drm_file *file,
++		       struct drm_i915_gem_execbuffer2 *args,
++		       struct drm_i915_gem_exec_object2 *exec)
 +{
-+	int ret;
 +	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct drm_mode_config *config = &dev->mode_config;
-+	struct drm_plane *plane;
++	struct list_head objects;
++	struct eb_objects *eb;
++	struct drm_i915_gem_object *batch_obj;
++	struct drm_clip_rect *cliprects = NULL;
++	struct intel_ring_buffer *ring;
++	vm_page_t **relocs_ma;
++	u32 exec_start, exec_len;
++	u32 seqno;
++	u32 mask;
++	int ret, mode, i;
 +
-+	ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
++	if (!i915_gem_check_execbuffer(args)) {
++		DRM_ERROR("execbuf with invalid offset/length\n");
++		return -EINVAL;
++	}
++
++	if (args->batch_len == 0)
++		return (0);
++
++	ret = validate_exec_list(exec, args->buffer_count, &relocs_ma);
++	if (ret != 0)
++		goto pre_struct_lock_err;
++
++	switch (args->flags & I915_EXEC_RING_MASK) {
++	case I915_EXEC_DEFAULT:
++	case I915_EXEC_RENDER:
++		ring = &dev_priv->rings[RCS];
++		break;
++	case I915_EXEC_BSD:
++		if (!HAS_BSD(dev)) {
++			DRM_ERROR("execbuf with invalid ring (BSD)\n");
++			return -EINVAL;
++		}
++		ring = &dev_priv->rings[VCS];
++		break;
++	case I915_EXEC_BLT:
++		if (!HAS_BLT(dev)) {
++			DRM_ERROR("execbuf with invalid ring (BLT)\n");
++			return -EINVAL;
++		}
++		ring = &dev_priv->rings[BCS];
++		break;
++	default:
++		DRM_ERROR("execbuf with unknown ring: %d\n",
++			  (int)(args->flags & I915_EXEC_RING_MASK));
++		ret = -EINVAL;
++		goto pre_struct_lock_err;
++	}
++
++	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
++	mask = I915_EXEC_CONSTANTS_MASK;
++	switch (mode) {
++	case I915_EXEC_CONSTANTS_REL_GENERAL:
++	case I915_EXEC_CONSTANTS_ABSOLUTE:
++	case I915_EXEC_CONSTANTS_REL_SURFACE:
++		if (ring == &dev_priv->rings[RCS] &&
++		    mode != dev_priv->relative_constants_mode) {
++			if (INTEL_INFO(dev)->gen < 4) {
++				ret = -EINVAL;
++				goto pre_struct_lock_err;
++			}
++
++			if (INTEL_INFO(dev)->gen > 5 &&
++			    mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
++				ret = -EINVAL;
++				goto pre_struct_lock_err;
++			}
++
++			/* The HW changed the meaning of this bit on gen6 */
++			if (INTEL_INFO(dev)->gen >= 6)
++				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
++		}
++		break;
++	default:
++		DRM_ERROR("execbuf with unknown constants: %d\n", mode);
++		ret = -EINVAL;
++		goto pre_struct_lock_err;
++	}
++
++	if (args->buffer_count < 1) {
++		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
++		ret = -EINVAL;
++		goto pre_struct_lock_err;
++	}
++
++	if (args->num_cliprects != 0) {
++		if (ring != &dev_priv->rings[RCS]) {
++	DRM_ERROR("clip rectangles are only valid with the render ring\n");
++			ret = -EINVAL;
++			goto pre_struct_lock_err;
++		}
++
++		cliprects = malloc(sizeof(*cliprects) * args->num_cliprects,
++		    DRM_I915_GEM, M_WAITOK | M_ZERO);
++		ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects,
++		    sizeof(*cliprects) * args->num_cliprects);
++		if (ret != 0)
++			goto pre_struct_lock_err;
++	}
++
++	ret = i915_mutex_lock_interruptible(dev);
 +	if (ret)
-+		DRM_DEBUG("failed to restore crtc mode\n"