Diffstat (limited to 'sys/arch/amd64')
-rw-r--r--   sys/arch/amd64/amd64/bus_dma.c        9
-rw-r--r--   sys/arch/amd64/amd64/sg_dma.c       960
-rw-r--r--   sys/arch/amd64/conf/files.amd64       3
-rw-r--r--   sys/arch/amd64/include/bus.h         47
-rw-r--r--   sys/arch/amd64/pci/iommu.c          394
-rw-r--r--   sys/arch/amd64/pci/pci_machdep.c      4
6 files changed, 1088 insertions, 329 deletions
diff --git a/sys/arch/amd64/amd64/bus_dma.c b/sys/arch/amd64/amd64/bus_dma.c
index fa0f31700f1..d1e427547f2 100644
--- a/sys/arch/amd64/amd64/bus_dma.c
+++ b/sys/arch/amd64/amd64/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.26 2009/04/20 00:42:05 oga Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.27 2009/04/21 17:05:29 oga Exp $ */
/* $NetBSD: bus_dma.c,v 1.3 2003/05/07 21:33:58 fvdl Exp $ */
/*-
@@ -646,6 +646,13 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
/* Always round the size. */
size = round_page(size);
+ segs[0]._ds_boundary = boundary;
+ segs[0]._ds_align = alignment;
+ if (flags & BUS_DMA_SG) {
+ boundary = 0;
+ alignment = 0;
+ }
+
/*
* Allocate pages from the VM system.
*/
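The hunk above is the producer half of a small contract used throughout this commit: _bus_dmamem_alloc_range() records the caller's boundary and alignment in segs[0] and, when BUS_DMA_SG is set, drops them for the physical allocation, because the scatter/gather map re-applies them later in DVMA space (see sg_dmamap_load_raw() in the new sg_dma.c below). A minimal userland sketch of both halves, with simplified types and hypothetical helper names rather than the real kernel interfaces:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define BUS_DMA_SG  0x2000
#define MAX(a, b)   ((a) > (b) ? (a) : (b))

struct seg { unsigned long _ds_boundary, _ds_align; };

/* Producer side: what the _bus_dmamem_alloc_range() hunk does. */
static void
stash_constraints(struct seg *segs, unsigned long *boundary,
    unsigned long *align, int flags)
{
    segs[0]._ds_boundary = *boundary;   /* remember the caller's request */
    segs[0]._ds_align = *align;
    if (flags & BUS_DMA_SG) {
        *boundary = 0;                  /* any physical pages will do */
        *align = 0;
    }
}

/* Consumer side: what sg_dmamap_load_raw() later does with segs[0]. */
static void
recover_constraints(const struct seg *segs, unsigned long map_boundary,
    unsigned long *boundary, unsigned long *align)
{
    *boundary = segs[0]._ds_boundary ? segs[0]._ds_boundary : map_boundary;
    *align = MAX(segs[0]._ds_align, PAGE_SIZE);
}

int
main(void)
{
    struct seg segs[1];
    unsigned long boundary = 0x10000, align = 0x100000, b, a;

    stash_constraints(segs, &boundary, &align, BUS_DMA_SG);
    recover_constraints(segs, 0, &b, &a);
    printf("physical alloc: boundary=%#lx align=%#lx\n", boundary, align);
    printf("DVMA alloc:     boundary=%#lx align=%#lx\n", b, a);
    return 0;
}

The point is that the physical pages can come from anywhere; only the bus addresses the device sees need to honour the alignment and boundary constraints.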
diff --git a/sys/arch/amd64/amd64/sg_dma.c b/sys/arch/amd64/amd64/sg_dma.c
new file mode 100644
index 00000000000..f9da3094342
--- /dev/null
+++ b/sys/arch/amd64/amd64/sg_dma.c
@@ -0,0 +1,960 @@
+/* $OpenBSD: sg_dma.c,v 1.1 2009/04/21 17:05:29 oga Exp $ */
+/*
+ * Copyright (c) 2009 Owain G. Ainsworth <oga@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Copyright (c) 2003 Henric Jungheim
+ * Copyright (c) 2001, 2002 Eduardo Horvath
+ * Copyright (c) 1999, 2000 Matthew R. Green
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Support for scatter/gather style dma through agp or an iommu.
+ */
+#include <sys/param.h>
+#include <sys/extent.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+
+#ifndef MAX_DMA_SEGS
+#define MAX_DMA_SEGS 20
+#endif
+
+#ifndef SMALL_KERNEL /* no bigmem needed in ramdisks */
+
+/*
+ * per-map DVMA page table
+ */
+struct sg_page_entry {
+ SPLAY_ENTRY(sg_page_entry) spe_node;
+ paddr_t spe_pa;
+ vaddr_t spe_va;
+};
+
+/* this is stored in the map's _dm_cookie. */
+struct sg_page_map {
+ SPLAY_HEAD(sg_page_tree, sg_page_entry) spm_tree;
+ int spm_maxpage; /* Size of allocated page map */
+ int spm_pagecnt; /* Number of entries in use */
+ bus_addr_t spm_start; /* dva when bound */
+ bus_size_t spm_size; /* size of bound map */
+ struct sg_page_entry spm_map[1];
+};
+
+int sg_dmamap_load_seg(bus_dma_tag_t, struct sg_cookie *, bus_dmamap_t,
+ bus_dma_segment_t *, int, int, bus_size_t, bus_size_t);
+struct sg_page_map *sg_iomap_create(int);
+int sg_dmamap_append_range(bus_dma_tag_t, bus_dmamap_t, paddr_t,
+ bus_size_t, int, bus_size_t);
+int sg_iomap_insert_page(struct sg_page_map *, paddr_t);
+vaddr_t sg_iomap_translate(struct sg_page_map *, paddr_t);
+void sg_iomap_load_map(struct sg_cookie *, struct sg_page_map *,
+ vaddr_t, int);
+void sg_iomap_unload_map(struct sg_cookie *, struct sg_page_map *);
+void sg_iomap_destroy(struct sg_page_map *);
+void sg_iomap_clear_pages(struct sg_page_map *);
+
+struct sg_cookie *
+sg_dmatag_init(char *name, void *hdl, bus_addr_t start, bus_size_t size,
+ void bind(void *, vaddr_t, paddr_t, int),
+ void unbind(void *, vaddr_t), void flush_tlb(void *))
+{
+ struct sg_cookie *cookie;
+
+ cookie = malloc(sizeof(*cookie), M_DEVBUF, M_NOWAIT|M_ZERO);
+ if (cookie == NULL)
+ return (NULL);
+
+ cookie->sg_ex = extent_create(name, start, start + size - 1,
+ M_DEVBUF, NULL, NULL, EX_NOWAIT | EX_NOCOALESCE);
+ if (cookie->sg_ex == NULL) {
+ free(cookie, M_DEVBUF);
+ return (NULL);
+ }
+
+ cookie->sg_hdl = hdl;
+ mtx_init(&cookie->sg_mtx, IPL_HIGH);
+ cookie->bind_page = bind;
+ cookie->unbind_page = unbind;
+ cookie->flush_tlb = flush_tlb;
+
+ return (cookie);
+}
+
+void
+sg_dmatag_destroy(struct sg_cookie *cookie)
+{
+ extent_destroy(cookie->sg_ex);
+ free(cookie, M_DEVBUF);
+}
+
+int
+sg_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
+ bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
+{
+ struct sg_page_map *spm;
+ bus_dmamap_t map;
+ int ret;
+
+ if ((ret = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
+ flags, &map)) != 0)
+ return (ret);
+
+ if ((spm = sg_iomap_create(atop(round_page(size)))) == NULL) {
+ _bus_dmamap_destroy(t, map);
+ return (ENOMEM);
+ }
+
+ map->_dm_cookie = spm;
+ *dmamap = map;
+
+ return (0);
+}
+
+void
+sg_dmamap_set_alignment(bus_dma_tag_t tag, bus_dmamap_t dmam,
+ u_long alignment)
+{
+ if (alignment < PAGE_SIZE)
+ return;
+
+ dmam->dm_segs[0]._ds_align = alignment;
+}
+
+void
+sg_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
+{
+ /*
+ * The specification (man page) requires a loaded
+ * map to be unloaded before it is destroyed.
+ */
+ if (map->dm_nsegs)
+ bus_dmamap_unload(t, map);
+
+ if (map->_dm_cookie)
+ sg_iomap_destroy(map->_dm_cookie);
+ map->_dm_cookie = NULL;
+ _bus_dmamap_destroy(t, map);
+}
+
+/*
+ * Load a contiguous kva buffer into a dmamap. The physical pages are
+ * not assumed to be contiguous. Two passes are made through the buffer
+ * and both call pmap_extract() for the same va->pa translations. It
+ * is possible to run out of pa->dvma mappings; the code should be smart
+ * enough to resize the iomap (when the "flags" permit allocation). It
+ * is trivial to compute the number of entries required (round the length
+ * up to the page size and then divide by the page size)...
+ */
+int
+sg_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct proc *p, int flags)
+{
+ int err = 0;
+ bus_size_t sgsize;
+ u_long dvmaddr, sgstart, sgend;
+ bus_size_t align, boundary;
+ struct sg_cookie *is = t->_cookie;
+ struct sg_page_map *spm = map->_dm_cookie;
+ pmap_t pmap;
+
+ if (map->dm_nsegs) {
+ /*
+ * Is it still in use? _bus_dmamap_load should have taken care
+ * of this.
+ */
+#ifdef DIAGNOSTIC
+ panic("sg_dmamap_load: map still in use");
+#endif
+ bus_dmamap_unload(t, map);
+ }
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_nsegs = 0;
+
+ if (buflen < 1 || buflen > map->_dm_size)
+ return (EINVAL);
+
+ /*
+ * A boundary presented to bus_dmamem_alloc() takes precedence
+ * over boundary in the map.
+ */
+ if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
+ boundary = map->_dm_boundary;
+ align = MAX(map->dm_segs[0]._ds_align, PAGE_SIZE);
+
+ pmap = p ? p->p_vmspace->vm_map.pmap : pmap_kernel();
+
+ /* Count up the total number of pages we need */
+ sg_iomap_clear_pages(spm);
+ { /* Scope */
+ bus_addr_t a, aend;
+ bus_addr_t addr = (vaddr_t)buf;
+ int seg_len = buflen;
+
+ aend = round_page(addr + seg_len);
+ for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
+ paddr_t pa;
+
+ if (pmap_extract(pmap, a, &pa) == FALSE) {
+ printf("iomap pmap error addr 0x%llx\n", a);
+ sg_iomap_clear_pages(spm);
+ return (EFBIG);
+ }
+
+ err = sg_iomap_insert_page(spm, pa);
+ if (err) {
+ printf("iomap insert error: %d for "
+ "va 0x%llx pa 0x%lx "
+ "(buf %p len %lld/%llx)\n",
+ err, a, pa, buf, buflen, buflen);
+ sg_iomap_clear_pages(spm);
+ return (EFBIG);
+ }
+ }
+ }
+ sgsize = spm->spm_pagecnt * PAGE_SIZE;
+
+ mtx_enter(&is->sg_mtx);
+ if (flags & BUS_DMA_24BIT) {
+ sgstart = MAX(is->sg_ex->ex_start, 0xff000000);
+ sgend = MIN(is->sg_ex->ex_end, 0xffffffff);
+ } else {
+ sgstart = is->sg_ex->ex_start;
+ sgend = is->sg_ex->ex_end;
+ }
+
+ /*
+ * If our segment size is larger than the boundary we need to
+ * split the transfer up into little pieces ourselves.
+ */
+ err = extent_alloc_subregion(is->sg_ex, sgstart, sgend,
+ sgsize, align, 0, (sgsize > boundary) ? 0 : boundary,
+ EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
+ mtx_leave(&is->sg_mtx);
+ if (err != 0)
+ return (err);
+
+ /* Set the active DVMA map */
+ spm->spm_start = dvmaddr;
+ spm->spm_size = sgsize;
+
+ map->dm_mapsize = buflen;
+
+ sg_iomap_load_map(is, spm, dvmaddr, flags);
+
+ { /* Scope */
+ bus_addr_t a, aend;
+ bus_addr_t addr = (vaddr_t)buf;
+ int seg_len = buflen;
+
+ aend = round_page(addr + seg_len);
+ for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
+ bus_addr_t pgstart;
+ bus_addr_t pgend;
+ paddr_t pa;
+ int pglen;
+
+ /* Yuck... Redoing the same pmap_extract... */
+ if (pmap_extract(pmap, a, &pa) == FALSE) {
+ printf("iomap pmap error addr 0x%llx\n", a);
+ err = EFBIG;
+ break;
+ }
+
+ pgstart = pa | (MAX(a, addr) & PAGE_MASK);
+ pgend = pa | (MIN(a + PAGE_SIZE - 1,
+ addr + seg_len - 1) & PAGE_MASK);
+ pglen = pgend - pgstart + 1;
+
+ if (pglen < 1)
+ continue;
+
+ err = sg_dmamap_append_range(t, map, pgstart,
+ pglen, flags, boundary);
+ if (err == EFBIG)
+ break;
+ else if (err) {
+ printf("iomap load seg page: %d for "
+ "va 0x%llx pa %lx (%llx - %llx) "
+ "for %d/0x%x\n",
+ err, a, pa, pgstart, pgend, pglen, pglen);
+ break;
+ }
+ }
+ }
+ if (err) {
+ sg_iomap_unload_map(is, spm);
+ sg_iomap_clear_pages(spm);
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+ mtx_enter(&is->sg_mtx);
+ extent_free(is->sg_ex, dvmaddr, sgsize, EX_NOWAIT);
+ spm->spm_start = 0;
+ spm->spm_size = 0;
+ mtx_leave(&is->sg_mtx);
+ }
+
+ return (err);
+}
+
+/*
+ * Load an mbuf into our map. We convert it to an array of bus_dma_segment_ts
+ * and then pass that to load_raw().
+ */
+int
+sg_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *mb,
+ int flags)
+{
+ /*
+ * This code is adapted from sparc64; for very fragmented data
+ * we may need to adapt the algorithm.
+ */
+ bus_dma_segment_t segs[MAX_DMA_SEGS];
+ size_t len;
+ int i;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ if (mb->m_pkthdr.len > map->_dm_size)
+ return (EINVAL);
+
+ i = 0;
+ len = 0;
+ while (mb) {
+ vaddr_t vaddr = mtod(mb, vaddr_t);
+ long buflen = (long)mb->m_len;
+
+ len += buflen;
+ while (buflen > 0 && i < MAX_DMA_SEGS) {
+ paddr_t pa;
+ long incr;
+
+ incr = min(buflen, NBPG);
+
+ if (pmap_extract(pmap_kernel(), vaddr, &pa) == FALSE)
+ return EINVAL;
+
+ buflen -= incr;
+ vaddr += incr;
+
+ if (i > 0 && pa == (segs[i - 1].ds_addr +
+ segs[i - 1].ds_len) && ((segs[i - 1].ds_len + incr)
+ < map->_dm_maxsegsz)) {
+ /* contiguous, great! */
+ segs[i - 1].ds_len += incr;
+ continue;
+ }
+ segs[i].ds_addr = pa;
+ segs[i].ds_len = incr;
+ segs[i]._ds_boundary = 0;
+ segs[i]._ds_align = 0;
+ i++;
+ }
+ mb = mb->m_next;
+ if (mb && i >= MAX_DMA_SEGS) {
+ /* our map, it is too big! */
+ return (EFBIG);
+ }
+ }
+
+ return (sg_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
+}
+
+/*
+ * Load a uio into the map. Turn it into segments and call load_raw()
+ */
+int
+sg_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
+ int flags)
+{
+ /*
+ * Loading uios is kinda broken since we can't lock the pages
+ * and unlock them at unload. Perhaps page loaning is the answer.
+ * Until then we only accept kernel data.
+ */
+ bus_dma_segment_t segs[MAX_DMA_SEGS];
+ size_t len;
+ int i, j;
+
+ /*
+ * Make sure that on error we return "no valid mappings".
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ if (uio->uio_resid > map->_dm_size)
+ return (EINVAL);
+
+ if (uio->uio_segflg != UIO_SYSSPACE)
+ return (EOPNOTSUPP);
+
+ i = j = 0;
+ len = 0;
+ while (j < uio->uio_iovcnt) {
+ vaddr_t vaddr = (vaddr_t)uio->uio_iov[j].iov_base;
+ long buflen = (long)uio->uio_iov[j].iov_len;
+
+ len += buflen;
+ while (buflen > 0 && i < MAX_DMA_SEGS) {
+ paddr_t pa;
+ long incr;
+
+ incr = min(buflen, NBPG);
+ (void)pmap_extract(pmap_kernel(), vaddr, &pa);
+ buflen -= incr;
+ vaddr += incr;
+
+ if (i > 0 && pa == (segs[i - 1].ds_addr +
+ segs[i - 1].ds_len) && ((segs[i - 1].ds_len + incr)
+ < map->_dm_maxsegsz)) {
+ /* contiguous, yay! */
+ segs[i - 1].ds_len += incr;
+ continue;
+ }
+ segs[i].ds_addr = pa;
+ segs[i].ds_len = incr;
+ segs[i]._ds_boundary = 0;
+ segs[i]._ds_align = 0;
+ i++;
+ }
+ j++;
+ if ((uio->uio_iovcnt - j) && i >= MAX_DMA_SEGS) {
+ /* our map, it is too big! */
+ return (EFBIG);
+ }
+
+ }
+
+ return (sg_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
+}
+
+/*
+ * Load a dvmamap from an array of segs. It calls sg_dmamap_append_range()
+ * (via sg_dmamap_load_seg()) for the 2nd pass through the mapping.
+ */
+int
+sg_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
+{
+ int i;
+ int left;
+ int err = 0;
+ bus_size_t sgsize;
+ bus_size_t boundary, align;
+ u_long dvmaddr, sgstart, sgend;
+ struct sg_cookie *is = t->_cookie;
+ struct sg_page_map *spm = map->_dm_cookie;
+
+ if (map->dm_nsegs) {
+ /* Already in use?? */
+#ifdef DIAGNOSTIC
+ panic("sg_dmamap_load_raw: map still in use");
+#endif
+ bus_dmamap_unload(t, map);
+ }
+
+ /*
+ * A boundary presented to bus_dmamem_alloc() takes precedence
+ * over boundary in the map.
+ */
+ if ((boundary = segs[0]._ds_boundary) == 0)
+ boundary = map->_dm_boundary;
+
+ align = MAX(segs[0]._ds_align, PAGE_SIZE);
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_nsegs = 0;
+
+ sg_iomap_clear_pages(spm);
+ /* Count up the total number of pages we need */
+ for (i = 0, left = size; left > 0 && i < nsegs; i++) {
+ bus_addr_t a, aend;
+ bus_size_t len = segs[i].ds_len;
+ bus_addr_t addr = segs[i].ds_addr;
+ int seg_len = MIN(left, len);
+
+ if (len < 1)
+ continue;
+
+ aend = round_page(addr + seg_len);
+ for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
+
+ err = sg_iomap_insert_page(spm, a);
+ if (err) {
+ printf("iomap insert error: %d for "
+ "pa 0x%llx\n", err, a);
+ sg_iomap_clear_pages(spm);
+ return (EFBIG);
+ }
+ }
+
+ left -= seg_len;
+ }
+ sgsize = spm->spm_pagecnt * PAGE_SIZE;
+
+ mtx_enter(&is->sg_mtx);
+ if (flags & BUS_DMA_24BIT) {
+ sgstart = MAX(is->sg_ex->ex_start, 0xff000000);
+ sgend = MIN(is->sg_ex->ex_end, 0xffffffff);
+ } else {
+ sgstart = is->sg_ex->ex_start;
+ sgend = is->sg_ex->ex_end;
+ }
+
+ /*
+ * If our segment size is larger than the boundary we need to
+ * split the transfer up into little pieces ourselves.
+ */
+ err = extent_alloc_subregion(is->sg_ex, sgstart, sgend,
+ sgsize, align, 0, (sgsize > boundary) ? 0 : boundary,
+ EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
+ mtx_leave(&is->sg_mtx);
+
+ if (err != 0)
+ return (err);
+
+ /* Set the active DVMA map */
+ spm->spm_start = dvmaddr;
+ spm->spm_size = sgsize;
+
+ map->dm_mapsize = size;
+
+ sg_iomap_load_map(is, spm, dvmaddr, flags);
+
+ err = sg_dmamap_load_seg(t, is, map, segs, nsegs, flags,
+ size, boundary);
+
+ if (err) {
+ sg_iomap_unload_map(is, spm);
+ sg_iomap_clear_pages(spm);
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+ mtx_enter(&is->sg_mtx);
+ extent_free(is->sg_ex, dvmaddr, sgsize, EX_NOWAIT);
+ spm->spm_start = 0;
+ spm->spm_size = 0;
+ mtx_leave(&is->sg_mtx);
+ }
+
+ return (err);
+}
+
+/*
+ * Insert a range of addresses into a loaded map respecting the specified
+ * boundary and alignment restrictions. The range is specified by its
+ * physical address and length. The range cannot cross a page boundary.
+ * This code (along with most of the rest of the function in this file)
+ * assumes that the IOMMU page size is equal to PAGE_SIZE.
+ */
+int
+sg_dmamap_append_range(bus_dma_tag_t t, bus_dmamap_t map, paddr_t pa,
+ bus_size_t length, int flags, bus_size_t boundary)
+{
+ struct sg_page_map *spm = map->_dm_cookie;
+ bus_addr_t sgstart, sgend, bd_mask;
+ bus_dma_segment_t *seg = NULL;
+ int i = map->dm_nsegs;
+
+ sgstart = sg_iomap_translate(spm, pa);
+ sgend = sgstart + length - 1;
+
+#ifdef DIAGNOSTIC
+ if (sgstart == 0 || sgstart > sgend) {
+ printf("append range invalid mapping for %lx "
+ "(0x%llx - 0x%llx)\n", pa, sgstart, sgend);
+ map->dm_nsegs = 0;
+ return (EINVAL);
+ }
+#endif
+
+#ifdef DEBUG
+ if (trunc_page(sgstart) != trunc_page(sgend)) {
+ printf("append range crossing page boundary! "
+ "pa %lx length %lld/0x%llx sgstart %llx sgend %llx\n",
+ pa, length, length, sgstart, sgend);
+ }
+#endif
+
+ /*
+ * We will attempt to merge this range with the previous entry
+ * (if there is one).
+ */
+ if (i > 0) {
+ seg = &map->dm_segs[i - 1];
+ if (sgstart == seg->ds_addr + seg->ds_len) {
+ length += seg->ds_len;
+ sgstart = seg->ds_addr;
+ sgend = sgstart + length - 1;
+ } else
+ seg = NULL;
+ }
+
+ if (seg == NULL) {
+ seg = &map->dm_segs[i];
+ if (++i > map->_dm_segcnt) {
+ map->dm_nsegs = 0;
+ return (EFBIG);
+ }
+ }
+
+ /*
+ * At this point, "i" is the index of the *next* bus_dma_segment_t
+ * (the segment count, aka map->dm_nsegs) and "seg" points to the
+ * *current* entry. "length", "sgstart", and "sgend" reflect what
+ * we intend to put in "*seg". No assumptions should be made about
+ * the contents of "*seg". Only a "boundary" crossing can change this,
+ * and "boundary" is often zero, so explicitly test for that case
+ * (the test is strictly an optimization).
+ */
+ if (boundary != 0) {
+ bd_mask = ~(boundary - 1);
+
+ while ((sgstart & bd_mask) != (sgend & bd_mask)) {
+ /*
+ * We are crossing a boundary so fill in the current
+ * segment with as much as possible, then grab a new
+ * one.
+ */
+
+ seg->ds_addr = sgstart;
+ seg->ds_len = boundary - (sgstart & ~bd_mask);
+
+ sgstart += seg->ds_len; /* sgend stays the same */
+ length -= seg->ds_len;
+
+ seg = &map->dm_segs[i];
+ if (++i > map->_dm_segcnt) {
+ map->dm_nsegs = 0;
+ return (EFBIG);
+ }
+ }
+ }
+
+ seg->ds_addr = sgstart;
+ seg->ds_len = length;
+ map->dm_nsegs = i;
+
+ return (0);
+}
+
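The boundary handling in sg_dmamap_append_range() above is easier to see with concrete numbers. Here is a standalone sketch of the same mask-and-chop loop (plain userland C, not part of the patch), assuming the boundary is a power of two as bus_dma requires:

#include <stdio.h>

/* Split [start, start + len) so no piece crosses a multiple of boundary. */
static void
split_on_boundary(unsigned long start, unsigned long len, unsigned long boundary)
{
    unsigned long end = start + len - 1;
    unsigned long mask = ~(boundary - 1);

    while ((start & mask) != (end & mask)) {
        /* fill the current segment up to the next boundary */
        unsigned long seglen = boundary - (start & (boundary - 1));

        printf("seg: addr %#lx len %#lx\n", start, seglen);
        start += seglen;
        len -= seglen;
    }
    printf("seg: addr %#lx len %#lx\n", start, len);
}

int
main(void)
{
    /* a 10KB range at 0x3000 crosses the 8KB boundary at 0x4000 */
    split_on_boundary(0x3000, 0x2800, 0x2000);
    return 0;
}

This prints two segments, 0x3000/0x1000 and 0x4000/0x1800, which is how the dm_segs array ends up when a merged DVMA range straddles a boundary.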
+/*
+ * Populate the iomap from a bus_dma_segment_t array. See note for
+ * sg_dmamap_load() regarding page entry exhaustion of the iomap.
+ * This is less of a problem for load_seg, as the number of pages
+ * is usually similar to the number of segments (nsegs).
+ */
+int
+sg_dmamap_load_seg(bus_dma_tag_t t, struct sg_cookie *is,
+ bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int flags,
+ bus_size_t size, bus_size_t boundary)
+{
+ int i;
+ int left;
+ int seg;
+
+ /*
+ * Keep in mind that each segment could span
+ * multiple pages and that these are not always
+ * adjacent. The code is no longer adding dvma
+ * aliases to the IOMMU. The STC will not cross
+ * page boundaries anyway and an IOMMU table walk
+ * vs. what may be a streamed PCI DMA to a ring
+ * descriptor is probably a wash. It eases TLB
+ * pressure and in the worst possible case, it is
+ * only as bad as a non-IOMMUed architecture. More
+ * importantly, the code is not quite as hairy.
+ * (It's bad enough as it is.)
+ */
+ left = size;
+ seg = 0;
+ for (i = 0; left > 0 && i < nsegs; i++) {
+ bus_addr_t a, aend;
+ bus_size_t len = segs[i].ds_len;
+ bus_addr_t addr = segs[i].ds_addr;
+ int seg_len = MIN(left, len);
+
+ if (len < 1)
+ continue;
+
+ aend = round_page(addr + seg_len);
+ for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
+ bus_addr_t pgstart;
+ bus_addr_t pgend;
+ int pglen;
+ int err;
+
+ pgstart = MAX(a, addr);
+ pgend = MIN(a + PAGE_SIZE - 1, addr + seg_len - 1);
+ pglen = pgend - pgstart + 1;
+
+ if (pglen < 1)
+ continue;
+
+ err = sg_dmamap_append_range(t, map, pgstart,
+ pglen, flags, boundary);
+ if (err == EFBIG)
+ return (err);
+ if (err) {
+ printf("iomap load seg page: %d for "
+ "pa 0x%llx (%llx - %llx for %d/%x\n",
+ err, a, pgstart, pgend, pglen, pglen);
+ return (err);
+ }
+
+ }
+
+ left -= seg_len;
+ }
+ return (0);
+}
+
+/*
+ * Unload a dvmamap.
+ */
+void
+sg_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
+{
+ struct sg_cookie *is = t->_cookie;
+ struct sg_page_map *spm = map->_dm_cookie;
+ bus_addr_t dvmaddr = spm->spm_start;
+ bus_size_t sgsize = spm->spm_size;
+ int error;
+
+ /* Remove the IOMMU entries */
+ sg_iomap_unload_map(is, spm);
+
+ /* Clear the iomap */
+ sg_iomap_clear_pages(spm);
+
+ mtx_enter(&is->sg_mtx);
+ error = extent_free(is->sg_ex, dvmaddr,
+ sgsize, EX_NOWAIT);
+ spm->spm_start = 0;
+ spm->spm_size = 0;
+ mtx_leave(&is->sg_mtx);
+ if (error != 0)
+ printf("warning: %qd of DVMA space lost\n", sgsize);
+ _bus_dmamap_unload(t, map);
+}
+
+/*
+ * Allocate DMA-safe memory, telling the backend that we're scatter/gather
+ * so that it can ease pressure on the vm.
+ *
+ * This assumes that we can map all physical memory.
+ */
+int
+sg_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
+ int nsegs, int *rsegs, int flags)
+{
+ return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
+ segs, nsegs, rsegs, flags | BUS_DMA_SG, 0, -1));
+}
+
+/*
+ * Create a new iomap.
+ */
+struct sg_page_map *
+sg_iomap_create(int n)
+{
+ struct sg_page_map *spm;
+
+ /* Safety for heavily fragmented data, such as mbufs */
+ n += 4;
+ if (n < 16)
+ n = 16;
+
+ spm = malloc(sizeof(*spm) + (n - 1) * sizeof(spm->spm_map[0]),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (spm == NULL)
+ return (NULL);
+
+ /* Initialize the map. */
+ spm->spm_maxpage = n;
+ SPLAY_INIT(&spm->spm_tree);
+
+ return (spm);
+}
+
+/*
+ * Destroy an iomap.
+ */
+void
+sg_iomap_destroy(struct sg_page_map *spm)
+{
+#ifdef DIAGNOSTIC
+ if (spm->spm_pagecnt > 0)
+ printf("sg_iomap_destroy: %d page entries in use\n",
+ spm->spm_pagecnt);
+#endif
+
+ free(spm, M_DEVBUF);
+}
+
+/*
+ * Utility function used by splay tree to order page entries by pa.
+ */
+static inline int
+iomap_compare(struct sg_page_entry *a, struct sg_page_entry *b)
+{
+ return ((a->spe_pa > b->spe_pa) ? 1 :
+ (a->spe_pa < b->spe_pa) ? -1 : 0);
+}
+
+SPLAY_PROTOTYPE(sg_page_tree, sg_page_entry, spe_node, iomap_compare);
+
+SPLAY_GENERATE(sg_page_tree, sg_page_entry, spe_node, iomap_compare);
+
+/*
+ * Insert a pa entry in the iomap.
+ */
+int
+sg_iomap_insert_page(struct sg_page_map *spm, paddr_t pa)
+{
+ struct sg_page_entry *e;
+
+ if (spm->spm_pagecnt >= spm->spm_maxpage) {
+ struct sg_page_entry spe;
+
+ spe.spe_pa = pa;
+ if (SPLAY_FIND(sg_page_tree, &spm->spm_tree, &spe))
+ return (0);
+
+ return (ENOMEM);
+ }
+
+ e = &spm->spm_map[spm->spm_pagecnt];
+
+ e->spe_pa = pa;
+ e->spe_va = 0;
+
+ e = SPLAY_INSERT(sg_page_tree, &spm->spm_tree, e);
+
+ /* Duplicates are okay, but only count them once. */
+ if (e)
+ return (0);
+
+ ++spm->spm_pagecnt;
+
+ return (0);
+}
+
+/*
+ * Load the iomap by filling in the pa->va mapping for each page and
+ * inserting it into the IOMMU tables.
+ */
+void
+sg_iomap_load_map(struct sg_cookie *sc, struct sg_page_map *spm,
+ vaddr_t vmaddr, int flags)
+{
+ struct sg_page_entry *e;
+ int i;
+
+ for (i = 0, e = spm->spm_map; i < spm->spm_pagecnt; ++i, ++e) {
+ e->spe_va = vmaddr;
+ sc->bind_page(sc->sg_hdl, e->spe_va, e->spe_pa, flags);
+ vmaddr += PAGE_SIZE;
+ }
+ sc->flush_tlb(sc->sg_hdl);
+}
+
+/*
+ * Remove the iomap from the IOMMU.
+ */
+void
+sg_iomap_unload_map(struct sg_cookie *sc, struct sg_page_map *spm)
+{
+ struct sg_page_entry *e;
+ int i;
+
+ for (i = 0, e = spm->spm_map; i < spm->spm_pagecnt; ++i, ++e)
+ sc->unbind_page(sc->sg_hdl, e->spe_va);
+ sc->flush_tlb(sc->sg_hdl);
+
+}
+
+/*
+ * Translate a physical address (pa) into a DVMA address.
+ */
+vaddr_t
+sg_iomap_translate(struct sg_page_map *spm, paddr_t pa)
+{
+ struct sg_page_entry *e, pe;
+ paddr_t offset = pa & PAGE_MASK;
+
+ pe.spe_pa = trunc_page(pa);
+
+ e = SPLAY_FIND(sg_page_tree, &spm->spm_tree, &pe);
+
+ if (e == NULL)
+ return (0);
+
+ return (e->spe_va | offset);
+}
+
+/*
+ * Clear the iomap table and tree.
+ */
+void
+sg_iomap_clear_pages(struct sg_page_map *spm)
+{
+ spm->spm_pagecnt = 0;
+ SPLAY_INIT(&spm->spm_tree);
+}
+
+
+#endif /* !SMALL_KERNEL */
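One detail worth keeping in mind when reading sg_dmamap_load() above: because the buffer need not start on a page boundary, both passes walk from trunc_page(addr) to round_page(addr + len), so the number of IOMMU page entries can be one more than a naive len / PAGE_SIZE. A small sketch of that calculation (hypothetical helper name, plain userland C):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (PAGE_SIZE - 1)
#define trunc_page(x)   ((x) & ~PAGE_MASK)
#define round_page(x)   (((x) + PAGE_MASK) & ~PAGE_MASK)

/* Number of IOMMU page-table entries a buffer at addr/len will consume. */
static unsigned long
iomap_pages(unsigned long addr, unsigned long len)
{
    return (round_page(addr + len) - trunc_page(addr)) / PAGE_SIZE;
}

int
main(void)
{
    /* 8KB starting mid-page spans three pages, not two */
    printf("%lu pages\n", iomap_pages(0x1800, 0x2000));
    return 0;
}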
diff --git a/sys/arch/amd64/conf/files.amd64 b/sys/arch/amd64/conf/files.amd64
index 0c7522be0a7..7101f0b669f 100644
--- a/sys/arch/amd64/conf/files.amd64
+++ b/sys/arch/amd64/conf/files.amd64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.amd64,v 1.45 2009/04/20 13:26:20 ariane Exp $
+# $OpenBSD: files.amd64,v 1.46 2009/04/21 17:05:29 oga Exp $
maxpartitions 16
maxusers 2 16 128
@@ -28,6 +28,7 @@ file arch/amd64/amd64/lock_machdep.c multiprocessor
file arch/amd64/amd64/intr.c
file arch/amd64/amd64/bus_space.c
file arch/amd64/amd64/bus_dma.c
+file arch/amd64/amd64/sg_dma.c
file arch/amd64/amd64/mptramp.S multiprocessor
file arch/amd64/amd64/ipifuncs.c multiprocessor
diff --git a/sys/arch/amd64/include/bus.h b/sys/arch/amd64/include/bus.h
index 1bb7ad405ea..8952c1a0678 100644
--- a/sys/arch/amd64/include/bus.h
+++ b/sys/arch/amd64/include/bus.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus.h,v 1.16 2009/04/20 00:42:05 oga Exp $ */
+/* $OpenBSD: bus.h,v 1.17 2009/04/21 17:05:29 oga Exp $ */
/* $NetBSD: bus.h,v 1.6 1996/11/10 03:19:25 thorpej Exp $ */
/*-
@@ -66,6 +66,8 @@
#ifndef _X86_BUS_H_
#define _X86_BUS_H_
+#include <sys/mutex.h>
+
#include <machine/pio.h>
/*
@@ -447,6 +449,7 @@ void bus_space_barrier(bus_space_tag_t, bus_space_handle_t,
#define BUS_DMA_WRITE 0x0400 /* mapping is memory -> device only */
#define BUS_DMA_NOCACHE 0x0800 /* map memory uncached */
#define BUS_DMA_ZERO 0x1000 /* zero memory in dmamem_alloc */
+#define BUS_DMA_SG 0x2000 /* Internal. memory is for SG map */
/* Forwards needed by prototypes below. */
struct mbuf;
@@ -473,6 +476,13 @@ typedef struct bus_dmamap *bus_dmamap_t;
struct bus_dma_segment {
bus_addr_t ds_addr; /* DMA address */
bus_size_t ds_len; /* length of transfer */
+ /*
+ * Ugh. We need this so we can pass alignment down from bus_dmamem_alloc
+ * to scatter/gather maps. Only the first segment is used, so the rest is
+ * wasted space. bus_dma could do with fixing the API for this.
+ */
+ bus_size_t _ds_boundary; /* don't cross */
+ bus_size_t _ds_align; /* align to me */
};
typedef struct bus_dma_segment bus_dma_segment_t;
@@ -604,6 +614,41 @@ int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
paddr_t low, paddr_t high);
+struct extent;
+
+/* Scatter gather bus_dma functions. */
+struct sg_cookie {
+ struct mutex sg_mtx;
+ struct extent *sg_ex;
+ void *sg_hdl;
+ void (*bind_page)(void *, vaddr_t, paddr_t, int);
+ void (*unbind_page)(void *, vaddr_t);
+ void (*flush_tlb)(void *);
+};
+
+struct sg_cookie *sg_dmatag_init(char *, void *, bus_addr_t, bus_size_t,
+ void (*)(void *, vaddr_t, paddr_t, int),
+ void (*)(void *, vaddr_t), void (*)(void *));
+void sg_dmatag_destroy(struct sg_cookie *);
+int sg_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
+ bus_size_t, int, bus_dmamap_t *);
+void sg_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
+void sg_dmamap_set_alignment(bus_dma_tag_t, bus_dmamap_t, u_long);
+int sg_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
+ struct proc *, int);
+int sg_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int);
+int sg_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
+int sg_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
+ int, bus_size_t, int);
+void sg_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
+int sg_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
+ struct proc *, int, int *, int);
+int sg_dmamap_load_physarray(bus_dma_tag_t, bus_dmamap_t, paddr_t *,
+ int, int, int *, int);
+int sg_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
+ bus_dma_segment_t *, int, int *, int);
+
/*
* paddr_t bus_space_mmap(bus_space_tag_t t, bus_addr_t base,
* off_t offset, int prot, int flags);
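struct sg_cookie is the entire interface a backend has to supply: an opaque handle plus the bind_page/unbind_page/flush_tlb hooks passed to sg_dmatag_init(). A toy sketch of the call order sg_dma.c imposes on those hooks (illustrative names and a userland stand-in for the handle; the amdgart rework below is the real consumer):

#include <stdio.h>

typedef unsigned long vaddr_t;  /* stand-ins for the kernel types */
typedef unsigned long paddr_t;

static void
toy_bind(void *hdl, vaddr_t dva, paddr_t pa, int flags)
{
    /* one call per page while a map is loaded: enter dva -> pa */
    printf("bind   dva %#lx -> pa %#lx\n", dva, pa);
}

static void
toy_unbind(void *hdl, vaddr_t dva)
{
    /* one call per page at unload: detach dva */
    printf("unbind dva %#lx\n", dva);
}

static void
toy_flush(void *hdl)
{
    /* once after each batch of binds or unbinds: invalidate the IOTLB */
    printf("flush\n");
}

int
main(void)
{
    /* roughly what sg_iomap_load_map() and sg_iomap_unload_map() amount to */
    toy_bind(NULL, 0x100000, 0x7f000, 0);
    toy_bind(NULL, 0x101000, 0x23000, 0);
    toy_flush(NULL);
    toy_unbind(NULL, 0x100000);
    toy_unbind(NULL, 0x101000);
    toy_flush(NULL);
    return 0;
}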
diff --git a/sys/arch/amd64/pci/iommu.c b/sys/arch/amd64/pci/iommu.c
index 18c11bbf21d..50eeb79493b 100644
--- a/sys/arch/amd64/pci/iommu.c
+++ b/sys/arch/amd64/pci/iommu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: iommu.c,v 1.27 2009/04/15 23:53:22 oga Exp $ */
+/* $OpenBSD: iommu.c,v 1.28 2009/04/21 17:05:29 oga Exp $ */
/*
* Copyright (c) 2005 Jason L. Wright (jason@thought.net)
@@ -33,7 +33,6 @@
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/lock.h>
-#include <sys/extent.h>
#include <sys/malloc.h>
#include <uvm/uvm_extern.h>
@@ -100,14 +99,12 @@
#define IOMMU_SIZE 512 /* size in MB */
#define IOMMU_ALIGN IOMMU_SIZE
-extern paddr_t avail_end;
-extern struct extent *iomem_ex;
-
int amdgart_enable = 0;
+#ifndef SMALL_KERNEL /* no bigmem in ramdisks */
+
struct amdgart_softc {
pci_chipset_tag_t g_pc;
- struct extent *g_ex;
paddr_t g_pa;
paddr_t g_scribpa;
void *g_scrib;
@@ -118,58 +115,56 @@ struct amdgart_softc {
pcitag_t g_tags[1];
};
-void amdgart_invalidate_wait(struct amdgart_softc *);
-void amdgart_invalidate(struct amdgart_softc *);
-void amdgart_probe(struct pcibus_attach_args *);
-void amdgart_dumpregs(struct amdgart_softc *);
-int amdgart_iommu_map(struct amdgart_softc *, bus_dmamap_t,
- bus_dma_segment_t *);
-int amdgart_iommu_unmap(struct amdgart_softc *, bus_dma_segment_t *);
-int amdgart_reload(struct amdgart_softc *, bus_dmamap_t);
-int amdgart_ok(pci_chipset_tag_t, pcitag_t);
-int amdgart_enabled(pci_chipset_tag_t, pcitag_t);
-void amdgart_initpt(struct amdgart_softc *, u_long);
-
-int amdgart_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
- bus_size_t, int, bus_dmamap_t *);
-void amdgart_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
-int amdgart_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
- struct proc *, int);
-int amdgart_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
-int amdgart_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
-int amdgart_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
- bus_dma_segment_t *, int, bus_size_t, int);
-void amdgart_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-void amdgart_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
- bus_size_t, int);
-
-int amdgart_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
- bus_dma_segment_t *, int, int *, int);
-void amdgart_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
-int amdgart_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, int, size_t,
- caddr_t *, int);
-void amdgart_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
-paddr_t amdgart_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *, int, off_t,
- int, int);
+void amdgart_probe(struct pcibus_attach_args *);
+void amdgart_dumpregs(struct amdgart_softc *);
+int amdgart_ok(pci_chipset_tag_t, pcitag_t);
+int amdgart_enabled(pci_chipset_tag_t, pcitag_t);
+void amdgart_initpt(struct amdgart_softc *, u_long);
+void amdgart_bind_page(void *, vaddr_t, paddr_t, int);
+void amdgart_unbind_page(void *, vaddr_t);
+void amdgart_invalidate(void *);
+void amdgart_invalidate_wait(struct amdgart_softc *);
struct bus_dma_tag amdgart_bus_dma_tag = {
NULL, /* _may_bounce */
- amdgart_dmamap_create,
- amdgart_dmamap_destroy,
- amdgart_dmamap_load,
- amdgart_dmamap_load_mbuf,
- amdgart_dmamap_load_uio,
- amdgart_dmamap_load_raw,
- amdgart_dmamap_unload,
+ sg_dmamap_create,
+ sg_dmamap_destroy,
+ sg_dmamap_load,
+ sg_dmamap_load_mbuf,
+ sg_dmamap_load_uio,
+ sg_dmamap_load_raw,
+ sg_dmamap_unload,
NULL,
- amdgart_dmamem_alloc,
- amdgart_dmamem_free,
- amdgart_dmamem_map,
- amdgart_dmamem_unmap,
- amdgart_dmamem_mmap,
+ sg_dmamem_alloc,
+ _bus_dmamem_free,
+ _bus_dmamem_map,
+ _bus_dmamem_unmap,
+ _bus_dmamem_mmap,
};
void
+amdgart_bind_page(void *handle, vaddr_t offset, paddr_t page, int flags)
+{
+ struct amdgart_softc *sc = handle;
+ u_int32_t pgno, pte;
+
+ pgno = (offset - sc->g_pa) >> PGSHIFT;
+ pte = GART_PTE_VALID | GART_PTE_COHERENT |
+ ((page >> 28) & GART_PTE_PHYSHI) | (page & GART_PTE_PHYSLO);
+ sc->g_pte[pgno] = pte;
+}
+
+void
+amdgart_unbind_page(void *handle, vaddr_t offset)
+{
+ struct amdgart_softc *sc = handle;
+ u_int32_t pgno;
+
+ pgno = (offset - sc->g_pa) >> PGSHIFT;
+ sc->g_pte[pgno] = sc->g_scribpte;
+}
+
+void
amdgart_invalidate_wait(struct amdgart_softc *sc)
{
int i, n;
@@ -187,8 +182,9 @@ amdgart_invalidate_wait(struct amdgart_softc *sc)
}
void
-amdgart_invalidate(struct amdgart_softc *sc)
+amdgart_invalidate(void *handle)
{
+ struct amdgart_softc *sc = handle;
int n;
for (n = 0; n < sc->g_count; n++)
@@ -257,16 +253,16 @@ static const struct gart_size {
void
amdgart_probe(struct pcibus_attach_args *pba)
{
- struct amdgart_softc *sc;
- int dev, count = 0, encount = 0, r, nseg;
- u_long mapsize, ptesize, gartsize = 0;
- bus_dma_segment_t seg;
- pcitag_t tag;
- pcireg_t v;
- paddr_t pa;
- void *scrib = NULL;
- u_int32_t *pte = NULL;
- paddr_t ptepa;
+ struct amdgart_softc *sc;
+ struct sg_cookie *cookie = NULL;
+ void *scrib = NULL;
+ u_int32_t *pte;
+ int dev, count = 0, encount = 0, r, nseg;
+ u_long mapsize, ptesize, gartsize = 0;
+ bus_dma_segment_t seg;
+ pcitag_t tag;
+ pcireg_t v;
+ paddr_t pa, ptepa;
if (amdgart_enable == 0)
return;
@@ -355,13 +351,6 @@ amdgart_probe(struct pcibus_attach_args *pba)
}
ptepa = seg.ds_addr;
- sc->g_ex = extent_create("iommu", sc->g_pa, sc->g_pa + mapsize - 1,
- M_DEVBUF, NULL, NULL, EX_NOWAIT | EX_NOCOALESCE);
- if (sc->g_ex == NULL) {
- printf("\nGART: extent create failed");
- goto err;
- }
-
scrib = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
if (scrib == NULL) {
printf("\nGART: didn't get scribble page");
@@ -377,6 +366,13 @@ amdgart_probe(struct pcibus_attach_args *pba)
sc->g_pte = pte;
sc->g_dmat = pba->pba_dmat;
+ if ((cookie = sg_dmatag_init("iommu", sc, sc->g_pa, mapsize,
+ amdgart_bind_page, amdgart_unbind_page,
+ amdgart_invalidate)) == NULL) {
+ printf("\nGART: didn't get dma cookie\n");
+ goto err;
+ }
+
for (count = 0, dev = 24; dev < 32; dev++) {
tag = pci_make_tag(pba->pba_pc, 0, dev, 3);
@@ -436,7 +432,7 @@ amdgart_probe(struct pcibus_attach_args *pba)
amdgart_initpt(sc, ptesize / sizeof(*sc->g_pte));
sc->g_count = count;
- amdgart_bus_dma_tag._cookie = sc;
+ amdgart_bus_dma_tag._cookie = cookie;
pba->pba_dmat = &amdgart_bus_dma_tag;
return;
@@ -444,10 +440,10 @@ amdgart_probe(struct pcibus_attach_args *pba)
err:
_bus_dmamem_free(pba->pba_dmat, &seg, 1);
nofreeseg:
- if (sc->g_ex != NULL)
- extent_destroy(sc->g_ex);
if (scrib != NULL)
free(scrib, M_DEVBUF);
+ if (cookie != NULL)
+ sg_dmatag_destroy(cookie);
if (sc != NULL)
free(sc, M_DEVBUF);
}
@@ -462,256 +458,4 @@ amdgart_initpt(struct amdgart_softc *sc, u_long nent)
amdgart_invalidate(sc);
}
-int
-amdgart_reload(struct amdgart_softc *sc, bus_dmamap_t dmam)
-{
- int i, j, err;
-
- for (i = 0; i < dmam->dm_nsegs; i++) {
- psize_t len;
-
- len = dmam->dm_segs[i].ds_len;
- err = amdgart_iommu_map(sc, dmam, &dmam->dm_segs[i]);
- if (err) {
- for (j = 0; j < i - 1; j++)
- amdgart_iommu_unmap(sc, &dmam->dm_segs[j]);
- return (err);
- }
- }
- return (0);
-}
-
-int
-amdgart_iommu_map(struct amdgart_softc *sc, bus_dmamap_t dmam,
- bus_dma_segment_t *seg)
-{
- paddr_t base, end, idx;
- psize_t alen;
- u_long res;
- int err, s;
- u_int32_t pgno, flags;
-
- base = trunc_page(seg->ds_addr);
- end = roundup(seg->ds_addr + seg->ds_len, PAGE_SIZE);
- alen = end - base;
-
- s = splhigh();
- err = extent_alloc(sc->g_ex, alen, PAGE_SIZE, 0, dmam->_dm_boundary,
- EX_NOWAIT, &res);
- splx(s);
- if (err) {
- printf("GART: extent_alloc %d\n", err);
- return (err);
- }
-
- seg->ds_addr = res | (seg->ds_addr & PGOFSET);
-
- for (idx = 0; idx < alen; idx += PAGE_SIZE) {
- pgno = ((res + idx) - sc->g_pa) >> PGSHIFT;
- flags = GART_PTE_VALID | GART_PTE_COHERENT |
- (((base + idx) >> 28) & GART_PTE_PHYSHI) |
- ((base + idx) & GART_PTE_PHYSLO);
- sc->g_pte[pgno] = flags;
- }
-
- return (0);
-}
-
-int
-amdgart_iommu_unmap(struct amdgart_softc *sc, bus_dma_segment_t *seg)
-{
- paddr_t base, end, idx;
- psize_t alen;
- int err, s;
- u_int32_t pgno;
-
- base = trunc_page(seg->ds_addr);
- end = roundup(seg->ds_addr + seg->ds_len, PAGE_SIZE);
- alen = end - base;
-
- /*
- * order is significant here; invalidate the iommu page table
- * entries, then mark them as freed in the extent.
- */
-
- for (idx = 0; idx < alen; idx += PAGE_SIZE) {
- pgno = ((base - sc->g_pa) + idx) >> PGSHIFT;
- sc->g_pte[pgno] = sc->g_scribpte;
- }
-
- s = splhigh();
- err = extent_free(sc->g_ex, base, alen, EX_NOWAIT);
- splx(s);
- if (err) {
- /* XXX Shouldn't happen, but if it does, I think we lose. */
- printf("GART: extent_free %d\n", err);
- return (err);
- }
-
- return (0);
-}
-
-int
-amdgart_dmamap_create(bus_dma_tag_t tag, bus_size_t size, int nsegments,
- bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
-{
- struct amdgart_softc *sc = tag->_cookie;
-
- return (bus_dmamap_create(sc->g_dmat, size, nsegments,
- maxsegsz, boundary, flags, dmamp));
-}
-
-void
-amdgart_dmamap_destroy(bus_dma_tag_t tag, bus_dmamap_t dmam)
-{
- struct amdgart_softc *sc = tag->_cookie;
-
- bus_dmamap_destroy(sc->g_dmat, dmam);
-}
-
-int
-amdgart_dmamap_load(bus_dma_tag_t tag, bus_dmamap_t dmam, void *buf,
- bus_size_t buflen, struct proc *p, int flags)
-{
- struct amdgart_softc *sc = tag->_cookie;
- int err;
-
- err = bus_dmamap_load(sc->g_dmat, dmam, buf, buflen,
- p, flags);
- if (err)
- return (err);
- err = amdgart_reload(sc, dmam);
- if (err)
- bus_dmamap_unload(sc->g_dmat, dmam);
- else
- amdgart_invalidate(sc);
- return (err);
-}
-
-int
-amdgart_dmamap_load_mbuf(bus_dma_tag_t tag, bus_dmamap_t dmam,
- struct mbuf *chain, int flags)
-{
- struct amdgart_softc *sc = tag->_cookie;
- int err;
-
- err = bus_dmamap_load_mbuf(sc->g_dmat, dmam,
- chain, flags);
- if (err)
- return (err);
- err = amdgart_reload(sc, dmam);
- if (err)
- bus_dmamap_unload(sc->g_dmat, dmam);
- else
- amdgart_invalidate(sc);
- return (err);
-}
-
-int
-amdgart_dmamap_load_uio(bus_dma_tag_t tag, bus_dmamap_t dmam,
- struct uio *uio, int flags)
-{
- struct amdgart_softc *sc = tag->_cookie;
- int err;
-
- err = bus_dmamap_load_uio(sc->g_dmat, dmam, uio, flags);
- if (err)
- return (err);
- err = amdgart_reload(sc, dmam);
- if (err)
- bus_dmamap_unload(sc->g_dmat, dmam);
- else
- amdgart_invalidate(sc);
- return (err);
-}
-
-int
-amdgart_dmamap_load_raw(bus_dma_tag_t tag, bus_dmamap_t dmam,
- bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
-{
- struct amdgart_softc *sc = tag->_cookie;
- int err;
-
- err = bus_dmamap_load_raw(sc->g_dmat, dmam, segs, nsegs,
- size, flags);
- if (err)
- return (err);
- err = amdgart_reload(sc, dmam);
- if (err)
- bus_dmamap_unload(sc->g_dmat, dmam);
- else
- amdgart_invalidate(sc);
- return (err);
-}
-
-void
-amdgart_dmamap_unload(bus_dma_tag_t tag, bus_dmamap_t dmam)
-{
- struct amdgart_softc *sc = tag->_cookie;
- int i;
-
- for (i = 0; i < dmam->dm_nsegs; i++)
- amdgart_iommu_unmap(sc, &dmam->dm_segs[i]);
- amdgart_invalidate(sc);
- bus_dmamap_unload(sc->g_dmat, dmam);
-}
-
-void
-amdgart_dmamap_sync(bus_dma_tag_t tag, bus_dmamap_t dmam, bus_addr_t offset,
- bus_size_t size, int ops)
-{
- struct amdgart_softc *sc = tag->_cookie;
-
- /*
- * XXX how do we deal with non-coherent mappings? We don't
- * XXX allow them right now.
- */
- bus_dmamap_sync(sc->g_dmat, dmam, offset, size, ops);
-}
-
-int
-amdgart_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size, bus_size_t alignment,
- bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
- int flags)
-{
- struct amdgart_softc *sc = tag->_cookie;
-
- return (bus_dmamem_alloc(sc->g_dmat, size, alignment,
- boundary, segs, nsegs, rsegs, flags));
-}
-
-void
-amdgart_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs)
-{
- struct amdgart_softc *sc = tag->_cookie;
-
- bus_dmamem_free(sc->g_dmat, segs, nsegs);
-}
-
-int
-amdgart_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
- size_t size, caddr_t *kvap, int flags)
-{
- struct amdgart_softc *sc = tag->_cookie;
-
- return (bus_dmamem_map(sc->g_dmat, segs, nsegs, size,
- kvap, flags));
-}
-
-void
-amdgart_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size)
-{
- struct amdgart_softc *sc = tag->_cookie;
-
- bus_dmamem_unmap(sc->g_dmat, kva, size);
-}
-
-paddr_t
-amdgart_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
- off_t off, int prot, int flags)
-{
- struct amdgart_softc *sc = tag->_cookie;
-
- return (bus_dmamem_mmap(sc->g_dmat, segs, nsegs, off,
- prot, flags));
-}
+#endif /* !SMALL_KERNEL */
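For reference, amdgart_bind_page() above packs a 40-bit physical page address into a 32-bit GART PTE: bits 31:12 stay where they are and bits 39:32 are folded down into low PTE bits by the ">> 28" shift. A hedged round-trip sketch of that packing; the mask values here are assumptions chosen to match the shift in the patch, so check the real GART_PTE_* definitions before relying on them:

#include <stdio.h>
#include <stdint.h>

#define PTE_VALID       0x00000001u
#define PTE_COHERENT    0x00000002u
#define PTE_PHYSHI      0x00000ff0u     /* assumed: phys bits 39:32 -> PTE 11:4 */
#define PTE_PHYSLO      0xfffff000u     /* assumed: phys bits 31:12 in place */

static uint32_t
pack_pte(uint64_t pa)
{
    return PTE_VALID | PTE_COHERENT |
        ((uint32_t)(pa >> 28) & PTE_PHYSHI) | ((uint32_t)pa & PTE_PHYSLO);
}

static uint64_t
unpack_pa(uint32_t pte)
{
    return ((uint64_t)(pte & PTE_PHYSHI) << 28) | (pte & PTE_PHYSLO);
}

int
main(void)
{
    uint64_t pa = 0x12abcde000ULL;      /* a 40-bit page-aligned address */
    uint32_t pte = pack_pte(pa);

    printf("pa %#llx -> pte %#x -> pa %#llx\n",
        (unsigned long long)pa, pte, (unsigned long long)unpack_pa(pte));
    return 0;
}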
diff --git a/sys/arch/amd64/pci/pci_machdep.c b/sys/arch/amd64/pci/pci_machdep.c
index 78853bf8136..29d371f7067 100644
--- a/sys/arch/amd64/pci/pci_machdep.c
+++ b/sys/arch/amd64/pci/pci_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pci_machdep.c,v 1.26 2009/04/13 21:23:16 kettenis Exp $ */
+/* $OpenBSD: pci_machdep.c,v 1.27 2009/04/21 17:05:29 oga Exp $ */
/* $NetBSD: pci_machdep.c,v 1.3 2003/05/07 21:33:58 fvdl Exp $ */
/*-
@@ -175,7 +175,9 @@ pci_attach_hook(struct device *parent, struct device *self,
{
if (pba->pba_bus == 0) {
printf(": configuration mode %d", pci_mode);
+#ifndef SMALL_KERNEL
amdgart_probe(pba);
+#endif /* !SMALL_KERNEL */
}
}