-rw-r--r--	sys/arch/sparc/include/bus.h	188
-rw-r--r--	sys/arch/sparc/sparc/iommu.c	440
-rw-r--r--	sys/arch/sparc/sparc/machdep.c	201
3 files changed, 824 insertions, 5 deletions
diff --git a/sys/arch/sparc/include/bus.h b/sys/arch/sparc/include/bus.h
index c47a71e8bd7..d938939cc89 100644
--- a/sys/arch/sparc/include/bus.h
+++ b/sys/arch/sparc/include/bus.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus.h,v 1.6 2006/01/01 00:41:02 millert Exp $ */
+/* $OpenBSD: bus.h,v 1.7 2009/07/13 19:50:00 kettenis Exp $ */
/*
* Copyright (c) 2003, Miodrag Vallat.
*
@@ -497,4 +497,190 @@ bus_space_read_raw_region_4(bus_space_tag_t tag, bus_space_handle_t handle,
}
}
+/*
+ * Flags used in various bus DMA methods.
+ */
+#define BUS_DMA_WAITOK 0x000 /* safe to sleep (pseudo-flag) */
+#define BUS_DMA_NOWAIT 0x001 /* not safe to sleep */
+#define BUS_DMA_ALLOCNOW 0x002 /* perform resource allocation now */
+#define BUS_DMA_COHERENT 0x004 /* hint: map memory DMA coherent */
+#define BUS_DMA_STREAMING 0x008 /* hint: sequential, unidirectional */
+#define BUS_DMA_BUS1 0x010 /* placeholders for bus functions... */
+#define BUS_DMA_BUS2 0x020
+#define BUS_DMA_BUS3 0x040
+#define BUS_DMA_BUS4 0x080
+#define BUS_DMA_READ 0x100 /* mapping is device -> memory only */
+#define BUS_DMA_WRITE 0x200 /* mapping is memory -> device only */
+#define BUS_DMA_NOCACHE 0x400 /* hint: map non-cached memory */
+#define BUS_DMA_ZERO 0x800 /* zero memory in dmamem_alloc */
+
+/* For devices that have a 24-bit address space */
+#define BUS_DMA_24BIT BUS_DMA_BUS1
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+
+/*
+ * Operations performed by bus_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
+
+typedef struct sparc_bus_dma_tag *bus_dma_tag_t;
+typedef struct sparc_bus_dmamap *bus_dmamap_t;
+
+/*
+ * bus_dma_segment_t
+ *
+ * Describes a single contiguous DMA transaction. Values
+ * are suitable for programming into DMA registers.
+ */
+struct sparc_bus_dma_segment {
+ bus_addr_t ds_addr; /* DVMA address */
+ bus_size_t ds_len; /* length of transfer */
+ bus_size_t _ds_sgsize; /* size of allocated DVMA segment */
+ void *_ds_mlist; /* page list when dmamem_alloc'ed */
+ vaddr_t _ds_va; /* VA when dmamem_map'ed */
+};
+typedef struct sparc_bus_dma_segment bus_dma_segment_t;
+
+/*
+ * bus_dma_tag_t
+ *
+ * A machine-dependent opaque type describing the implementation of
+ * DMA for a given bus.
+ */
+struct sparc_bus_dma_tag {
+ void *_cookie; /* cookie used in the guts */
+
+ /*
+ * DMA mapping methods.
+ */
+ int (*_dmamap_create)(bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *);
+ void (*_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
+ int (*_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int);
+ int (*_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int);
+ int (*_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int);
+ int (*_dmamap_load_raw)(bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int);
+ void (*_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
+ void (*_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
+ bus_addr_t, bus_size_t, int);
+
+ /*
+ * DMA memory utility functions.
+ */
+ int (*_dmamem_alloc)(bus_dma_tag_t, bus_size_t, bus_size_t,
+ bus_size_t, bus_dma_segment_t *, int, int *, int);
+ void (*_dmamem_free)(bus_dma_tag_t,
+ bus_dma_segment_t *, int);
+ int (*_dmamem_map)(bus_dma_tag_t, bus_dma_segment_t *,
+ int, size_t, caddr_t *, int);
+ void (*_dmamem_unmap)(bus_dma_tag_t, void *, size_t);
+ paddr_t (*_dmamem_mmap)(bus_dma_tag_t, bus_dma_segment_t *,
+ int, off_t, int, int);
+};
+
+#define bus_dmamap_create(t, s, n, m, b, f, p) \
+ (*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p))
+#define bus_dmamap_destroy(t, p) \
+ (*(t)->_dmamap_destroy)((t), (p))
+#define bus_dmamap_load(t, m, b, s, p, f) \
+ (*(t)->_dmamap_load)((t), (m), (b), (s), (p), (f))
+#define bus_dmamap_load_mbuf(t, m, b, f) \
+ (*(t)->_dmamap_load_mbuf)((t), (m), (b), (f))
+#define bus_dmamap_load_uio(t, m, u, f) \
+ (*(t)->_dmamap_load_uio)((t), (m), (u), (f))
+#define bus_dmamap_load_raw(t, m, sg, n, s, f) \
+ (*(t)->_dmamap_load_raw)((t), (m), (sg), (n), (s), (f))
+#define bus_dmamap_unload(t, p) \
+ (*(t)->_dmamap_unload)((t), (p))
+#define bus_dmamap_sync(t, p, o, l, ops) \
+ (void)((t)->_dmamap_sync ? \
+ (*(t)->_dmamap_sync)((t), (p), (o), (l), (ops)) : (void)0)
+
+#define bus_dmamem_alloc(t, s, a, b, sg, n, r, f) \
+ (*(t)->_dmamem_alloc)((t), (s), (a), (b), (sg), (n), (r), (f))
+#define bus_dmamem_free(t, sg, n) \
+ (*(t)->_dmamem_free)((t), (sg), (n))
+#define bus_dmamem_map(t, sg, n, s, k, f) \
+ (*(t)->_dmamem_map)((t), (sg), (n), (s), (k), (f))
+#define bus_dmamem_unmap(t, k, s) \
+ (*(t)->_dmamem_unmap)((t), (k), (s))
+#define bus_dmamem_mmap(t, sg, n, o, p, f) \
+ (*(t)->_dmamem_mmap)((t), (sg), (n), (o), (p), (f))
+
+#define bus_dmatag_subregion(t, mna, mxa, nt, f) EOPNOTSUPP
+#define bus_dmatag_destroy(t)
+
+/*
+ * bus_dmamap_t
+ *
+ * Describes a DMA mapping.
+ */
+struct sparc_bus_dmamap {
+ /*
+ * PRIVATE MEMBERS: not for use by machine-independent code.
+ */
+ bus_size_t _dm_size; /* largest DMA transfer mappable */
+ int _dm_segcnt; /* number of segs this map can map */
+ bus_size_t _dm_maxmaxsegsz; /* fixed largest possible segment */
+ bus_size_t _dm_boundary; /* don't cross this */
+ int _dm_flags; /* misc. flags */
+
+ void *_dm_cookie; /* cookie for bus-specific functions */
+
+ u_long _dm_align; /* DVMA alignment; must be a
+ multiple of the page size */
+ u_long _dm_ex_start; /* constraints on DVMA map */
+ u_long _dm_ex_end; /* allocations; used by the VME bus
+ driver and by the IOMMU driver
+ when mapping 24-bit devices */
+
+ /*
+ * PUBLIC MEMBERS: these are used by machine-independent code.
+ */
+ bus_size_t dm_maxsegsz; /* largest possible segment */
+ bus_size_t dm_mapsize; /* size of the mapping */
+ int dm_nsegs; /* # valid segments in mapping */
+ bus_dma_segment_t dm_segs[1]; /* segments; variable length */
+};
+
+int _bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
+ bus_size_t, int, bus_dmamap_t *);
+void _bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
+int _bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int);
+int _bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int);
+int _bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int);
+void _bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
+void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+ bus_size_t, int);
+
+int _bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
+void _bus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs);
+void _bus_dmamem_unmap(bus_dma_tag_t tag, void *kva,
+ size_t size);
+paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, off_t off, int prot, int flags);
+
+int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
+ vaddr_t low, vaddr_t high);
+
+vaddr_t _bus_dma_valloc_skewed(size_t, u_long, u_long, u_long);
+
#endif /* _SPARC_BUS_H_ */
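
The interface above follows the machine-independent bus_dma(9) contract. As an illustration only, here is a hedged sketch of how a driver sitting on top of this tag would typically run through the create/load/sync/unload cycle; struct xx_softc, its field names, and the 64KB transfer size are assumptions made for the example and are not part of this commit.

	/* Hypothetical driver fragment -- names and sizes are assumptions. */
	struct xx_softc {
		bus_dma_tag_t	 sc_dmat;	/* DMA tag handed down by the parent bus */
		bus_dmamap_t	 sc_map;	/* DMA map for sc_buf */
		void		*sc_buf;	/* 64KB kernel buffer to be DMA'd */
	};

	int
	xx_dma_setup(struct xx_softc *sc)
	{
		int error;

		/* One segment, up to 64KB, no boundary restriction. */
		error = bus_dmamap_create(sc->sc_dmat, 65536, 1, 65536, 0,
		    BUS_DMA_NOWAIT, &sc->sc_map);
		if (error != 0)
			return (error);

		/* Load the buffer; on sun4m this programs IOMMU page table entries. */
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_buf, 65536,
		    NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
			return (error);
		}

		/* The device would be pointed at sc->sc_map->dm_segs[0].ds_addr. */
		/* Device writes into memory: sync before and after the transfer. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0, 65536,
		    BUS_DMASYNC_PREREAD);
		/* ... start the transfer and wait for it to complete ... */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0, 65536,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
		return (0);
	}
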
diff --git a/sys/arch/sparc/sparc/iommu.c b/sys/arch/sparc/sparc/iommu.c
index e70d66f5ce0..992134e556b 100644
--- a/sys/arch/sparc/sparc/iommu.c
+++ b/sys/arch/sparc/sparc/iommu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: iommu.c,v 1.21 2009/04/14 16:01:04 oga Exp $ */
+/* $OpenBSD: iommu.c,v 1.22 2009/07/13 19:50:00 kettenis Exp $ */
/* $NetBSD: iommu.c,v 1.13 1997/07/29 09:42:04 fair Exp $ */
/*
@@ -40,12 +40,15 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
+#include <sys/extent.h>
+#include <sys/mbuf.h>
#include <uvm/uvm.h>
#include <machine/pmap.h>
#include <machine/autoconf.h>
+#include <machine/bus.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
@@ -60,11 +63,21 @@ struct iommu_softc {
u_int sc_dvmabase;
iopte_t *sc_ptes;
int sc_hasiocache;
+#define sc_cachecoherent sc_hasiocache
+
+/*
+ * Note: operations on the extent map are being protected with
+ * splhigh(), since we cannot predict at which interrupt priority
+ * our clients will run.
+ */
+ struct sparc_bus_dma_tag sc_dmatag;
+ struct extent *sc_dvmamap;
};
+
struct iommu_softc *iommu_sc;/*XXX*/
+struct sparc_bus_dma_tag *iommu_dmatag;/*XXX*/
int has_iocache;
-
/* autoconfiguration driver */
int iommu_print(void *, const char *);
void iommu_attach(struct device *, struct device *, void *);
@@ -78,6 +91,32 @@ struct cfdriver iommu_cd = {
NULL, "iommu", DV_DULL
};
+/* IOMMU DMA map functions */
+int iommu_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
+ bus_size_t, int, bus_dmamap_t *);
+int iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int);
+int iommu_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int);
+int iommu_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int);
+int iommu_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int);
+void iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
+void iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+ bus_size_t, int);
+
+int iommu_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
+ int, size_t, caddr_t *, int);
+void iommu_dmamem_unmap(bus_dma_tag_t, void *, size_t);
+paddr_t iommu_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *,
+ int, off_t, int, int);
+int iommu_dvma_alloc(struct iommu_softc *, bus_dmamap_t, vaddr_t,
+ bus_size_t, int, bus_addr_t *, bus_size_t *);
+
+int iommu_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int);
+
/*
* Print the location of some iommu-attached device (called just
* before attaching that device). If `iommu' is not NULL, the
@@ -120,8 +159,9 @@ iommu_attach(parent, self, aux)
void *aux;
{
#if defined(SUN4M)
- register struct iommu_softc *sc = (struct iommu_softc *)self;
+ struct iommu_softc *sc = (struct iommu_softc *)self;
struct confargs oca, *ca = aux;
+ struct sparc_bus_dma_tag *dmat = &sc->sc_dmatag;
register struct romaux *ra = &ca->ca_ra;
register int node;
register char *name;
@@ -134,6 +174,7 @@ iommu_attach(parent, self, aux)
paddr_t iopte_pa;
iommu_sc = sc;
+ iommu_dmatag = dmat;
/*
* XXX there is only one iommu, for now -- do not know how to
* address children on others
@@ -142,6 +183,23 @@ iommu_attach(parent, self, aux)
printf(" unsupported\n");
return;
}
+
+ dmat->_cookie = sc;
+ dmat->_dmamap_create = iommu_dmamap_create;
+ dmat->_dmamap_destroy = _bus_dmamap_destroy;
+ dmat->_dmamap_load = iommu_dmamap_load;
+ dmat->_dmamap_load_mbuf = iommu_dmamap_load_mbuf;
+ dmat->_dmamap_load_uio = iommu_dmamap_load_uio;
+ dmat->_dmamap_load_raw = iommu_dmamap_load_raw;
+ dmat->_dmamap_unload = iommu_dmamap_unload;
+ dmat->_dmamap_sync = iommu_dmamap_sync;
+
+ dmat->_dmamem_alloc = _bus_dmamem_alloc;
+ dmat->_dmamem_free = _bus_dmamem_free;
+ dmat->_dmamem_map = iommu_dmamem_map;
+ dmat->_dmamem_unmap = _bus_dmamem_unmap;
+ dmat->_dmamem_mmap = iommu_dmamem_mmap;
+
node = ra->ra_node;
#if 0
@@ -265,6 +323,8 @@ iommu_attach(parent, self, aux)
sc->sc_pagesize,
sc->sc_range >> 20);
+ sc->sc_dvmamap = dvmamap_extent; /* XXX */
+
/* Propagate bootpath */
if (ra->ra_bp != NULL && strcmp(ra->ra_bp->name, "iommu") == 0)
oca.ca_ra.ra_bp = ra->ra_bp + 1;
@@ -331,3 +391,377 @@ iommu_remove(va, len)
va += sc->sc_pagesize;
}
}
+
+extern u_long dvma_cachealign;
+
+/*
+ * IOMMU DMA map functions.
+ */
+int
+iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
+ bus_size_t maxsegsz, bus_size_t boundary, int flags,
+ bus_dmamap_t *dmamp)
+{
+ struct iommu_softc *sc = t->_cookie;
+ bus_dmamap_t map;
+ int error;
+
+ if ((error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
+ boundary, flags, &map)) != 0)
+ return (error);
+
+ if ((flags & BUS_DMA_24BIT) != 0) {
+ /* Limit this map to the range usable by `24-bit' devices */
+ map->_dm_ex_start = DVMA_D24_BASE;
+ map->_dm_ex_end = DVMA_D24_END;
+ } else {
+ /* Enable allocations from the entire map */
+ map->_dm_ex_start = sc->sc_dvmamap->ex_start;
+ map->_dm_ex_end = sc->sc_dvmamap->ex_end;
+ }
+
+ *dmamp = map;
+ return (0);
+}
+
+/*
+ * Internal routine to allocate space in the IOMMU map.
+ */
+int
+iommu_dvma_alloc(struct iommu_softc *sc, bus_dmamap_t map,
+ vaddr_t va, bus_size_t len, int flags,
+ bus_addr_t *dvap, bus_size_t *sgsizep)
+{
+ bus_size_t sgsize;
+ u_long align, voff, dvaddr;
+ int s, error;
+ int pagesz = PAGE_SIZE;
+
+ /*
+ * Remember page offset, then truncate the buffer address to
+ * a page boundary.
+ */
+ voff = va & (pagesz - 1);
+ va &= -pagesz;
+
+ if (len > map->_dm_size)
+ return (EINVAL);
+
+ sgsize = (len + voff + pagesz - 1) & -pagesz;
+ align = dvma_cachealign ? dvma_cachealign : map->_dm_align;
+
+ s = splhigh();
+ error = extent_alloc_subregion(sc->sc_dvmamap, map->_dm_ex_start,
+ map->_dm_ex_end, sgsize, align, va & (align-1), map->_dm_boundary,
+ (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT, &dvaddr);
+ splx(s);
+ *dvap = (bus_addr_t)dvaddr;
+ *sgsizep = sgsize;
+ return (error);
+}
+
+int
+iommu_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct proc *p, int flags)
+{
+ struct iommu_softc *sc = t->_cookie;
+ bus_size_t sgsize;
+ bus_addr_t dva;
+ vaddr_t va = (vaddr_t)buf;
+ int pagesz = PAGE_SIZE;
+ pmap_t pmap;
+ int error;
+
+ if (map->dm_nsegs >= map->_dm_segcnt)
+ return (EFBIG);
+
+ /* Allocate IOMMU resources */
+ if ((error = iommu_dvma_alloc(sc, map, va, buflen, flags,
+ &dva, &sgsize)) != 0)
+ return (error);
+
+ if ((sc->sc_cachecoherent == 0) ||
+ (CACHEINFO.ec_totalsize == 0))
+ cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync? */
+
+ /*
+ * We always use just one segment.
+ */
+ map->dm_segs[map->dm_nsegs].ds_addr = dva + (va & (pagesz - 1));
+ map->dm_segs[map->dm_nsegs].ds_len = buflen;
+ map->dm_segs[map->dm_nsegs]._ds_sgsize = sgsize;
+ map->dm_nsegs++;
+
+ if (p != NULL)
+ pmap = p->p_vmspace->vm_map.pmap;
+ else
+ pmap = pmap_kernel();
+
+ for (; sgsize != 0; ) {
+ paddr_t pa;
+ /*
+ * Get the physical address for this page.
+ */
+ if (!pmap_extract(pmap, va, &pa))
+ return (EFAULT);
+
+ iommu_enter(dva, pa);
+
+ dva += pagesz;
+ va += pagesz;
+ sgsize -= pagesz;
+ }
+
+ return (0);
+}
+
+/*
+ * Prepare buffer for DMA transfer.
+ */
+int
+iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
+ void *buf, bus_size_t buflen,
+ struct proc *p, int flags)
+{
+ int error;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = buflen;
+ map->dm_nsegs = 0;
+
+ error = iommu_dmamap_load_buffer(t, map, buf, buflen, p, flags);
+ if (error)
+ iommu_dmamap_unload(t, map);
+
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for mbufs.
+ */
+int
+iommu_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
+ struct mbuf *m0, int flags)
+{
+ struct mbuf *m;
+ int error = 0;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = m0->m_pkthdr.len;
+ map->dm_nsegs = 0;
+
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ error = iommu_dmamap_load_buffer(t, map, m->m_data, m->m_len,
+ NULL, flags);
+ }
+
+ if (error)
+ iommu_dmamap_unload(t, map);
+
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for uios.
+ */
+int
+iommu_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
+ struct uio *uio, int flags)
+{
+
+ panic("_bus_dmamap_load_uio: not implemented");
+}
+
+/*
+ * Like _bus_dmamap_load(), but for raw memory allocated with
+ * bus_dmamem_alloc().
+ */
+int
+iommu_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, bus_size_t size,
+ int flags)
+{
+ struct iommu_softc *sc = t->_cookie;
+ struct vm_page *m;
+ paddr_t pa;
+ bus_addr_t dva;
+ bus_size_t sgsize;
+ struct pglist *mlist;
+ int pagesz = PAGE_SIZE;
+ int error;
+
+ map->dm_nsegs = 0;
+
+ /* Allocate IOMMU resources */
+ if ((error = iommu_dvma_alloc(sc, map, segs[0]._ds_va, size,
+ flags, &dva, &sgsize)) != 0)
+ return (error);
+
+ /*
+ * Note DVMA address in case bus_dmamem_map() is called later.
+ * It can then ensure cache coherency by choosing a KVA that
+ * is aligned to `ds_addr'.
+ */
+ segs[0].ds_addr = dva;
+ segs[0].ds_len = size;
+
+ map->dm_segs[0].ds_addr = dva;
+ map->dm_segs[0].ds_len = size;
+ map->dm_segs[0]._ds_sgsize = sgsize;
+
+ /* Map physical pages into IOMMU */
+ mlist = segs[0]._ds_mlist;
+ for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
+ if (sgsize == 0)
+ panic("iommu_dmamap_load_raw: size botch");
+ pa = VM_PAGE_TO_PHYS(m);
+ iommu_enter(dva, pa);
+ dva += pagesz;
+ sgsize -= pagesz;
+ }
+
+ map->dm_nsegs = 1;
+ map->dm_mapsize = size;
+
+ return (0);
+}
+
+/*
+ * Unload an IOMMU DMA map.
+ */
+void
+iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
+{
+ struct iommu_softc *sc = t->_cookie;
+ bus_dma_segment_t *segs = map->dm_segs;
+ int nsegs = map->dm_nsegs;
+ bus_addr_t dva;
+ bus_size_t len;
+ int i, s, error;
+
+ for (i = 0; i < nsegs; i++) {
+ dva = segs[i].ds_addr & -PAGE_SIZE;
+ len = segs[i]._ds_sgsize;
+
+ iommu_remove(dva, len);
+ s = splhigh();
+ error = extent_free(sc->sc_dvmamap, dva, len, EX_NOWAIT);
+ splx(s);
+ if (error != 0)
+ printf("warning: %ld of DVMA space lost\n", (long)len);
+ }
+
+ /* Mark the mappings as invalid. */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+}
+
+/*
+ * DMA map synchronization.
+ */
+void
+iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
+ bus_addr_t offset, bus_size_t len, int ops)
+{
+
+ /*
+ * XXX Should flush CPU write buffers.
+ */
+}
+
+/*
+ * Map DMA-safe memory.
+ */
+int
+iommu_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+ size_t size, caddr_t *kvap, int flags)
+{
+ struct iommu_softc *sc = t->_cookie;
+ struct vm_page *m;
+ vaddr_t va;
+ bus_addr_t addr;
+ struct pglist *mlist;
+ int cbit;
+ u_long align;
+ int pagesz = PAGE_SIZE;
+
+ if (nsegs != 1)
+ panic("iommu_dmamem_map: nsegs = %d", nsegs);
+
+ cbit = sc->sc_cachecoherent ? 0 : PMAP_NC;
+ align = dvma_cachealign ? dvma_cachealign : pagesz;
+
+ size = round_page(size);
+
+#if 0
+ /*
+ * In case the segment has already been loaded by
+ * iommu_dmamap_load_raw(), find a region of kernel virtual
+ * addresses that can accommodate our alignment requirements.
+ */
+ va = _bus_dma_valloc_skewed(size, 0, align,
+ segs[0].ds_addr & (align - 1));
+#else
+ va = uvm_km_valloc(kernel_map, size);
+#endif
+ if (va == 0)
+ return (ENOMEM);
+
+ segs[0]._ds_va = va;
+ *kvap = (void *)va;
+
+ /*
+ * Map the pages allocated in _bus_dmamem_alloc() to the
+ * kernel virtual address space.
+ */
+ mlist = segs[0]._ds_mlist;
+ for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
+
+ if (size == 0)
+ panic("iommu_dmamem_map: size botch");
+
+ addr = VM_PAGE_TO_PHYS(m);
+ pmap_kenter_pa(va, addr | cbit, VM_PROT_READ | VM_PROT_WRITE);
+#if 0
+ if (flags & BUS_DMA_COHERENT)
+ /* XXX */;
+#endif
+ va += pagesz;
+ size -= pagesz;
+ }
+ pmap_update(pmap_kernel());
+
+ return (0);
+}
+
+void
+iommu_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+
+#ifdef DIAGNOSTIC
+ if ((u_long)kva & PAGE_MASK)
+ panic("iommu_dmamem_unmap");
+#endif
+
+ size = round_page(size);
+ pmap_kremove((vaddr_t)kva, size);
+ pmap_update(pmap_kernel());
+ uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
+}
+
+
+/*
+ * mmap(2)'ing DMA-safe memory.
+ */
+paddr_t
+iommu_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+ off_t off, int prot, int flags)
+{
+ panic("_bus_dmamem_mmap: not implemented");
+}
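
Inside iommu_dvma_alloc(), the requested buffer is extended by its sub-page offset and rounded up to whole IOMMU pages before a DVMA range is carved out of the extent map. A small standalone sketch of that arithmetic, assuming a 4KB page size and arbitrarily chosen example values:

	#include <stdio.h>

	#define PAGESZ	4096UL

	int
	main(void)
	{
		unsigned long va = 0x40021f80UL;	/* example buffer address */
		unsigned long len = 0x2100UL;		/* example buffer length */

		unsigned long voff = va & (PAGESZ - 1);	/* offset within first page */
		unsigned long base = va & -PAGESZ;	/* page-aligned start */
		/* round offset + length up to a whole number of pages */
		unsigned long sgsize = (len + voff + PAGESZ - 1) & -PAGESZ;

		/* prints: base 0x40021000 voff 0xf80 sgsize 0x4000 (4 pages) */
		printf("base %#lx voff %#lx sgsize %#lx (%lu pages)\n",
		    base, voff, sgsize, sgsize / PAGESZ);
		return (0);
	}
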
diff --git a/sys/arch/sparc/sparc/machdep.c b/sys/arch/sparc/sparc/machdep.c
index e1a7178feea..0cc40477def 100644
--- a/sys/arch/sparc/sparc/machdep.c
+++ b/sys/arch/sparc/sparc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.118 2009/06/15 17:01:26 beck Exp $ */
+/* $OpenBSD: machdep.c,v 1.119 2009/07/13 19:50:00 kettenis Exp $ */
/* $NetBSD: machdep.c,v 1.85 1997/09/12 08:55:02 pk Exp $ */
/*
@@ -71,6 +71,7 @@
#include <dev/rndvar.h>
#include <machine/autoconf.h>
+#include <machine/bus.h>
#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/pmap.h>
@@ -1056,3 +1057,201 @@ caddr_t addr;
return (res);
}
#endif /* SUN4 */
+
+/*
+ * Common function for DMA map creation. May be called by bus-specific
+ * DMA map creation functions.
+ */
+int
+_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
+ bus_size_t maxsegsz, bus_size_t boundary, int flags,
+ bus_dmamap_t *dmamp)
+{
+ struct sparc_bus_dmamap *map;
+ void *mapstore;
+ size_t mapsize;
+
+ /*
+ * Allocate and initialize the DMA map. The end of the map
+ * is a variable-sized array of segments, so we allocate enough
+ * room for them in one shot.
+ *
+ * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
+ * of ALLOCNOW notifies others that we've reserved these resources,
+ * and they are not to be freed.
+ *
+ * The bus_dmamap_t includes one bus_dma_segment_t, hence
+ * the (nsegments - 1).
+ */
+ mapsize = sizeof(struct sparc_bus_dmamap) +
+ (sizeof(bus_dma_segment_t) * (nsegments - 1));
+ if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
+ (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
+ return (ENOMEM);
+
+ map = (struct sparc_bus_dmamap *)mapstore;
+ map->_dm_size = size;
+ map->_dm_segcnt = nsegments;
+ map->_dm_maxmaxsegsz = maxsegsz;
+ map->_dm_boundary = boundary;
+ map->_dm_align = PAGE_SIZE;
+ map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
+ map->dm_maxsegsz = maxsegsz;
+ map->dm_mapsize = 0; /* no valid mappings */
+ map->dm_nsegs = 0;
+
+ *dmamp = map;
+ return (0);
+}
+
+/*
+ * Common function for DMA map destruction. May be called by bus-specific
+ * DMA map destruction functions.
+ */
+void
+_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
+{
+ free(map, M_DEVBUF);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for mbufs.
+ */
+int
+_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
+ struct mbuf *m, int flags)
+{
+ panic("_bus_dmamap_load_mbuf: not implemented");
+}
+
+/*
+ * Like _bus_dmamap_load(), but for uios.
+ */
+int
+_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
+ struct uio *uio, int flags)
+{
+ panic("_bus_dmamap_load_uio: not implemented");
+}
+
+/*
+ * Like _bus_dmamap_load(), but for raw memory allocated with
+ * bus_dmamem_alloc().
+ */
+int
+_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, bus_size_t size,
+ int flags)
+{
+ panic("_bus_dmamap_load_raw: not implemented");
+}
+
+/*
+ * Common function for DMA map synchronization. May be called
+ * by bus-specific DMA map synchronization functions.
+ */
+void
+_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
+ bus_addr_t offset, bus_size_t len, int ops)
+{
+}
+
+/*
+ * Common function for DMA-safe memory allocation. May be called
+ * by bus-specific DMA memory allocation functions.
+ */
+int
+_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs,
+ int flags)
+{
+ struct pglist *mlist;
+ int error, plaflag;
+
+ /* Always round the size. */
+ size = round_page(size);
+
+ if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
+ (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
+ return (ENOMEM);
+
+ /*
+ * Allocate pages from the VM system.
+ */
+ plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
+ if (flags & BUS_DMA_ZERO)
+ plaflag |= UVM_PLA_ZERO;
+
+ TAILQ_INIT(mlist);
+ error = uvm_pglistalloc(size, (paddr_t)0, (paddr_t)-1, 0, 0,
+ mlist, nsegs, plaflag);
+ if (error) {
+ free(mlist, M_DEVBUF);
+ return (error);
+ }
+
+ /*
+ * Simply keep a pointer around to the linked list, so
+ * bus_dmamem_free() can free it.
+ *
+ * NOBODY SHOULD TOUCH THE pageq FIELDS WHILE THESE PAGES
+ * ARE IN OUR CUSTODY.
+ */
+ segs[0]._ds_mlist = mlist;
+
+ /*
+ * We now have physical pages, but no DVMA addresses yet. These
+ * will be allocated in bus_dmamap_load*() routines. Hence we
+ * save any alignment and boundary requirements in this DMA
+ * segment.
+ */
+ segs[0].ds_addr = 0;
+ segs[0].ds_len = 0;
+ segs[0]._ds_va = 0;
+ *rsegs = 1;
+ return (0);
+}
+
+/*
+ * Common function for freeing DMA-safe memory. May be called by
+ * bus-specific DMA memory free functions.
+ */
+void
+_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
+{
+ if (nsegs != 1)
+ panic("bus_dmamem_free: nsegs = %d", nsegs);
+
+ /*
+ * Return the list of pages back to the VM system.
+ */
+ uvm_pglistfree(segs[0]._ds_mlist);
+ free(segs[0]._ds_mlist, M_DEVBUF);
+}
+
+/*
+ * Common function for unmapping DMA-safe memory. May be called by
+ * bus-specific DMA memory unmapping functions.
+ */
+void
+_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+
+#ifdef DIAGNOSTIC
+ if ((u_long)kva & PAGE_MASK)
+ panic("_bus_dmamem_unmap");
+#endif
+
+ size = round_page(size);
+ uvm_km_free(kernel_map, (vaddr_t)kva, size);
+}
+
+/*
+ * Common function for mmap(2)'ing DMA-safe memory. May be called by
+ * bus-specific DMA mmap(2)'ing functions.
+ */
+paddr_t
+_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+ off_t off, int prot, int flags)
+{
+ panic("_bus_dmamem_mmap: not implemented");
+}
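
For memory that the device owns outright (descriptor rings, shared control blocks), the new path is bus_dmamem_alloc()/bus_dmamem_map() followed by bus_dmamap_load_raw(), which hands the raw page list to the IOMMU. Another hedged driver-side sketch, reusing the hypothetical xx_softc from earlier; error unwinding is abbreviated and the 16KB ring size is an assumption, not something prescribed by the commit.

	/* Hypothetical allocation of a 16KB descriptor ring. */
	int
	xx_alloc_ring(struct xx_softc *sc)
	{
		bus_dma_segment_t seg;
		bus_dmamap_t map;
		caddr_t kva;
		int rseg, error;

		error = bus_dmamem_alloc(sc->sc_dmat, 16384, PAGE_SIZE, 0,
		    &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
		if (error != 0)
			return (error);

		/* Map the pages into kernel VA (iommu_dmamem_map() on sun4m). */
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 16384, &kva,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (error != 0)
			goto fail;

		error = bus_dmamap_create(sc->sc_dmat, 16384, 1, 16384, 0,
		    BUS_DMA_NOWAIT, &map);
		if (error != 0)
			goto fail;

		/* Hand the raw segment to the IOMMU; seg.ds_addr becomes the DVMA address. */
		error = bus_dmamap_load_raw(sc->sc_dmat, map, &seg, rseg, 16384,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;

		/* Real code would store map and kva in the softc for later use. */
		return (0);
	 fail:
		/* Real code would unwind each successful step here. */
		return (error);
	}
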