author     Henric Jungheim <henric@cvs.openbsd.org>    2003-03-06 08:26:09 +0000
committer  Henric Jungheim <henric@cvs.openbsd.org>    2003-03-06 08:26:09 +0000
commit     efca87016057d41201643e65d1f1c5d44716db0b (patch)
tree       0a757cfc249bf077773a2182e04fd490183c3e6c /sys/arch
parent     6216a22d89b680747bfb42b651aed1076b9dbf92 (diff)
The existing IOMMU code had a rounding problem that was most noticeable
on faster systems under heavy network load. This replaces some of the
unreadable iommu functions with something a little less dense and a lot
less crash prone. The bus_dma function pointer/cookie handling was
broken. Change them to work like the stacked bus_space drivers (where
"work" is the key word). Tested by many (thanks). ok jason@ deraadt@
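
For readers unfamiliar with the stacked-driver scheme the message refers
to: each sparc64 bus tag carries a _parent pointer plus per-operation
function pointers, and an operation that a child tag leaves NULL falls
through to the nearest ancestor that implements it. A minimal sketch of
that dispatch, with hypothetical naming (the real walk lives in the
bus.h and machdep.c changes below):

    int
    _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
        bus_size_t buflen, struct proc *p, int flags)
    {
            bus_dma_tag_t lt;

            /* Walk toward the root until some tag implements the op. */
            for (lt = t; lt != NULL; lt = lt->_parent)
                    if (lt->_dmamap_load != NULL)
                            /* Pass the original tag so the callee can
                             * find its own cookie. */
                            return ((*lt->_dmamap_load)(t, map, buf,
                                buflen, p, flags));
            panic("_bus_dmamap_load: no implementation");
    }
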
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/sparc64/dev/ebus.c             78
-rw-r--r--  sys/arch/sparc64/dev/iommu.c          1738
-rw-r--r--  sys/arch/sparc64/dev/iommureg.h          6
-rw-r--r--  sys/arch/sparc64/dev/iommuvar.h         68
-rw-r--r--  sys/arch/sparc64/dev/psycho.c          114
-rw-r--r--  sys/arch/sparc64/dev/sbus.c            132
-rw-r--r--  sys/arch/sparc64/dev/schizo.c          109
-rw-r--r--  sys/arch/sparc64/include/bus.h         274
-rw-r--r--  sys/arch/sparc64/sparc64/machdep.c     153
9 files changed, 1730 insertions, 942 deletions
diff --git a/sys/arch/sparc64/dev/ebus.c b/sys/arch/sparc64/dev/ebus.c
index 8f802a40c69..d82667411cf 100644
--- a/sys/arch/sparc64/dev/ebus.c
+++ b/sys/arch/sparc64/dev/ebus.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ebus.c,v 1.10 2003/02/17 01:29:20 henric Exp $ */
+/* $OpenBSD: ebus.c,v 1.11 2003/03/06 08:26:08 henric Exp $ */
/* $NetBSD: ebus.c,v 1.24 2001/07/25 03:49:54 eeh Exp $ */
/*
@@ -105,17 +105,6 @@ static int _ebus_bus_map(bus_space_tag_t, bus_space_tag_t, bus_addr_t,
bus_size_t, int, bus_space_handle_t *);
static void *ebus_intr_establish(bus_space_tag_t, bus_space_tag_t, int, int,
int, int (*)(void *), void *);
-static int ebus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
- struct proc *, int);
-static void ebus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-static void ebus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
- bus_size_t, int);
-int ebus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
- bus_dma_segment_t *, int, int *, int);
-void ebus_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
-int ebus_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, int, size_t,
- caddr_t *, int);
-void ebus_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
bus_space_tag_t ebus_alloc_mem_tag(struct ebus_softc *, bus_space_tag_t);
bus_space_tag_t ebus_alloc_io_tag(struct ebus_softc *, bus_space_tag_t);
bus_space_tag_t _ebus_alloc_bus_tag(struct ebus_softc *sc, const char *,
@@ -421,21 +410,6 @@ ebus_alloc_dma_tag(struct ebus_softc *sc, bus_dma_tag_t pdt)
bzero(dt, sizeof *dt);
dt->_cookie = sc;
dt->_parent = pdt;
-#define PCOPY(x) dt->x = pdt->x
- PCOPY(_dmamap_create);
- PCOPY(_dmamap_destroy);
- dt->_dmamap_load = ebus_dmamap_load;
- PCOPY(_dmamap_load_mbuf);
- PCOPY(_dmamap_load_uio);
- PCOPY(_dmamap_load_raw);
- dt->_dmamap_unload = ebus_dmamap_unload;
- dt->_dmamap_sync = ebus_dmamap_sync;
- dt->_dmamem_alloc = ebus_dmamem_alloc;
- dt->_dmamem_free = ebus_dmamem_free;
- dt->_dmamem_map = ebus_dmamem_map;
- dt->_dmamem_unmap = ebus_dmamem_unmap;
- PCOPY(_dmamem_mmap);
-#undef PCOPY
sc->sc_dmatag = dt;
return (dt);
}
@@ -549,53 +523,3 @@ ebus_intr_establish(bus_space_tag_t t, bus_space_tag_t t0, int pri, int level,
handler, arg));
}
-/*
- * bus dma support
- */
-int
-ebus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
- bus_size_t buflen, struct proc *p, int flags)
-{
- return (bus_dmamap_load(t->_parent, map, buf, buflen, p, flags));
-}
-
-void
-ebus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
-{
- bus_dmamap_unload(t->_parent, map);
-}
-
-void
-ebus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
- bus_size_t len, int ops)
-{
- bus_dmamap_sync(t->_parent, map, offset, len, ops);
-}
-
-int
-ebus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
- bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
- int flags)
-{
- return (bus_dmamem_alloc(t->_parent, size, alignment, boundary, segs,
- nsegs, rsegs, flags));
-}
-
-void
-ebus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
-{
- bus_dmamem_free(t->_parent, segs, nsegs);
-}
-
-int
-ebus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
- size_t size, caddr_t *kvap, int flags)
-{
- return (bus_dmamem_map(t->_parent, segs, nsegs, size, kvap, flags));
-}
-
-void
-ebus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
-{
- return (bus_dmamem_unmap(t->_parent, kva, size));
-}
diff --git a/sys/arch/sparc64/dev/iommu.c b/sys/arch/sparc64/dev/iommu.c
index aba6ab17b92..6e630d3b9a6 100644
--- a/sys/arch/sparc64/dev/iommu.c
+++ b/sys/arch/sparc64/dev/iommu.c
@@ -1,7 +1,8 @@
-/* $OpenBSD: iommu.c,v 1.27 2003/02/22 23:51:39 jason Exp $ */
+/* $OpenBSD: iommu.c,v 1.28 2003/03/06 08:26:08 henric Exp $ */
/* $NetBSD: iommu.c,v 1.47 2002/02/08 20:03:45 eeh Exp $ */
/*
+ * Copyright (c) 2003 Henric Jungheim
* Copyright (c) 2001, 2002 Eduardo Horvath
* Copyright (c) 1999, 2000 Matthew R. Green
* All rights reserved.
@@ -38,6 +39,7 @@
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
+#include <sys/mbuf.h>
#include <uvm/uvm_extern.h>
@@ -59,33 +61,58 @@
#define IDB_BUSDMA 0x1
#define IDB_IOMMU 0x2
#define IDB_INFO 0x4
-#define IDB_SYNC 0x8
+#define IDB_SYNC 0x8
+#define IDB_XXX 0x10
+#define IDB_PRINT_MAP 0x20
+#define IDB_BREAK 0x40
int iommudebug = 0x0;
#define DPRINTF(l, s) do { if (iommudebug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif
-int iommu_dvmamap_sync_seg(bus_dma_tag_t, struct iommu_state *,
- bus_dma_segment_t *, bus_addr_t, bus_size_t, int);
-int iommu_dvmamap_sync_range(struct iommu_state *, vaddr_t, bus_size_t);
+void iommu_enter(struct iommu_state *, struct strbuf_ctl *, vaddr_t, paddr_t,
+ int);
+void iommu_remove(struct iommu_state *, struct strbuf_ctl *, vaddr_t);
+int iommu_dvmamap_sync_range(struct strbuf_ctl*, vaddr_t, bus_size_t);
+int iommu_strbuf_flush_done(struct iommu_map_state *);
+int iommu_dvmamap_load_seg(bus_dma_tag_t, struct iommu_state *,
+ bus_dmamap_t, bus_dma_segment_t *, int, int, bus_size_t, bus_size_t);
+int iommu_dvmamap_load_mlist(bus_dma_tag_t, struct iommu_state *,
+ bus_dmamap_t, struct pglist *, int, bus_size_t, bus_size_t);
+int iommu_dvmamap_validate_map(bus_dma_tag_t, struct iommu_state *,
+ bus_dmamap_t);
+void iommu_dvmamap_print_map(bus_dma_tag_t, struct iommu_state *,
+ bus_dmamap_t);
+int iommu_dvmamap_append_range(bus_dma_tag_t, bus_dmamap_t, paddr_t,
+ bus_size_t, int, bus_size_t);
+int64_t iommu_tsb_entry(struct iommu_state *, vaddr_t);
+void strbuf_reset(struct strbuf_ctl *);
+int iommu_iomap_insert_page(struct iommu_map_state *, paddr_t);
+vaddr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
+int iommu_iomap_load_map(struct iommu_state *, struct iommu_map_state *,
+ vaddr_t, int);
+int iommu_iomap_unload_map(struct iommu_state *, struct iommu_map_state *);
+struct iommu_map_state *iommu_iomap_create(int);
+void iommu_iomap_destroy(struct iommu_map_state *);
+void iommu_iomap_clear_pages(struct iommu_map_state *);
+/*
+ * Initiate an STC entry flush.
+ */
static inline void
-iommu_strbuf_flush(struct iommu_state *is, vaddr_t va)
+iommu_strbuf_flush(struct strbuf_ctl *sb, vaddr_t va)
{
- int i;
- for(i = 0; i < 2; ++i) {
- struct strbuf_ctl *sb = is->is_sb[i];
- if(sb == NULL || sb->sb_flush == NULL)
- continue;
-
- bus_space_write_8(sb->sb_bustag, sb->sb_sb,
- STRBUFREG(strbuf_pgflush), va);
+#ifdef DEBUG
+ if (sb->sb_flush == NULL) {
+ printf("iommu_strbuf_flush: attempting to flush w/o STC\n");
+ return;
}
-}
+#endif
-int iommu_strbuf_flush_done(struct iommu_state *);
-int64_t iommu_tsb_entry(struct iommu_state *, vaddr_t);
+ bus_space_write_8(sb->sb_bustag, sb->sb_sb,
+ STRBUFREG(strbuf_pgflush), va);
+}
/*
* initialise the UltraSPARC IOMMU (SBUS or PCI):
@@ -134,10 +161,10 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase)
* contiguous.
*/
- size = NBPG << is->is_tsbsize;
+ size = PAGE_SIZE << is->is_tsbsize;
TAILQ_INIT(&mlist);
if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
- (paddr_t)NBPG, (paddr_t)0, &mlist, 1, 0) != 0)
+ (paddr_t)PAGE_SIZE, (paddr_t)0, &mlist, 1, 0) != 0)
panic("iommu_init: no memory");
va = uvm_km_valloc(kernel_map, size);
@@ -154,7 +181,7 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase)
pmap_enter(pmap_kernel(), va, pa | PMAP_NVC,
VM_PROT_READ|VM_PROT_WRITE,
VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
- va += NBPG;
+ va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
memset(is->is_tsb, 0, size);
@@ -181,9 +208,7 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase)
/*
* now actually start up the IOMMU
- * Don't start the thing until it can see all the TSB data
*/
- membar(MemIssue);
iommu_reset(is);
/*
@@ -194,12 +219,12 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase)
(unsigned long long)is->is_ptsb,
(unsigned long long)(is->is_ptsb + size));
is->is_dvmamap = extent_create(name,
- is->is_dvmabase, is->is_dvmaend - NBPG,
+ is->is_dvmabase, is->is_dvmaend - PAGE_SIZE,
M_DEVBUF, 0, 0, EX_NOWAIT);
}
/*
- * Streaming buffers don't exist on the UltraSPARC IIi; we should have
+ * Streaming buffers don't exist on the UltraSPARC IIi/e; we should have
* detected that already and disabled them. If not, we will notice that
* they aren't there when the STRBUF_EN bit does not remain.
*/
@@ -208,340 +233,464 @@ iommu_reset(struct iommu_state *is)
{
int i;
- /* Need to do 64-bit stores */
-
IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb);
- /* Enable IOMMU in diagnostic mode */
- IOMMUREG_WRITE(is, iommu_cr, is->is_cr | IOMMUCR_DE);
+ /* Enable IOMMU */
+ IOMMUREG_WRITE(is, iommu_cr, is->is_cr);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < 2; ++i) {
struct strbuf_ctl *sb = is->is_sb[i];
- if(sb == NULL || sb->sb_flush == NULL)
+ if (sb == NULL)
continue;
- /* Enable diagnostics mode? */
- bus_space_write_8(sb->sb_bustag, sb->sb_sb,
- STRBUFREG(strbuf_ctl), STRBUF_EN);
+ sb->sb_iommu = is;
+ strbuf_reset(sb);
- membar(Lookaside);
- /* No streaming buffers? Disable them */
- if (bus_space_read_8(sb->sb_bustag, sb->sb_sb,
- STRBUFREG(strbuf_ctl)) == 0) {
- sb->sb_flush = NULL;
- } else {
- /*
- * locate the pa of the flush buffer
- */
-
- pmap_extract(pmap_kernel(),
- (vaddr_t)sb->sb_flush, &sb->sb_flushpa);
+ if (sb->sb_flush) {
+ char buf[64];
+ bus_space_render_tag(sb->sb_bustag, buf, sizeof buf);
+ printf("STC%d on %s enabled\n", i, buf);
}
}
}
/*
- * Here are the iommu control routines.
+ * Initialize one STC.
*/
void
-iommu_enter(struct iommu_state *is, vaddr_t va, int64_t pa, int flags)
+strbuf_reset(struct strbuf_ctl *sb)
+{
+ if(sb->sb_flush == NULL)
+ return;
+
+ bus_space_write_8(sb->sb_bustag, sb->sb_sb,
+ STRBUFREG(strbuf_ctl), STRBUF_EN);
+
+ membar(Lookaside);
+
+ /* No streaming buffers? Disable them */
+ if (bus_space_read_8(sb->sb_bustag, sb->sb_sb,
+ STRBUFREG(strbuf_ctl)) == 0) {
+ sb->sb_flush = NULL;
+ } else {
+ /*
+ * locate the pa of the flush buffer
+ */
+ if (pmap_extract(pmap_kernel(),
+ (vaddr_t)sb->sb_flush, &sb->sb_flushpa) == FALSE)
+ sb->sb_flush = NULL;
+ }
+}
+
+/*
+ * Add an entry to the IOMMU table.
+ *
+ * The entry is marked streaming if an STC was detected and
+ * the BUS_DMA_STREAMING flag is set.
+ */
+void
+iommu_enter(struct iommu_state *is, struct strbuf_ctl *sb, vaddr_t va,
+ paddr_t pa, int flags)
{
int64_t tte;
- int strbuf = flags & BUS_DMA_STREAMING;
+ volatile int64_t *tte_ptr = &is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)];
#ifdef DIAGNOSTIC
- if (va < is->is_dvmabase || va > is->is_dvmaend)
+ if (va < is->is_dvmabase || round_page(va + PAGE_SIZE) >
+ is->is_dvmaend + 1)
panic("iommu_enter: va %#lx not in DVMA space", va);
-#endif
- /* Is the streamcache flush really needed? */
- if (is->is_sb[0] != NULL || is->is_sb[1] != NULL) {
- iommu_strbuf_flush(is, va);
- iommu_strbuf_flush_done(is);
+ tte = *tte_ptr;
+
+ if (tte & IOTTE_V) {
+ printf("Overwriting valid tte entry (dva %lx pa %lx "
+ "&tte %p tte %llx)\n", va, pa, tte_ptr, tte);
+ extent_print(is->is_dvmamap);
+ panic("IOMMU overwrite");
}
- else
- strbuf = 0;
+#endif
tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
- !(flags & BUS_DMA_NOCACHE), (strbuf));
-#ifdef DEBUG
- tte |= (flags & 0xff000LL) << (4 * 8); /* DEBUG */
-#endif /* DEBUG */
-
+ !(flags & BUS_DMA_NOCACHE), (flags & BUS_DMA_STREAMING));
DPRINTF(IDB_IOMMU, ("Clearing TSB slot %d for va %p\n",
- (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va));
- is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
- /* Make is->is_tsb[] change globally visible. Needed? */
- membar(MemIssue);
+ (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va));
+
+ *tte_ptr = tte;
+
+ /*
+ * Why bother to flush this va? It should only be relevant for
+ * V ==> V or V ==> non-V transitions. The former is illegal and
+ * the latter is never done here. It is true that this provides
+ * some protection against a misbehaving master using an address
+ * after it should. The IOMMU documentation specifically warns
+ * that the consequences of a simultaneous IOMMU flush and DVMA
+ * access to the same address are undefined. (By that argument,
+ * the STC should probably be flushed as well.) Note that if
+ * a bus master keeps using a memory region after it has been
+ * unmapped, the specific behavior of the IOMMU is likely to
+ * be the least of our worries.
+ */
IOMMUREG_WRITE(is, iommu_flush, va);
DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
- va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
- (void *)(u_long)
- &is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
- (u_long)tte));
+ va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
+ (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
+ (u_long)tte));
+}
+
+/*
+ * Remove an entry from the IOMMU table.
+ *
+ * The entry is flushed from the STC if an STC is detected and the TSB
+ * entry has the IOTTE_STREAM flags set. It should be impossible for
+ * the TSB entry to have this flag set without the BUS_DMA_STREAMING
+ * flag, but better to be safe. (The IOMMU will be ignored as long
+ * as an STC entry exists.)
+ */
+void
+iommu_remove(struct iommu_state *is, struct strbuf_ctl *sb, vaddr_t va)
+{
+ int64_t *tte_ptr = &is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)];
+ int64_t tte;
+
+#ifdef DIAGNOSTIC
+ if (trunc_page(va) < is->is_dvmabase || round_page(va) >
+ is->is_dvmaend + 1)
+ panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
+ if (va != trunc_page(va)) {
+ printf("iommu_remove: unaligned va: %lx\n", va);
+ va = trunc_page(va);
+ }
+#endif
+ tte = *tte_ptr;
+
+ DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%llx]@%p\n",
+ va, tte, tte_ptr));
+
+#ifdef DIAGNOSTIC
+ if ((tte & IOTTE_V) == 0) {
+ printf("Removing invalid tte entry (dva %lx &tte %p "
+ "tte %llx)\n", va, tte_ptr, tte);
+ extent_print(is->is_dvmamap);
+ panic("IOMMU remove overwrite");
+ }
+#endif
+
+ *tte_ptr = tte & ~IOTTE_V;
+
+ /*
+ * IO operations are strongly ordered WRT each other. It is
+ * unclear how they relate to normal memory accesses.
+ */
+ membar(StoreStore);
+
+ IOMMUREG_WRITE(is, iommu_flush, va);
+
+ if (sb && (tte & IOTTE_STREAM))
+ iommu_strbuf_flush(sb, va);
+
+ /* Should we sync the iommu and stc here? */
}
/*
- * Find the value of a DVMA address (debug routine).
+ * Find the physical address of a DVMA address (debug routine).
*/
paddr_t
iommu_extract(struct iommu_state *is, vaddr_t dva)
{
int64_t tte = 0;
- if (dva >= is->is_dvmabase && dva < is->is_dvmaend)
+ if (dva >= is->is_dvmabase && dva <= is->is_dvmaend)
tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];
- if ((tte & IOTTE_V) == 0)
- return ((paddr_t)-1L);
return (tte & IOTTE_PAMASK);
}
/*
- * Fetch a tsb entry with some sanity checking.
+ * Lookup a TSB entry for a given DVMA (debug routine).
*/
int64_t
-iommu_tsb_entry(struct iommu_state *is, vaddr_t dva)
+iommu_lookup_tte(struct iommu_state *is, vaddr_t dva)
{
- int64_t tte;
-
- if (dva < is->is_dvmabase && dva >= is->is_dvmaend)
- panic("invalid dva: %llx", (long long)dva);
-
- membar(Lookaside);
+ int64_t tte = 0;
+
+ if (dva >= is->is_dvmabase && dva <= is->is_dvmaend)
+ tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];
- tte = is->is_tsb[IOTSBSLOT(dva,is->is_tsbsize)];
+ return (tte);
+}
- if ((tte & IOTTE_V) == 0)
- panic("iommu_tsb_entry: invalid entry %llx", (long long)dva);
+/*
+ * Lookup a TSB entry at a given physical address (debug routine).
+ */
+int64_t
+iommu_fetch_tte(struct iommu_state *is, paddr_t pa)
+{
+ int64_t tte = 0;
+
+ if (pa >= is->is_ptsb && pa < is->is_ptsb +
+ (PAGE_SIZE << is->is_tsbsize))
+ tte = ldxa(pa, ASI_PHYS_CACHED);
return (tte);
}
/*
- * iommu_remove: removes mappings created by iommu_enter
- *
- * Only demap from IOMMU if flag is set.
- *
- * XXX: this function needs better internal error checking.
+ * Fetch a TSB entry with some sanity checking.
*/
-void
-iommu_remove(struct iommu_state *is, vaddr_t va, size_t len)
+int64_t
+iommu_tsb_entry(struct iommu_state *is, vaddr_t dva)
{
-#ifdef DIAGNOSTIC
- if (va < is->is_dvmabase || va > is->is_dvmaend)
- panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
- if ((long)(va + len) < (long)va)
- panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
- (long) va, (long) len);
- if (len & ~0xfffffff)
- panic("iommu_remove: rediculous len 0x%lx", (u_long)len);
-#endif
+ int64_t tte;
- va = trunc_page(va);
- DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
- va, (u_long)IOTSBSLOT(va,is->is_tsbsize),
- &is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]));
- while (len > 0) {
- /* NetBSD does *not* flush the streaming buffer (here, anyway) */
- DPRINTF(IDB_IOMMU,
- ("iommu_remove: clearing TSB slot %d for va %p size %lx\n",
- (int)IOTSBSLOT(va,is->is_tsbsize),
- (void *)(u_long)va, (u_long)len));
- if (is->is_sb[0] != NULL || is->is_sb[1] != NULL) {
- DPRINTF(IDB_IOMMU,
- ("iommu_remove: flushing va %p TSB[%lx]@%p=%lx, "
- "%lu bytes left\n",
- (void *)(u_long)va,
- (long)IOTSBSLOT(va,is->is_tsbsize),
- (void *)(u_long)
- &is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
- (long)
- (is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]),
- (u_long)len));
- iommu_strbuf_flush(is, va);
- if (len <= NBPG)
- iommu_strbuf_flush_done(is);
- DPRINTF(IDB_IOMMU,
- ("iommu_remove: flushed va %p TSB[%lx]@%p=%lx, "
- "%lu bytes left\n",
- (void *)(u_long)va,
- (long)IOTSBSLOT(va,is->is_tsbsize),
- (void *)(u_long)
- &is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
- (long)
- (is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]),
- (u_long)len));
- }
+ if (dva < is->is_dvmabase || dva > is->is_dvmaend)
+ panic("invalid dva: %llx", (long long)dva);
- if (len <= NBPG)
- len = 0;
- else
- len -= NBPG;
+ tte = is->is_tsb[IOTSBSLOT(dva,is->is_tsbsize)];
- /* XXX Zero-ing the entry would not require RMW */
- is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] &= ~IOTTE_V;
- membar(MemIssue); /* Needed? */
- IOMMUREG_WRITE(is, iommu_flush, va);
- va += NBPG;
- }
+ if ((tte & IOTTE_V) == 0)
+ panic("iommu_tsb_entry: invalid entry %lx", dva);
+
+ return (tte);
}
+/*
+ * Initiate and then block until an STC flush synchronization has completed.
+ */
int
-iommu_strbuf_flush_done(struct iommu_state *is)
+iommu_strbuf_flush_done(struct iommu_map_state *ims)
{
+ struct strbuf_ctl *sb = ims->ims_sb;
+ struct strbuf_flush *sf = &ims->ims_flush;
struct timeval cur, flushtimeout;
struct timeval to = { 0, 500000 };
- u_int64_t flush[2];
- struct strbuf_ctl *sb[2];
- int i;
- int present[2];
+ u_int64_t flush;
+ int timeout_started = 0;
- for(i = 0; i < 2; ++i) {
- sb[i] = is->is_sb[i];
- present[i] =
- (sb[i] == NULL || sb[i]->sb_flush == NULL) ? 0 : 1;
+#ifdef DIAGNOSTIC
+ if (sb == NULL) {
+ panic("iommu_strbuf_flush_done: invalid flush buffer");
}
-
- if (!present[0] && !present[1])
- return (0);
+#endif
/*
* Streaming buffer flushes:
*
- * 1 Tell strbuf to flush by storing va to strbuf_pgflush. If
- * we're not on a cache line boundary (64-bits):
+ * 1 Tell strbuf to flush by storing va to strbuf_pgflush.
* 2 Store 0 in flag
* 3 Store pointer to flag in flushsync
* 4 wait till flushsync becomes 0x1
*
- * If it takes more than .5 sec, something
- * went wrong.
+ * If it takes more than .5 sec, something went very, very wrong.
*/
/*
- * If we're reading from the ASI_PHYS_CACHED, then we'll write to
+ * If we're reading from ASI_PHYS_CACHED, then we'll write to
* it too. No need to tempt fate or learn about Si bugs or such.
* FreeBSD just uses normal "volatile" reads/writes...
*/
- for(i = 0; i < 2; ++i)
- if(present[i])
- stxa(sb[i]->sb_flushpa, ASI_PHYS_CACHED, 0);
+ stxa(sf->sbf_flushpa, ASI_PHYS_CACHED, 0);
/*
* Ensure any previous strbuf operations are complete and that
- * memory is initialized before the IOMMU uses it
+ * memory is initialized before the IOMMU uses it.
+ * Is this needed? How are IO and memory operations ordered?
*/
- membar(MemIssue);
+ membar(StoreStore);
- for(i = 0; i < 2; ++i) {
- if (present[i])
- bus_space_write_8(sb[i]->sb_bustag, sb[i]->sb_sb,
- STRBUFREG(strbuf_flushsync), sb[i]->sb_flushpa);
- }
+ bus_space_write_8(sb->sb_bustag, sb->sb_sb,
+ STRBUFREG(strbuf_flushsync), sf->sbf_flushpa);
- microtime(&cur);
- timeradd(&cur, &to, &flushtimeout);
-
DPRINTF(IDB_IOMMU,
- ("iommu_strbuf_flush_done: flush[0] = %lx flush[1] = %lx "
- "pa[0] = %lx pa[1] = %lx now=%lx:%lx until = %lx:%lx\n",
- (long)present[0] ?
- ldxa(sb[0]->sb_flushpa, ASI_PHYS_CACHED) : 1,
- (long)present[1] ?
- ldxa(sb[1]->sb_flushpa, ASI_PHYS_CACHED) : 1,
- (long)sb[0]->sb_flushpa,
- (long)sb[1]->sb_flushpa, cur.tv_sec, cur.tv_usec,
- flushtimeout.tv_sec, flushtimeout.tv_usec));
-
- membar(MemIssue | Lookaside);
-
- /* Bypass non-coherent D$ */
- /* non-coherent...? Huh? */
- for(;;) {
- membar(LoadLoad);
+ ("iommu_strbuf_flush_done: flush = %llx pa = %lx\n",
+ ldxa(sf->sbf_flushpa, ASI_PHYS_CACHED), sf->sbf_flushpa));
- flush[0] =
- present[0] ? ldxa(sb[0]->sb_flushpa, ASI_PHYS_CACHED) : 1;
- flush[1] =
- present[1] ? ldxa(sb[1]->sb_flushpa, ASI_PHYS_CACHED) : 1;
+ membar(StoreLoad | Lookaside);
- if(flush[0] && flush[1])
- break;
+ for(;;) {
+ int i;
+
+ /*
+ * Try to shave a few instruction cycles off the average
+ * latency by only checking the elapsed time every few
+ * fetches.
+ */
+ for (i = 0; i < 1000; ++i) {
+ membar(LoadLoad);
+ /* Bypass non-coherent D$ */
+ /* non-coherent...? Huh? */
+ flush = ldxa(sf->sbf_flushpa, ASI_PHYS_CACHED);
+
+ if (flush) {
+ DPRINTF(IDB_IOMMU,
+ ("iommu_strbuf_flush_done: flushed\n"));
+ return (0);
+ }
+ }
microtime(&cur);
- if (timercmp(&cur, &flushtimeout, >))
- break;
- }
-#ifdef DIAGNOSTIC
- if (flush[0] == 0 || flush[1] == 0) {
- printf("iommu_strbuf_flush_done: flush timeout %p/%llx, "
- "%p/%llx\n",
- present[0] ? sb[0]->sb_flushpa : 0, flush[0],
- present[1] ? sb[1]->sb_flushpa : 0, flush[1]);
- /* panic? */
-#ifdef DDB
-#if 0
- Debugger();
-#endif
-#endif
+ if (timeout_started) {
+ if (timercmp(&cur, &flushtimeout, >))
+ panic("STC timeout at %lx (%lld)",
+ sf->sbf_flushpa, flush);
+ } else {
+ timeradd(&cur, &to, &flushtimeout);
+
+ timeout_started = 1;
+
+ DPRINTF(IDB_IOMMU,
+ ("iommu_strbuf_flush_done: flush = %llx pa = %lx "
+ "now=%lx:%lx until = %lx:%lx\n",
+ ldxa(sf->sbf_flushpa, ASI_PHYS_CACHED),
+ sf->sbf_flushpa, cur.tv_sec, cur.tv_usec,
+ flushtimeout.tv_sec, flushtimeout.tv_usec));
+ }
}
-#endif
- DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flushed\n"));
- return (flush[0] && flush[1]);
}
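
Stripped of the timeout and debug scaffolding, the four-step protocol
from the comment above reduces to roughly this sketch, using the sb/sf
names of the function it condenses (step 1, the store to
strbuf_pgflush, is performed earlier by iommu_strbuf_flush()):

        stxa(sf->sbf_flushpa, ASI_PHYS_CACHED, 0);      /* 2: flag = 0 */
        membar(StoreStore);
        bus_space_write_8(sb->sb_bustag, sb->sb_sb,     /* 3: arm flushsync */
            STRBUFREG(strbuf_flushsync), sf->sbf_flushpa);
        while (ldxa(sf->sbf_flushpa, ASI_PHYS_CACHED) == 0)
                membar(LoadLoad);                       /* 4: wait for 0x1 */
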
/*
* IOMMU DVMA operations, common to SBUS and PCI.
*/
int
+iommu_dvmamap_create(bus_dma_tag_t t, struct iommu_state *is,
+ struct strbuf_ctl *sb, bus_size_t size, int nsegments, bus_size_t maxsegsz,
+ bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
+{
+ int ret;
+ bus_dmamap_t map;
+ struct iommu_map_state *ims;
+
+ ret = bus_dmamap_create(t->_parent, size, nsegments, maxsegsz,
+ boundary, flags, &map);
+
+ if (ret)
+ return (ret);
+
+ ims = iommu_iomap_create(nsegments);
+
+ if (ims == NULL) {
+ bus_dmamap_destroy(t->_parent, map);
+ return (ENOMEM);
+ }
+
+ ims->ims_sb = sb;
+ map->_dm_cookie = ims;
+ *dmamap = map;
+
+ return (0);
+}
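
A bus front-end (the sbus/psycho/schizo changes elsewhere in this
commit) is expected to plug this in as its _dmamap_create, roughly as in
this sketch with hypothetical softc member names:

        int
        example_dmamap_create(bus_dma_tag_t t, bus_size_t size,
            int nsegments, bus_size_t maxsegsz, bus_size_t boundary,
            int flags, bus_dmamap_t *dmamap)
        {
                struct example_softc *sc = t->_cookie;

                /* Stack an iommu_map_state cookie onto the parent's map. */
                return (iommu_dvmamap_create(t, &sc->sc_is, &sc->sc_sb,
                    size, nsegments, maxsegsz, boundary, flags, dmamap));
        }
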
+
+void
+iommu_dvmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
+{
+ /*
+ * The specification (man page) requires a loaded
+ * map to be unloaded before it is destroyed.
+ */
+ if (map->dm_nsegs)
+ bus_dmamap_unload(t, map);
+
+ if (map->_dm_cookie)
+ iommu_iomap_destroy(map->_dm_cookie);
+ map->_dm_cookie = NULL;
+
+ bus_dmamap_destroy(t->_parent, map);
+}
+
+/*
+ * Load a contiguous kva buffer into a dmamap. The physical pages are
+ * not assumed to be contiguous. Two passes are made through the buffer
+ * and both call pmap_extract() for the same va->pa translations. It
+ * is possible to run out of pa->dvma mappings; the code should be smart
+ * enough to resize the iomap (when the "flags" permit allocation). It
+ * is trivial to compute the number of entries required (round the length
+ * up to the page size and then divide by the page size)...
+ */
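
The entry count called trivial above would, assuming the IOMMU page size
equals PAGE_SIZE, come out of a helper like this hypothetical one (not
part of the commit):

        static inline u_long
        iommu_iomap_count_pages(vaddr_t va, bus_size_t len)
        {
                /* Pages touched by [va, va + len): round ends outward. */
                return ((round_page(va + len) - trunc_page(va)) / PAGE_SIZE);
        }
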
+int
iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
void *buf, bus_size_t buflen, struct proc *p, int flags)
{
int s;
- int err;
+ int err = 0;
bus_size_t sgsize;
- paddr_t curaddr;
u_long dvmaddr, sgstart, sgend;
bus_size_t align, boundary;
- vaddr_t vaddr = (vaddr_t)buf;
- int seg;
+ struct iommu_map_state *ims = map->_dm_cookie;
pmap_t pmap;
+#ifdef DIAGNOSTIC
+ if (ims == NULL)
+ panic("iommu_dvmamap_load: null map state");
+#endif
+
if (map->dm_nsegs) {
- /* Already in use?? */
+ /*
+ * Is it still in use? _bus_dmamap_load should have taken care
+ * of this.
+ */
#ifdef DIAGNOSTIC
panic("iommu_dvmamap_load: map still in use");
#endif
bus_dmamap_unload(t, map);
}
+
/*
* Make sure that on error condition we return "no valid mappings".
*/
map->dm_nsegs = 0;
- if (buflen > map->_dm_size) {
+ if (buflen < 1 || buflen > map->_dm_size) {
DPRINTF(IDB_BUSDMA,
("iommu_dvmamap_load(): error %d > %d -- "
"map size exceeded!\n", (int)buflen, (int)map->_dm_size));
return (EINVAL);
}
- sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
-
/*
* A boundary presented to bus_dmamem_alloc() takes precedence
* over boundary in the map.
*/
if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
boundary = map->_dm_boundary;
- align = max(map->dm_segs[0]._ds_align, NBPG);
+ align = max(map->dm_segs[0]._ds_align, PAGE_SIZE);
+
+ pmap = p ? p->p_vmspace->vm_map.pmap : pmap_kernel();
+
+ /* Count up the total number of pages we need */
+ iommu_iomap_clear_pages(ims);
+ { /* Scope */
+ bus_addr_t a, aend;
+ bus_addr_t addr = (vaddr_t)buf;
+ int seg_len = buflen;
+
+ aend = round_page(addr + seg_len - 1);
+ for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
+ paddr_t pa;
+
+ if (pmap_extract(pmap, a, &pa) == FALSE) {
+ printf("iomap pmap error addr 0x%llx\n", a);
+ iommu_iomap_clear_pages(ims);
+ return (E2BIG);
+ }
+
+ err = iommu_iomap_insert_page(ims, pa);
+ if (err) {
+ printf("iomap insert error: %d for "
+ "va 0x%llx pa 0x%lx "
+ "(buf %p len %lld/%llx)\n",
+ err, a, pa, buf, buflen, buflen);
+ iommu_dvmamap_print_map(t, is, map);
+ iommu_iomap_clear_pages(ims);
+ return (E2BIG);
+ }
+ }
+ }
+ sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
if (flags & BUS_DMA_24BIT) {
sgstart = max(is->is_dvmamap->ex_start, 0xff000000);
@@ -550,9 +699,10 @@ iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
sgstart = is->is_dvmamap->ex_start;
sgend = is->is_dvmamap->ex_end;
}
+
/*
* If our segment size is larger than the boundary we need to
- * split the transfer up int little pieces ourselves.
+ * split the transfer up into little pieces ourselves.
*/
s = splhigh();
err = extent_alloc_subregion(is->is_dvmamap, sgstart, sgend,
@@ -565,7 +715,8 @@ iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
(int)sgsize, flags);
#ifdef DDB
- Debugger();
+ if (iommudebug & IDB_BREAK)
+ Debugger();
#endif
}
#endif
@@ -579,152 +730,104 @@ iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
map->_dm_dvmastart = dvmaddr;
map->_dm_dvmasize = sgsize;
- /*
- * Now split the DVMA range into segments, not crossing
- * the boundary.
- */
- seg = 0;
- sgstart = dvmaddr + (vaddr & PGOFSET);
- sgend = sgstart + buflen - 1;
- map->dm_segs[seg].ds_addr = sgstart;
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary-1 %lx "
- "~(boundary-1) %lx\n", boundary, (boundary-1), ~(boundary-1)));
- while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
- /* Oops. We crossed a boundary. Split the xfer. */
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
- "seg %d start %lx size %lx\n", seg,
- (long)map->dm_segs[seg].ds_addr,
- map->dm_segs[seg].ds_len));
- map->dm_segs[seg].ds_len =
- boundary - (sgstart & (boundary - 1));
- if (++seg >= map->_dm_segcnt) {
- /* Too many segments. Fail the operation. */
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
- "too many segments %d\n", seg));
- s = splhigh();
- /* How can this fail? And if it does what can we do? */
- err = extent_free(is->is_dvmamap,
- dvmaddr, sgsize, EX_NOWAIT);
- map->_dm_dvmastart = 0;
- map->_dm_dvmasize = 0;
- splx(s);
- return (E2BIG);
- }
- sgstart = roundup(sgstart, boundary);
- map->dm_segs[seg].ds_addr = sgstart;
- }
- map->dm_segs[seg].ds_len = sgend - sgstart + 1;
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
- "seg %d start %lx size %lx\n", seg,
- (long)map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len));
- map->dm_nsegs = seg + 1;
map->dm_mapsize = buflen;
- if (p != NULL)
- pmap = p->p_vmspace->vm_map.pmap;
- else
- pmap = pmap_kernel();
-
- for (; buflen > 0; ) {
- /*
- * Get the physical address for this page.
- */
- if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
- bus_dmamap_unload(t, map);
- return (-1);
- }
+#ifdef DEBUG
+ iommu_dvmamap_validate_map(t, is, map);
+#endif
- /*
- * Compute the segment size, and adjust counts.
- */
- sgsize = NBPG - ((u_long)vaddr & PGOFSET);
- if (buflen < sgsize)
- sgsize = buflen;
+ if (iommu_iomap_load_map(is, ims, dvmaddr, flags))
+ return (E2BIG);
+
+ { /* Scope */
+ bus_addr_t a, aend;
+ bus_addr_t addr = (vaddr_t)buf;
+ int seg_len = buflen;
+
+ aend = round_page(addr + seg_len - 1);
+ for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
+ bus_addr_t pgstart;
+ bus_addr_t pgend;
+ paddr_t pa;
+ int pglen;
+
+ /* Yuck... Redoing the same pmap_extract... */
+ if (pmap_extract(pmap, a, &pa) == FALSE) {
+ printf("iomap pmap error addr 0x%llx\n", a);
+ iommu_iomap_clear_pages(ims);
+ return (E2BIG);
+ }
- DPRINTF(IDB_BUSDMA,
- ("iommu_dvmamap_load: map %p loading va %p "
- "dva %lx at pa %lx\n",
- map, (void *)vaddr, (long)dvmaddr,
- (long)(curaddr&~(NBPG-1))));
- iommu_enter(is, trunc_page(dvmaddr), trunc_page(curaddr),
- flags | 0x4000); /* 0x4000? Magic...? */
-
- dvmaddr += PAGE_SIZE;
- vaddr += sgsize;
- buflen -= sgsize;
- }
-#ifdef DIAGNOSTIC
- for (seg = 0; seg < map->dm_nsegs; seg++) {
- if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
- map->dm_segs[seg].ds_addr > is->is_dvmaend) {
- printf("seg %d dvmaddr %lx out of range %x - %x\n",
- seg, (long)map->dm_segs[seg].ds_addr,
- is->is_dvmabase, is->is_dvmaend);
-#ifdef DDB
- Debugger();
-#endif
+ pgstart = pa | (max(a, addr) & PAGE_MASK);
+ pgend = pa | (min(a + PAGE_SIZE - 1,
+ addr + seg_len - 1) & PAGE_MASK);
+ pglen = pgend - pgstart + 1;
+
+ if (pglen < 1)
+ continue;
+
+ err = iommu_dvmamap_append_range(t, map, pgstart,
+ pglen, flags, boundary);
+ if (err) {
+ printf("iomap load seg page: %d for "
+ "va 0x%llx pa %lx (%llx - %llx) "
+ "for %d/0x%x\n",
+ err, a, pa, pgstart, pgend, pglen, pglen);
+ return (err);
+ }
}
}
-#endif
- return (0);
-}
+#ifdef DIAGNOSTIC
+ iommu_dvmamap_validate_map(t, is, map);
+#endif
-void
-iommu_dvmamap_unload(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map)
-{
- int error, s;
- bus_size_t sgsize;
-
- /* Flush the iommu */
#ifdef DEBUG
- if (!map->_dm_dvmastart) {
- printf("iommu_dvmamap_unload: No dvmastart is zero\n");
+ if (err)
+ printf("**** iommu_dvmamap_load failed with error %d\n",
+ err);
+
+ if (err || (iommudebug & IDB_PRINT_MAP)) {
+ iommu_dvmamap_print_map(t, is, map);
#ifdef DDB
- Debugger();
+ if (iommudebug & IDB_BREAK)
+ Debugger();
#endif
}
#endif
- iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);
-
- /* Flush the caches */
- bus_dmamap_unload(t->_parent, map);
-
- /* Mark the mappings as invalid. */
- map->dm_mapsize = 0;
- map->dm_nsegs = 0;
-
- sgsize = map->_dm_dvmasize;
- s = splhigh();
- error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
- map->_dm_dvmasize, EX_NOWAIT);
- map->_dm_dvmastart = 0;
- map->_dm_dvmasize = 0;
- splx(s);
- if (error != 0)
- printf("warning: %qd of DVMA space lost\n", (long long)sgsize);
-
- /* Clear the map */
+ return (err);
}
-
+/*
+ * Load a dvmamap from an array of segs or an mlist (if the first
+ * "segs" entry's mlist is non-null). It calls iommu_dvmamap_load_segs()
+ * or iommu_dvmamap_load_mlist() for part of the 2nd pass through the
+ * mapping. This is ugly. A better solution would probably be to have
+ * function pointers for implementing the traversal. That way, there
+ * could be one core load routine for each of the three required algorithms
+ * (buffer, seg, and mlist). That would also mean that the traversal
+ * algorithm would then only need one implementation for each algorithm
+ * instead of two (one for populating the iomap and one for populating
+ * the dvma map).
+ */
int
iommu_dvmamap_load_raw(bus_dma_tag_t t, struct iommu_state *is,
bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int flags,
bus_size_t size)
{
- struct vm_page *m;
- int i, j, s;
+ int i, s;
int left;
- int err;
+ int err = 0;
bus_size_t sgsize;
- paddr_t pa;
bus_size_t boundary, align;
u_long dvmaddr, sgstart, sgend;
- struct pglist *mlist;
- int pagesz = PAGE_SIZE;
- int npg = 0; /* DEBUG */
+ struct iommu_map_state *ims = map->_dm_cookie;
+
+#ifdef DIAGNOSTIC
+ if (ims == NULL)
+ panic("iommu_dvmamap_load_raw: null map state");
+#endif
if (map->dm_nsegs) {
/* Already in use?? */
@@ -741,24 +844,55 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, struct iommu_state *is,
if ((boundary = segs[0]._ds_boundary) == 0)
boundary = map->_dm_boundary;
- align = max(segs[0]._ds_align, pagesz);
+ align = max(segs[0]._ds_align, PAGE_SIZE);
/*
* Make sure that on error condition we return "no valid mappings".
*/
map->dm_nsegs = 0;
- /* Count up the total number of pages we need */
- pa = segs[0].ds_addr;
- sgsize = 0;
- left = size;
- for (i = 0; left && i < nsegs; i++) {
- if (round_page(pa) != round_page(segs[i].ds_addr))
- sgsize = round_page(sgsize);
- sgsize += min(left, segs[i].ds_len);
- left -= segs[i].ds_len;
- pa = segs[i].ds_addr + segs[i].ds_len;
+
+ iommu_iomap_clear_pages(ims);
+ if (segs[0]._ds_mlist) {
+ struct pglist *mlist = segs[0]._ds_mlist;
+ struct vm_page *m;
+ for (m = TAILQ_FIRST(mlist); m != NULL;
+ m = TAILQ_NEXT(m,pageq)) {
+ err = iommu_iomap_insert_page(ims, VM_PAGE_TO_PHYS(m));
+
+ if(err) {
+ printf("iomap insert error: %d for "
+ "pa 0x%lx\n", err, VM_PAGE_TO_PHYS(m));
+ iommu_iomap_clear_pages(ims);
+ return (E2BIG);
+ }
+ }
+ } else {
+ /* Count up the total number of pages we need */
+ for (i = 0, left = size; left > 0 && i < nsegs; i++) {
+ bus_addr_t a, aend;
+ bus_size_t len = segs[i].ds_len;
+ bus_addr_t addr = segs[i].ds_addr;
+ int seg_len = min(left, len);
+
+ if (len < 1)
+ continue;
+
+ aend = round_page(addr + seg_len - 1);
+ for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
+
+ err = iommu_iomap_insert_page(ims, a);
+ if (err) {
+ printf("iomap insert error: %d for "
+ "pa 0x%llx\n", err, a);
+ iommu_iomap_clear_pages(ims);
+ return (E2BIG);
+ }
+ }
+
+ left -= seg_len;
+ }
}
- sgsize = round_page(sgsize);
+ sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
if (flags & BUS_DMA_24BIT) {
sgstart = max(is->is_dvmamap->ex_start, 0xff000000);
@@ -767,11 +901,12 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, struct iommu_state *is,
sgstart = is->is_dvmamap->ex_start;
sgend = is->is_dvmamap->ex_end;
}
- s = splhigh();
+
/*
* If our segment size is larger than the boundary we need to
* split the transfer up into little pieces ourselves.
*/
+ s = splhigh();
err = extent_alloc_subregion(is->is_dvmamap, sgstart, sgend,
sgsize, align, 0, (sgsize > boundary) ? 0 : boundary,
EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
@@ -785,7 +920,8 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, struct iommu_state *is,
printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) "
"failed!\n", (int)sgsize, flags);
#ifdef DDB
- Debugger();
+ if (iommudebug & IDB_BREAK)
+ Debugger();
#else
panic("");
#endif
@@ -798,224 +934,516 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, struct iommu_state *is,
map->_dm_dvmastart = dvmaddr;
map->_dm_dvmasize = sgsize;
- if ((mlist = segs[0]._ds_mlist) == NULL) {
- u_long prev_va = NULL;
- paddr_t prev_pa = 0;
- int end = 0, offset;
+ map->dm_mapsize = size;
- /*
- * This segs is made up of individual physical
- * segments, probably by _bus_dmamap_load_uio() or
- * _bus_dmamap_load_mbuf(). Ignore the mlist and
- * load each one individually.
- */
- map->dm_mapsize = size;
-
- j = 0;
- for (i = 0; i < nsegs; i++) {
- pa = segs[i].ds_addr;
- offset = (pa & PGOFSET);
- pa = trunc_page(pa);
- dvmaddr = trunc_page(dvmaddr);
- left = min(size, segs[i].ds_len);
-
- DPRINTF(IDB_INFO, ("iommu_dvamap_load_raw: converting "
- "physseg %d start %lx size %lx\n", i,
- (long)segs[i].ds_addr, segs[i].ds_len));
-
- if ((pa == prev_pa) &&
- ((offset != 0) || (end != offset))) {
- /* We can re-use this mapping */
#ifdef DEBUG
-if (iommudebug & 0x10) printf("reusing dva %lx prev %lx pa %lx prev %lx\n",
- dvmaddr, prev_va, pa, prev_pa);
+ iommu_dvmamap_validate_map(t, is, map);
#endif
- dvmaddr = prev_va;
+
+ if (iommu_iomap_load_map(is, ims, dvmaddr, flags))
+ return (E2BIG);
+
+ if (segs[0]._ds_mlist)
+ err = iommu_dvmamap_load_mlist(t, is, map, segs[0]._ds_mlist,
+ flags, size, boundary);
+ else
+ err = iommu_dvmamap_load_seg(t, is, map, segs, nsegs,
+ flags, size, boundary);
+
+ if (err)
+ iommu_iomap_unload_map(is, ims);
+
+#ifdef DIAGNOSTIC
+ /* The map should be valid even if the load failed */
+ if (iommu_dvmamap_validate_map(t, is, map)) {
+ printf("load size %lld/0x%llx\n", size, size);
+ if (segs[0]._ds_mlist)
+ printf("mlist %p\n", segs[0]._ds_mlist);
+ else {
+ long tot_len = 0;
+ long clip_len = 0;
+ printf("segs %p nsegs %d\n", segs, nsegs);
+
+ left = size;
+ for(i = 0; i < nsegs; i++) {
+ bus_size_t len = segs[i].ds_len;
+ bus_addr_t addr = segs[i].ds_addr;
+ int seg_len = min(left, len);
+
+ printf("addr %llx len %lld/0x%llx seg_len "
+ "%d/0x%x left %d/0x%x\n", addr, len, len,
+ seg_len, seg_len, left, left);
+
+ left -= seg_len;
+
+ clip_len += seg_len;
+ tot_len += segs[i].ds_len;
}
- sgstart = dvmaddr + offset;
- sgend = sgstart + left - 1;
+ printf("total length %ld/0x%lx total seg. "
+ "length %ld/0x%lx\n", tot_len, tot_len, clip_len,
+ clip_len);
+ }
+
+ if (err == 0)
+ err = 1;
+ }
+
+#endif
- /* Are the segments virtually adjacent? */
- if ((j > 0) && (end == offset) &&
- ((offset = 0) || (pa == prev_pa))) {
- /* Just append to the previous segment. */
#ifdef DEBUG
-if (iommudebug & 0x10) {
-printf("appending offset %x pa %lx, prev %lx dva %lx prev %lx\n",
- offset, pa, prev_pa, dvmaddr, prev_va);
-}
+ if (err)
+ printf("**** iommu_dvmamap_load_raw failed with error %d\n",
+ err);
+
+ if (err || (iommudebug & IDB_PRINT_MAP)) {
+ iommu_dvmamap_print_map(t, is, map);
+#ifdef DDB
+ if (iommudebug & IDB_BREAK)
+ Debugger();
+#endif
+ }
#endif
- map->dm_segs[--j].ds_len += left;
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
- "appending seg %d start %lx size %lx\n", j,
- (long)map->dm_segs[j].ds_addr,
- map->dm_segs[j].ds_len));
- } else {
- if (j >= map->_dm_segcnt) {
- iommu_dvmamap_unload(t, is, map);
- return (E2BIG);
- }
- map->dm_segs[j].ds_addr = sgstart;
- map->dm_segs[j].ds_len = left;
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
- "seg %d start %lx size %lx\n", j,
- (long)map->dm_segs[j].ds_addr,
- map->dm_segs[j].ds_len));
- }
- end = (offset + left) & PGOFSET;
-
- /* Check for boundary issues */
- while ((sgstart & ~(boundary - 1)) !=
- (sgend & ~(boundary - 1))) {
- /* Need a new segment. */
- map->dm_segs[j].ds_len =
- boundary - (sgstart & (boundary - 1));
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
- "seg %d start %lx size %lx\n", j,
- (long)map->dm_segs[j].ds_addr,
- (long)map->dm_segs[j].ds_len));
- if (++j >= map->_dm_segcnt) {
- iommu_dvmamap_unload(t, is, map);
- return (E2BIG);
- }
- sgstart = roundup(sgstart, boundary);
- map->dm_segs[j].ds_addr = sgstart;
- map->dm_segs[j].ds_len = sgend - sgstart + 1;
- }
+ return (err);
+}
+
+/*
+ * Insert a range of addresses into a loaded map respecting the specified
+ * boundary and alignment restrictions. The range is specified by its
+ * physical address and length. The range cannot cross a page boundary.
+ * This code (along with most of the rest of the functions in this file)
+ * assumes that the IOMMU page size is equal to PAGE_SIZE.
+ */
+int
+iommu_dvmamap_append_range(bus_dma_tag_t t, bus_dmamap_t map, paddr_t pa,
+ bus_size_t length, int flags, bus_size_t boundary)
+{
+ struct iommu_map_state *ims = map->_dm_cookie;
+ bus_addr_t sgstart, sgend, bd_mask;
+ bus_dma_segment_t *seg = NULL;
+ int i = map->dm_nsegs;
- if (sgsize == 0)
- panic("iommu_dmamap_load_raw: size botch");
-
- /* Now map a series of pages. */
- while (dvmaddr <= sgend) {
- DPRINTF(IDB_BUSDMA,
- ("iommu_dvamap_load_raw: map %p "
- "loading va %lx at pa %lx\n",
- map, (long)dvmaddr,
- (long)(pa)));
- /* Enter if if we haven't before. */
- if (prev_va != dvmaddr)
#ifdef DEBUG
-{ if (iommudebug & 0x10) printf("seg %d:5d entering dvma %lx, prev %lx pa %lx\n", i, j, dvmaddr, prev_va, pa);
+ if (ims == NULL)
+ panic("iommu_dvmamap_append_range: null map state");
+#endif
+
+ sgstart = iommu_iomap_translate(ims, pa);
+ sgend = sgstart + length - 1;
+
+#ifdef DIAGNOSTIC
+ if (sgstart == NULL || sgstart >= sgend) {
+ printf("append range invalid mapping for %lx "
+ "(0x%llx - 0x%llx)\n", pa, sgstart, sgend);
+ map->dm_nsegs = 0;
+ return (EINVAL);
+ }
#endif
- iommu_enter(is, prev_va = dvmaddr,
- prev_pa = pa,
- flags | (++npg << 12));
+
#ifdef DEBUG
-} else if (iommudebug & 0x10) printf("seg %d:%d skipping dvma %lx, prev %lx\n", i, j, dvmaddr, prev_va);
+ if (trunc_page(sgstart) != trunc_page(sgend)) {
+ printf("append range crossing page boundary! "
+ "pa %lx length %lld/0x%llx sgstart %llx sgend %llx\n",
+ pa, length, length, sgstart, sgend);
+ }
#endif
- dvmaddr += pagesz;
- pa += pagesz;
- }
+ /*
+ * We will attempt to merge this range with the previous entry
+ * (if there is one).
+ */
+ if (i > 0) {
+ seg = &map->dm_segs[i - 1];
+ if (sgstart == seg->ds_addr + seg->ds_len) {
+ length += seg->ds_len;
+ sgstart = seg->ds_addr;
+ sgend = sgstart + length - 1;
+ } else
+ seg = NULL;
+ }
- size -= left;
- ++j;
+ if (seg == NULL) {
+ seg = &map->dm_segs[i];
+ if (++i > map->_dm_segcnt) {
+ printf("append range, out of segments (%d)\n", i);
+ iommu_dvmamap_print_map(t, NULL, map);
+ map->dm_nsegs = 0;
+ return (ENOMEM);
}
+ }
- map->dm_nsegs = j;
-#ifdef DIAGNOSTIC
- { /* Scope */
- int seg;
- for (seg = 0; seg < map->dm_nsegs; seg++) {
- if (map->dm_segs[seg].ds_addr <
- is->is_dvmabase ||
- map->dm_segs[seg].ds_addr >
- is->is_dvmaend) {
- printf("seg %d dvmaddr %lx out of "
- "range %x - %x\n",
- seg,
- (long)map->dm_segs[seg].ds_addr,
- is->is_dvmabase, is->is_dvmaend);
-#ifdef DDB
- Debugger();
-#endif
- }
+ /*
+ * At this point, "i" is the index of the *next* bus_dma_segment_t
+ * (the segment count, aka map->dm_nsegs) and "seg" points to the
+ * *current* entry. "length", "sgstart", and "sgend" reflect what
+ * we intend to put in "*seg". No assumptions should be made about
+ * the contents of "*seg". Only "boundary" issue can change this
+ * and "boundary" is often zero, so explicitly test for that case
+ * (the test is strictly an optimization).
+ */
+ if (boundary != 0) {
+ bd_mask = ~(boundary - 1);
+
+ while ((sgstart & bd_mask) != (sgend & bd_mask)) {
+ /*
+ * We are crossing a boundary so fill in the current
+ * segment with as much as possible, then grab a new
+ * one.
+ */
+
+ seg->ds_addr = sgstart;
+ seg->ds_len = boundary - (sgstart & bd_mask);
+
+ sgstart += seg->ds_len; /* sgend stays the same */
+ length -= seg->ds_len;
+
+ seg = &map->dm_segs[i];
+ if (++i > map->_dm_segcnt) {
+ printf("append range, out of segments\n");
+ iommu_dvmamap_print_map(t, NULL, map);
+ map->dm_nsegs = 0;
+ return (E2BIG);
}
}
-#endif
- return (0);
}
+
+ seg->ds_addr = sgstart;
+ seg->ds_len = length;
+ map->dm_nsegs = i;
+
+ return (0);
+}
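
A worked example of the boundary loop above, with hypothetical values:
appending a 0x1800-byte range whose DVMA translation starts at 0x400
against a 0x1000 boundary fills one segment up to the boundary and
starts another:

        /* sgstart = 0x0400, sgend = 0x1bff, boundary = 0x1000 */
        seg[0].ds_addr = 0x0400; seg[0].ds_len = 0x0c00; /* up to boundary */
        seg[1].ds_addr = 0x1000; seg[1].ds_len = 0x0c00; /* the remainder */
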
+
+/*
+ * Populate the iomap from a bus_dma_segment_t array. See note for
+ * iommu_dvmamap_load() regarding page entry exhaustion of the iomap.
+ * This is less of a problem for load_seg, as the number of pages
+ * is usually similar to the number of segments (nsegs).
+ */
+int
+iommu_dvmamap_load_seg(bus_dma_tag_t t, struct iommu_state *is,
+ bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int flags,
+ bus_size_t size, bus_size_t boundary)
+{
+ int i;
+ int left;
+ int seg;
+
/*
- * This was allocated with bus_dmamem_alloc.
- * The pages are on an `mlist'.
+ * This segs array is made up of individual physical
+ * segments, probably built by _bus_dmamap_load_uio() or
+ * _bus_dmamap_load_mbuf(). Ignore the mlist and
+ * load each one individually.
*/
- map->dm_mapsize = size;
- i = 0;
- sgstart = dvmaddr;
- sgend = sgstart + size - 1;
- map->dm_segs[i].ds_addr = sgstart;
- while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
- /* Oops. We crossed a boundary. Split the xfer. */
- map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
- "seg %d start %lx size %lx\n", i,
- (long)map->dm_segs[i].ds_addr,
- map->dm_segs[i].ds_len));
- if (++i >= map->_dm_segcnt) {
- /* Too many segments. Fail the operation. */
- s = splhigh();
- /* How can this fail? And if it does what can we do? */
- err = extent_free(is->is_dvmamap,
- dvmaddr, sgsize, EX_NOWAIT);
- map->_dm_dvmastart = 0;
- map->_dm_dvmasize = 0;
- splx(s);
- return (E2BIG);
+
+ /*
+ * Keep in mind that each segment could span
+ * multiple pages and that these are not always
+ * adjacent. The code is no longer adding dvma
+ * aliases to the IOMMU. The STC will not cross
+ * page boundaries anyway and an IOMMU table walk
+ * vs. what may be a streamed PCI DMA to a ring
+ * descriptor is probably a wash. It eases TLB
+ * pressure and in the worst possible case, it is
+ * only as bad as a non-IOMMUed architecture. More
+ * importantly, the code is not quite as hairy.
+ * (It's bad enough as it is.)
+ */
+ left = size;
+ seg = 0;
+ for (i = 0; left > 0 && i < nsegs; i++) {
+ bus_addr_t a, aend;
+ bus_size_t len = segs[i].ds_len;
+ bus_addr_t addr = segs[i].ds_addr;
+ int seg_len = min(left, len);
+
+ if (len < 1)
+ continue;
+
+ aend = addr + seg_len - 1;
+ for (a = trunc_page(addr); a < round_page(aend);
+ a += PAGE_SIZE) {
+ bus_addr_t pgstart;
+ bus_addr_t pgend;
+ int pglen;
+ int err;
+
+ pgstart = max(a, addr);
+ pgend = min(a + PAGE_SIZE - 1, addr + seg_len - 1);
+ pglen = pgend - pgstart + 1;
+
+ if (pglen < 1)
+ continue;
+
+ err = iommu_dvmamap_append_range(t, map, pgstart,
+ pglen, flags, boundary);
+ if (err) {
+ printf("iomap load seg page: %d for "
+ "pa 0x%llx (%llx - %llx for %d/%x\n",
+ err, a, pgstart, pgend, pglen, pglen);
+ return (err);
+ }
+
}
- sgstart = roundup(sgstart, boundary);
- map->dm_segs[i].ds_addr = sgstart;
+
+ left -= seg_len;
}
- DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
- "seg %d start %lx size %lx\n", i,
- (long)map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len));
- map->dm_segs[i].ds_len = sgend - sgstart + 1;
+ return (0);
+}
+
+/*
+ * Populate the iomap from an mlist. See note for iommu_dvmamap_load()
+ * regarding page entry exhaustion of the iomap.
+ */
+int
+iommu_dvmamap_load_mlist(bus_dma_tag_t t, struct iommu_state *is,
+ bus_dmamap_t map, struct pglist *mlist, int flags,
+ bus_size_t size, bus_size_t boundary)
+{
+ struct vm_page *m;
+ paddr_t pa;
+ int err;
+ /*
+ * This was allocated with bus_dmamem_alloc.
+ * The pages are on an `mlist'.
+ */
for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
- if (sgsize == 0)
- panic("iommu_dmamap_load_raw: size botch");
pa = VM_PAGE_TO_PHYS(m);
- DPRINTF(IDB_BUSDMA,
- ("iommu_dvmamap_load_raw: map %p loading va %lx at "
- "pa %lx\n",
- map, (long)dvmaddr, (long)(pa)));
- iommu_enter(is, dvmaddr, pa, flags | 0x8000); /* Magic 0x8000? */
-
- dvmaddr += pagesz;
- sgsize -= pagesz;
+ err = iommu_dvmamap_append_range(t, map, pa, PAGE_SIZE,
+ flags, boundary);
+ if (err) {
+ printf("iomap load seg page: %d for pa 0x%lx "
+ "(%lx - %lx for %d/%x\n", err, pa, pa,
+ pa + PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ return (err);
+ }
}
- map->dm_mapsize = size;
- map->dm_nsegs = i + 1;
-#ifdef DIAGNOSTIC
- {
- int seg;
- for (seg = 0; seg < map->dm_nsegs; seg++) {
- if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
- map->dm_segs[seg].ds_addr > is->is_dvmaend) {
- printf("seg %d dvmaddr %lx out of range %x "
- "- %x\n",
- seg, (long)map->dm_segs[seg].ds_addr,
- is->is_dvmabase, is->is_dvmaend);
+
+ return (0);
+}
+
+/*
+ * Unload a dvmamap.
+ */
+void
+iommu_dvmamap_unload(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map)
+{
+ struct iommu_map_state *ims = map->_dm_cookie;
+ bus_addr_t dvmaddr = map->_dm_dvmastart;
+ bus_size_t sgsize = map->_dm_dvmasize;
+ int error, s;
+
+ /* Flush the iommu */
+#ifdef DEBUG
+ if (dvmaddr == 0) {
+ printf("iommu_dvmamap_unload: No dvmastart\n");
#ifdef DDB
- Debugger();
+ if (iommudebug & IDB_BREAK)
+ Debugger();
#endif
- }
+ return;
+ }
+ iommu_dvmamap_validate_map(t, is, map);
+
+ if (iommudebug & IDB_PRINT_MAP)
+ iommu_dvmamap_print_map(t, is, map);
+#endif /* DEBUG */
+
+ /* Remove the IOMMU entries */
+ iommu_iomap_unload_map(is, ims);
+
+ /* Clear the iomap */
+ iommu_iomap_clear_pages(ims);
+
+ bus_dmamap_unload(t->_parent, map);
+
+ /* Mark the mappings as invalid. */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ s = splhigh();
+ error = extent_free(is->is_dvmamap, dvmaddr,
+ sgsize, EX_NOWAIT);
+ map->_dm_dvmastart = 0;
+ map->_dm_dvmasize = 0;
+ splx(s);
+ if (error != 0)
+ printf("warning: %qd of DVMA space lost\n", sgsize);
+}
+
+/*
+ * Perform internal consistency checking on a dvmamap.
+ */
+int
+iommu_dvmamap_validate_map(bus_dma_tag_t t, struct iommu_state *is,
+ bus_dmamap_t map)
+{
+ int err = 0;
+ int seg;
+
+ if (trunc_page(map->_dm_dvmastart) != map->_dm_dvmastart) {
+ printf("**** dvmastart address not page aligned: %llx",
+ map->_dm_dvmastart);
+ err = 1;
+ }
+ if (trunc_page(map->_dm_dvmasize) != map->_dm_dvmasize) {
+ printf("**** dvmasize not a multiple of page size: %llx",
+ map->_dm_dvmasize);
+ err = 1;
+ }
+ if (map->_dm_dvmastart < is->is_dvmabase ||
+ round_page(map->_dm_dvmastart + map->_dm_dvmasize) >
+ is->is_dvmaend + 1) {
+ printf("dvmaddr %llx len %llx out of range %x - %x\n",
+ map->_dm_dvmastart, map->_dm_dvmasize,
+ is->is_dvmabase, is->is_dvmaend);
+ err = 1;
+ }
+ for (seg = 0; seg < map->dm_nsegs; seg++) {
+ if (map->dm_segs[seg].ds_addr == 0 ||
+ map->dm_segs[seg].ds_len == 0) {
+ printf("seg %d null segment dvmaddr %llx len %llx for "
+ "range %llx len %llx\n",
+ seg,
+ map->dm_segs[seg].ds_addr,
+ map->dm_segs[seg].ds_len,
+ map->_dm_dvmastart, map->_dm_dvmasize);
+ err = 1;
+ } else if (map->dm_segs[seg].ds_addr < map->_dm_dvmastart ||
+ round_page(map->dm_segs[seg].ds_addr +
+ map->dm_segs[seg].ds_len) >
+ map->_dm_dvmastart + map->_dm_dvmasize) {
+ printf("seg %d dvmaddr %llx len %llx out of "
+ "range %llx len %llx\n",
+ seg,
+ map->dm_segs[seg].ds_addr,
+ map->dm_segs[seg].ds_len,
+ map->_dm_dvmastart, map->_dm_dvmasize);
+ err = 1;
}
- }
+ }
+
+ if (err) {
+ iommu_dvmamap_print_map(t, is, map);
+#if defined(DDB) && defined(DEBUG)
+ if (iommudebug & IDB_BREAK)
+ Debugger();
#endif
- return (0);
+ }
+
+ return (err);
+}
+
+void
+iommu_dvmamap_print_map(bus_dma_tag_t t, struct iommu_state *is,
+ bus_dmamap_t map)
+{
+ int seg, i;
+ long full_len, source_len;
+ struct mbuf *m;
+
+ printf("DVMA %x for %x, mapping %p: dvstart %llx dvsize %llx "
+ "size %lld/%llx maxsegsz %llx boundary %llx segcnt %d "
+ "flags %x type %d source %p "
+ "cookie %p mapsize %llx nsegs %d\n",
+ is ? is->is_dvmabase : 0, is ? is->is_dvmaend : 0, map,
+ map->_dm_dvmastart, map->_dm_dvmasize,
+ map->_dm_size, map->_dm_size, map->_dm_maxsegsz, map->_dm_boundary,
+ map->_dm_segcnt, map->_dm_flags, map->_dm_type,
+ map->_dm_source, map->_dm_cookie, map->dm_mapsize,
+ map->dm_nsegs);
+
+ full_len = 0;
+ for (seg = 0; seg < map->dm_nsegs; seg++) {
+ printf("seg %d dvmaddr %llx pa %lx len %llx (tte %llx)\n",
+ seg, map->dm_segs[seg].ds_addr,
+ is ? iommu_extract(is, map->dm_segs[seg].ds_addr) : 0,
+ map->dm_segs[seg].ds_len,
+ is ? iommu_lookup_tte(is, map->dm_segs[seg].ds_addr) : 0);
+ full_len += map->dm_segs[seg].ds_len;
+ }
+ printf("total length = %ld/0x%lx\n", full_len, full_len);
+
+ if (map->_dm_source) switch (map->_dm_type) {
+ case _DM_TYPE_MBUF:
+ m = map->_dm_source;
+ if (m->m_flags & M_PKTHDR)
+ printf("source PKTHDR mbuf (%p) hdr len = %d/0x%x:\n",
+ m, m->m_pkthdr.len, m->m_pkthdr.len);
+ else
+ printf("source mbuf (%p):\n", m);
+
+ source_len = 0;
+ for ( ; m; m = m->m_next) {
+ vaddr_t vaddr = mtod(m, vaddr_t);
+ long len = m->m_len;
+ paddr_t pa;
+
+ if (pmap_extract(pmap_kernel(), vaddr, &pa))
+ printf("kva %lx pa %lx len %ld/0x%lx\n",
+ vaddr, pa, len, len);
+ else
+ printf("kva %lx pa <invalid> len %ld/0x%lx\n",
+ vaddr, len, len);
+
+ source_len += len;
+ }
+
+ if (full_len != source_len)
+ printf("mbuf length %ld/0x%lx is %s than mapping "
+ "length %ld/0x%lx\n", source_len, source_len,
+ (source_len > full_len) ? "greater" : "less",
+ full_len, full_len);
+ else
+ printf("mbuf length %ld/0x%lx\n", source_len,
+ source_len);
+ break;
+ case _DM_TYPE_LOAD:
+ case _DM_TYPE_SEGS:
+ case _DM_TYPE_UIO:
+ default:
+ break;
+ }
+
+ if (map->_dm_cookie) {
+ struct iommu_map_state *ims = map->_dm_cookie;
+ struct iommu_page_map *ipm = &ims->ims_map;
+
+ printf("page map (%p) of size %d with %d entries\n",
+ ipm, ipm->ipm_maxpage, ipm->ipm_pagecnt);
+ for (i = 0; i < ipm->ipm_pagecnt; ++i) {
+ struct iommu_page_entry *e = &ipm->ipm_map[i];
+ printf("%d: vmaddr 0x%lx pa 0x%lx\n", i,
+ e->ipe_va, e->ipe_pa);
+ }
+ } else
+ printf("iommu map state (cookie) is NULL\n");
}
void
iommu_dvmamap_sync(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
bus_addr_t offset, bus_size_t len, int ops)
{
+ struct iommu_map_state *ims = map->_dm_cookie;
+ struct strbuf_ctl *sb;
bus_size_t count;
int i, needsflush = 0;
- if (is->is_sb[0] == NULL && is->is_sb[1] == NULL)
+#ifdef DIAGNOSTIC
+ if (ims == NULL)
+ panic("iommu_dvmamap_sync: null map state");
+#endif
+ sb = ims->ims_sb;
+
+ if ((ims->ims_flags & IOMMU_MAP_STREAM) == 0 || (len == 0))
+ return;
+
+ if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE))
+ return;
+
+ if ((ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) == 0)
return;
for (i = 0; i < map->dm_nsegs; i++) {
@@ -1029,8 +1457,8 @@ iommu_dvmamap_sync(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
for (; len > 0 && i < map->dm_nsegs; i++) {
count = min(map->dm_segs[i].ds_len - offset, len);
- if(iommu_dvmamap_sync_seg(t, is, &map->dm_segs[i],
- offset, count, ops))
+ if (count > 0 && iommu_dvmamap_sync_range(sb,
+ map->dm_segs[i].ds_addr + offset, count))
needsflush = 1;
len -= count;
}
@@ -1039,32 +1467,32 @@ iommu_dvmamap_sync(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
panic("iommu_dvmamap_sync: leftover %lu", len);
if (needsflush)
- iommu_strbuf_flush_done(is);
+ iommu_strbuf_flush_done(ims);
}
/*
* Flush an individual dma segment, returns non-zero if the streaming buffers
* need flushing afterwards.
*/
-
int
-iommu_dvmamap_sync_range(struct iommu_state *is, vaddr_t va, bus_size_t len)
+iommu_dvmamap_sync_range(struct strbuf_ctl *sb, vaddr_t va, bus_size_t len)
{
vaddr_t vaend;
-
- if (is->is_sb[0] == NULL && is->is_sb[1] == NULL)
- return (0);
-
#ifdef DIAGNOSTIC
+ struct iommu_state *is = sb->sb_iommu;
+
if (va < is->is_dvmabase || va >= is->is_dvmaend)
panic("invalid va: %llx", (long long)va);
-#endif
- if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0)
+ if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0) {
+ printf("iommu_dvmamap_sync_range: attempting to flush "
+ "non-streaming entry\n");
return (0);
+ }
+#endif
- vaend = (va + len + PGOFSET) & ~PGOFSET;
- va &= ~PGOFSET;
+ vaend = (va + len + PAGE_MASK) & ~PAGE_MASK;
+ va &= ~PAGE_MASK;
#ifdef DIAGNOSTIC
if (va < is->is_dvmabase || vaend >= is->is_dvmaend)
@@ -1074,43 +1502,17 @@ iommu_dvmamap_sync_range(struct iommu_state *is, vaddr_t va, bus_size_t len)
is->is_dvmaend);
#endif
- for( ; va <= vaend; va += NBPG) {
+ for ( ; va <= vaend; va += PAGE_SIZE) {
DPRINTF(IDB_BUSDMA,
("iommu_dvmamap_sync_range: flushing va %p\n",
(void *)(u_long)va));
- iommu_strbuf_flush(is, va);
+ iommu_strbuf_flush(sb, va);
}
return (1);
}
int
-iommu_dvmamap_sync_seg(bus_dma_tag_t t, struct iommu_state *is,
- bus_dma_segment_t *seg, bus_addr_t offset, bus_size_t len, int ops)
-{
- int needsflush = 0;
- vaddr_t va = seg->ds_addr + offset;
-
- DPRINTF(IDB_SYNC,
- ("iommu_dvmamap_sync_seg: syncing va %p len %lu (%x)\n",
- (void *)(u_long)va, (u_long)len, ops));
-
- if (len == 0)
- return (0);
-
- if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) {
- /* Nothing to do */;
- }
-
- if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) {
- if (iommu_dvmamap_sync_range(is, va, len))
- needsflush = 1;
- }
-
- return (needsflush);
-}
-
-int
iommu_dvmamem_alloc(bus_dma_tag_t t, struct iommu_state *is, bus_size_t size,
bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
int nsegs, int *rsegs, int flags)
@@ -1165,8 +1567,10 @@ iommu_dvmamem_map(bus_dma_tag_t t, struct iommu_state *is,
/*
* digest flags:
*/
+#if 0
if (flags & BUS_DMA_COHERENT) /* Disable vcache */
cbit |= PMAP_NVC;
+#endif
if (flags & BUS_DMA_NOCACHE) /* sideffects */
cbit |= PMAP_NC;
@@ -1206,7 +1610,7 @@ iommu_dvmamem_unmap(bus_dma_tag_t t, struct iommu_state *is, caddr_t kva,
kva, size));
#ifdef DIAGNOSTIC
- if ((u_long)kva & PGOFSET)
+ if ((u_long)kva & PAGE_MASK)
panic("iommu_dvmamem_unmap");
#endif
@@ -1216,3 +1620,179 @@ iommu_dvmamem_unmap(bus_dma_tag_t t, struct iommu_state *is, caddr_t kva,
uvm_km_free(kernel_map, (vaddr_t)kva, size);
}
+/*
+ * Create a new iomap.
+ */
+struct iommu_map_state *
+iommu_iomap_create(int n)
+{
+ struct iommu_map_state *ims;
+ struct strbuf_flush *sbf;
+ vaddr_t va;
+
+ if (n < 64)
+ n = 64;
+
+ ims = malloc(sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]),
+ M_DEVBUF, M_NOWAIT);
+ if (ims == NULL)
+ return (NULL);
+
+ memset(ims, 0, sizeof *ims);
+
+ /* Initialize the map. */
+ ims->ims_map.ipm_maxpage = n;
+ SPLAY_INIT(&ims->ims_map.ipm_tree);
+
+ /* Initialize the flush area. */
+ sbf = &ims->ims_flush;
+ va = (vaddr_t)&sbf->sbf_area[0x40];
+ va &= ~0x3f;
+ pmap_extract(pmap_kernel(), va, &sbf->sbf_flushpa);
+ sbf->sbf_flush = (void *)va;
+
+ return (ims);
+}
+
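The flush-area setup in iommu_iomap_create() uses a classic alignment
trick: a 0x80-byte char array always contains a 64-byte window that is
64-byte aligned, found by stepping to offset 0x40 and rounding down. A
self-contained sketch of the same arithmetic (userland, uintptr_t in
place of vaddr_t):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct flush_area {
	char area[0x80];	/* holds a 64-byte aligned/sized buffer */
};

int
main(void)
{
	struct flush_area fa;
	uintptr_t va;

	/* Step to the middle, then round down to a 64-byte boundary. */
	va = (uintptr_t)&fa.area[0x40];
	va &= ~(uintptr_t)0x3f;

	/* The window can never escape the array. */
	assert(va >= (uintptr_t)fa.area);
	assert(va + 0x40 <= (uintptr_t)fa.area + sizeof(fa.area));
	printf("aligned window at offset %zu\n",
	    (size_t)(va - (uintptr_t)fa.area));
	return (0);
}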
+/*
+ * Destroy an iomap.
+ */
+void
+iommu_iomap_destroy(struct iommu_map_state *ims)
+{
+#ifdef DIAGNOSTIC
+ if (ims->ims_map.ipm_pagecnt > 0)
+ printf("iommu_iomap_destroy: %d page entries in use\n",
+ ims->ims_map.ipm_pagecnt);
+#endif
+
+ free(ims, M_DEVBUF);
+}
+
+/*
+ * Utility function used by splay tree to order page entries by pa.
+ */
+static inline int
+iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
+{
+ return ((a->ipe_pa > b->ipe_pa) ? 1 :
+ (a->ipe_pa < b->ipe_pa) ? -1 : 0);
+}
+
+SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
+
+SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
+
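For readers who have not used <sys/tree.h>: SPLAY_PROTOTYPE emits the
declarations and SPLAY_GENERATE the function bodies, both keyed on the
comparator. A minimal sketch of the same pattern, assuming a BSD
<sys/tree.h> is available:

#include <sys/tree.h>
#include <stdio.h>

struct node {
	SPLAY_ENTRY(node) link;
	unsigned long key;		/* plays the role of ipe_pa */
};

static int
node_cmp(struct node *a, struct node *b)
{
	return ((a->key > b->key) ? 1 : (a->key < b->key) ? -1 : 0);
}

SPLAY_HEAD(node_tree, node);
SPLAY_PROTOTYPE(node_tree, node, link, node_cmp);
SPLAY_GENERATE(node_tree, node, link, node_cmp);

int
main(void)
{
	struct node_tree tree = SPLAY_INITIALIZER(&tree);
	struct node n1 = { .key = 0x2000 }, n2 = { .key = 0x4000 };
	struct node want = { .key = 0x4000 }, *found;

	/* SPLAY_INSERT returns NULL on success, the clashing node else. */
	SPLAY_INSERT(node_tree, &tree, &n1);
	SPLAY_INSERT(node_tree, &tree, &n2);

	found = SPLAY_FIND(node_tree, &tree, &want);
	printf("found key %#lx\n", found ? found->key : 0UL);
	return (0);
}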
+/*
+ * Insert a pa entry in the iomap.
+ */
+int
+iommu_iomap_insert_page(struct iommu_map_state *ims, paddr_t pa)
+{
+ struct iommu_page_map *ipm = &ims->ims_map;
+ struct iommu_page_entry *e;
+
+ if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
+ struct iommu_page_entry ipe;
+
+ ipe.ipe_pa = pa;
+ if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
+ return (0);
+
+ return (ENOMEM);
+ }
+
+ e = &ipm->ipm_map[ipm->ipm_pagecnt];
+
+ e->ipe_pa = pa;
+	e->ipe_va = 0;
+
+ e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);
+
+ /* Duplicates are okay, but only count them once. */
+ if (e)
+ return (0);
+
+ ++ipm->ipm_pagecnt;
+
+ return (0);
+}
+
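A detail of iommu_iomap_insert_page() worth calling out: a full map is
not automatically an error. A pa that is already in the tree succeeds
without consuming a slot; only a genuinely new page with no slot left
returns ENOMEM. A small sketch of that contract, with a linear scan
standing in for the splay tree:

#include <errno.h>
#include <stdio.h>

#define MAXPAGE 2	/* tiny on purpose, to show the full case */

struct page_map {
	int pagecnt;
	unsigned long pa[MAXPAGE];
};

/* Duplicates succeed and count once; new pages need a free slot. */
static int
insert_page(struct page_map *pm, unsigned long pa)
{
	int i;

	for (i = 0; i < pm->pagecnt; i++)
		if (pm->pa[i] == pa)
			return (0);		/* already present */
	if (pm->pagecnt >= MAXPAGE)
		return (ENOMEM);		/* full and genuinely new */
	pm->pa[pm->pagecnt++] = pa;
	return (0);
}

int
main(void)
{
	struct page_map pm = { 0, { 0 } };

	printf("%d %d %d %d\n",
	    insert_page(&pm, 0x2000),	/* 0: new */
	    insert_page(&pm, 0x4000),	/* 0: new, map now full */
	    insert_page(&pm, 0x2000),	/* 0: duplicate, still fine */
	    insert_page(&pm, 0x6000));	/* ENOMEM */
	return (0);
}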
+/*
+ * Load the iomap into the IOMMU by filling in the pa->va mapping and
+ * inserting the entries into the IOMMU tables.
+ */
+int
+iommu_iomap_load_map(struct iommu_state *is, struct iommu_map_state *ims,
+ vaddr_t vmaddr, int flags)
+{
+ struct iommu_page_map *ipm = &ims->ims_map;
+ struct iommu_page_entry *e;
+ struct strbuf_ctl *sb = ims->ims_sb;
+ int i;
+
+ if (sb->sb_flush == NULL)
+ flags &= ~BUS_DMA_STREAMING;
+
+ if (flags & BUS_DMA_STREAMING)
+ ims->ims_flags |= IOMMU_MAP_STREAM;
+ else
+ ims->ims_flags &= ~IOMMU_MAP_STREAM;
+
+ for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
+ e->ipe_va = vmaddr;
+ iommu_enter(is, sb, e->ipe_va, e->ipe_pa, flags);
+ vmaddr += PAGE_SIZE;
+ }
+
+ return (0);
+}
+
+/*
+ * Remove the iomap from the IOMMU.
+ */
+int
+iommu_iomap_unload_map(struct iommu_state *is, struct iommu_map_state *ims)
+{
+ struct iommu_page_map *ipm = &ims->ims_map;
+ struct iommu_page_entry *e;
+ struct strbuf_ctl *sb = ims->ims_sb;
+ int i;
+
+ for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
+ iommu_remove(is, sb, e->ipe_va);
+
+ return (0);
+}
+
+/*
+ * Translate a physical address (pa) into a DVMA address.
+ */
+vaddr_t
+iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
+{
+ struct iommu_page_map *ipm = &ims->ims_map;
+ struct iommu_page_entry *e;
+ struct iommu_page_entry pe;
+ paddr_t offset = pa & PAGE_MASK;
+
+ pe.ipe_pa = trunc_page(pa);
+
+ e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);
+
+ if (e == NULL)
+		return (0);
+
+ return (e->ipe_va | offset);
+}
+
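The translation above is plain page arithmetic: split pa into frame and
offset, look the frame up in the tree, and reattach the offset to the
DVMA page. The same steps in isolation, assuming the 8K page size used
on sun4u and a made-up DVMA page address:

#include <stdio.h>

#define PAGE_SHIFT	13			/* 8K pages on sun4u */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)

int
main(void)
{
	unsigned long pa = 0x12345678;		/* arbitrary physical addr */
	unsigned long dvma_page = 0xfe002000;	/* hypothetical ipe_va */
	unsigned long offset = pa & PAGE_MASK;

	/* The page offset survives the pa -> DVMA translation untouched. */
	printf("pa %#lx = frame %#lx + offset %#lx -> dvma %#lx\n",
	    pa, trunc_page(pa), offset, dvma_page | offset);
	return (0);
}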
+/*
+ * Clear the iomap table and tree.
+ */
+void
+iommu_iomap_clear_pages(struct iommu_map_state *ims)
+{
+ ims->ims_map.ipm_pagecnt = 0;
+ SPLAY_INIT(&ims->ims_map.ipm_tree);
+}
+
diff --git a/sys/arch/sparc64/dev/iommureg.h b/sys/arch/sparc64/dev/iommureg.h
index a03d49ab378..66d1df1fa4a 100644
--- a/sys/arch/sparc64/dev/iommureg.h
+++ b/sys/arch/sparc64/dev/iommureg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: iommureg.h,v 1.5 2003/02/17 01:29:20 henric Exp $ */
+/* $OpenBSD: iommureg.h,v 1.6 2003/03/06 08:26:08 henric Exp $ */
/* $NetBSD: iommureg.h,v 1.6 2001/07/20 00:07:13 eeh Exp $ */
/*
@@ -97,9 +97,11 @@ struct iommu_strbuf {
#define IOTTE_8K 0x0000000000000000LL
#define IOTTE_STREAM 0x1000000000000000LL /* Is page streamable? */
#define IOTTE_LOCAL 0x0800000000000000LL /* Accesses to same bus segment? */
-#define IOTTE_PAMASK 0x000001ffffffe000LL /* Let's assume this is correct */
+#define IOTTE_PAMASK	0x000007ffffffe000LL /* Let's assume this is correct (bits 42..13) */
#define IOTTE_C 0x0000000000000010LL /* Accesses to cacheable space */
#define IOTTE_W 0x0000000000000002LL /* Writeable */
+#define IOTTE_SOFTWARE 0x0000000000001f80LL /* For software use (bits 12..7) */
+
/*
* On sun4u each bus controller has a separate IOMMU. The IOMMU has
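The bit-range annotations on IOTTE_PAMASK and IOTTE_SOFTWARE are easy to
verify mechanically. A short sketch that derives the low and high set
bit of each mask (GCC/Clang builtins assumed):

#include <stdio.h>

#define IOTTE_PAMASK	0x000007ffffffe000ULL
#define IOTTE_SOFTWARE	0x0000000000001f80ULL

/* Print the span of set bits in a mask, lowest bit first. */
static void
bit_range(const char *name, unsigned long long m)
{
	printf("%s: bits %d..%d\n", name,
	    __builtin_ctzll(m), 63 - __builtin_clzll(m));
}

int
main(void)
{
	bit_range("IOTTE_PAMASK", IOTTE_PAMASK);	/* bits 13..42 */
	bit_range("IOTTE_SOFTWARE", IOTTE_SOFTWARE);	/* bits 7..12 */
	return (0);
}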
diff --git a/sys/arch/sparc64/dev/iommuvar.h b/sys/arch/sparc64/dev/iommuvar.h
index 6dfccb6dea0..f2726150aa0 100644
--- a/sys/arch/sparc64/dev/iommuvar.h
+++ b/sys/arch/sparc64/dev/iommuvar.h
@@ -1,7 +1,8 @@
-/* $OpenBSD: iommuvar.h,v 1.7 2003/02/17 01:29:20 henric Exp $ */
+/* $OpenBSD: iommuvar.h,v 1.8 2003/03/06 08:26:08 henric Exp $ */
/* $NetBSD: iommuvar.h,v 1.9 2001/10/07 20:30:41 eeh Exp $ */
/*
+ * Copyright (c) 2003 Henric Jungheim
* Copyright (c) 1999 Matthew R. Green
* All rights reserved.
*
@@ -32,18 +33,66 @@
#ifndef _SPARC64_DEV_IOMMUVAR_H_
#define _SPARC64_DEV_IOMMUVAR_H_
+#ifndef _SYS_TREE_H_
+#include <sys/tree.h>
+#endif
+
/*
* per-Streaming Buffer state
*/
-
struct strbuf_ctl {
bus_space_tag_t sb_bustag; /* streaming buffer registers */
bus_space_handle_t sb_sb; /* Handle for our regs */
+ struct iommu_state *sb_iommu; /* Associated IOMMU */
+ /*
+ * Since implementing the per-map IOMMU state, these per-STC
+ * flush areas are not used other than as a boolean flag to indicate
+ * the presence of a working and enabled STC. For inconsistency's
+ * sake, the "sb" pointers of iommu_state are sometimes used for the
+ * same purpose. This should be consolidated.
+ */
paddr_t sb_flushpa; /* to flush streaming buffers */
volatile int64_t *sb_flush;
};
/*
+ * per-map STC flush area
+ */
+struct strbuf_flush {
+ char sbf_area[0x80]; /* Holds 64-byte long/aligned buffer */
+ void *sbf_flush; /* Kernel virtual address of buffer */
+ paddr_t sbf_flushpa; /* Physical address of buffer area */
+};
+
+/*
+ * per-map DVMA page table
+ */
+struct iommu_page_entry {
+ SPLAY_ENTRY(iommu_page_entry) ipe_node;
+ paddr_t ipe_pa;
+ vaddr_t ipe_va;
+};
+struct iommu_page_map {
+ SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
+ int ipm_maxpage; /* Size of allocated page map */
+ int ipm_pagecnt; /* Number of entries in use */
+ struct iommu_page_entry ipm_map[1];
+};
+
+/*
+ * per-map IOMMU state
+ *
+ * This is what bus_dmamap_t's _dm_cookie should be pointing to.
+ */
+struct iommu_map_state {
+ struct strbuf_flush ims_flush; /* flush should be first (alignment) */
+ struct strbuf_ctl *ims_sb; /* Link to parent */
+ int ims_flags;
+ struct iommu_page_map ims_map; /* map must be last (array at end) */
+};
+#define IOMMU_MAP_STREAM 1
+
+/*
* per-IOMMU state
*/
struct iommu_state {
@@ -52,7 +101,7 @@ struct iommu_state {
int is_tsbsize; /* 0 = 8K, ... */
u_int is_dvmabase;
u_int is_dvmaend;
- int64_t is_cr; /* IOMMU control register value */
+ int64_t is_cr; /* Control register value */
struct extent *is_dvmamap; /* DVMA map for this instance */
struct strbuf_ctl *is_sb[2]; /* Streaming buffers if any */
@@ -65,18 +114,21 @@ struct iommu_state {
/* interfaces for PCI/SBUS code */
void iommu_init(char *, struct iommu_state *, int, u_int32_t);
void iommu_reset(struct iommu_state *);
-void iommu_enter(struct iommu_state *, vaddr_t, int64_t, int);
-void iommu_remove(struct iommu_state *, vaddr_t, size_t);
paddr_t iommu_extract(struct iommu_state *, vaddr_t);
-
+int64_t iommu_lookup_tte(struct iommu_state *, vaddr_t);
+int64_t iommu_fetch_tte(struct iommu_state *, paddr_t);
+int iommu_dvmamap_create(bus_dma_tag_t, struct iommu_state *,
+ struct strbuf_ctl *, bus_size_t, int, bus_size_t, bus_size_t,
+ int, bus_dmamap_t *);
+void iommu_dvmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int iommu_dvmamap_load(bus_dma_tag_t, struct iommu_state *,
bus_dmamap_t, void *, bus_size_t, struct proc *, int);
void iommu_dvmamap_unload(bus_dma_tag_t, struct iommu_state *,
bus_dmamap_t);
int iommu_dvmamap_load_raw(bus_dma_tag_t, struct iommu_state *,
bus_dmamap_t, bus_dma_segment_t *, int, int, bus_size_t);
-void iommu_dvmamap_sync(bus_dma_tag_t, struct iommu_state *,
- bus_dmamap_t, bus_addr_t, bus_size_t, int);
+void iommu_dvmamap_sync(bus_dma_tag_t, struct iommu_state *, bus_dmamap_t,
+ bus_addr_t, bus_size_t, int);
int iommu_dvmamem_alloc(bus_dma_tag_t, struct iommu_state *,
bus_size_t, bus_size_t, bus_size_t, bus_dma_segment_t *,
int, int *, int);
diff --git a/sys/arch/sparc64/dev/psycho.c b/sys/arch/sparc64/dev/psycho.c
index a5bdd7abdfc..98a4ce8c10f 100644
--- a/sys/arch/sparc64/dev/psycho.c
+++ b/sys/arch/sparc64/dev/psycho.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: psycho.c,v 1.28 2003/03/05 00:20:13 henric Exp $ */
+/* $OpenBSD: psycho.c,v 1.29 2003/03/06 08:26:08 henric Exp $ */
/* $NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp $ */
/*
@@ -101,19 +101,22 @@ int _psycho_bus_map(bus_space_tag_t, bus_space_tag_t, bus_addr_t,
void *psycho_intr_establish(bus_space_tag_t, bus_space_tag_t, int, int, int,
int (*)(void *), void *);
-int psycho_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
+int psycho_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, int, bus_size_t,
+ bus_size_t, int, bus_dmamap_t *);
+void psycho_dvmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+int psycho_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
bus_size_t, struct proc *, int);
-void psycho_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-int psycho_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
+void psycho_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+int psycho_dmamap_load_raw(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dma_segment_t *, int, bus_size_t, int);
-void psycho_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+void psycho_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
bus_size_t, int);
-int psycho_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
+int psycho_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
bus_dma_segment_t *, int, int *, int);
-void psycho_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
-int psycho_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, int, size_t,
+void psycho_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, bus_dma_segment_t *, int);
+int psycho_dmamem_map(bus_dma_tag_t, bus_dma_tag_t, bus_dma_segment_t *, int, size_t,
caddr_t *, int);
-void psycho_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
+void psycho_dmamem_unmap(bus_dma_tag_t, bus_dma_tag_t, caddr_t, size_t);
void psycho_map_psycho(struct psycho_softc *, int, bus_addr_t, bus_size_t,
bus_addr_t, bus_size_t);
@@ -471,6 +474,7 @@ psycho_attach(struct device *parent, struct device *self, void *aux)
offsetof(struct pci_ctl, pci_strbuf),
sizeof(struct iommu_strbuf),
&sb->sb_sb)) {
+ printf("STC0 subregion failed\n");
sb->sb_flush = 0;
}
@@ -507,12 +511,14 @@ psycho_attach(struct device *parent, struct device *self, void *aux)
offsetof(struct pci_ctl, pci_strbuf),
sizeof(struct iommu_strbuf),
&sb->sb_sb)) {
+ printf("STC1 subregion failed\n");
sb->sb_flush = 0;
}
 	/* Point our iommu at the strbuf_ctl. */
sc->sc_is->is_sb[1] = sb;
}
+
iommu_reset(sc->sc_is);
}
@@ -661,9 +667,12 @@ psycho_ue(void *arg)
/*
* It's uncorrectable. Dump the regs and panic.
*/
- panic("%s: uncorrectable DMA error AFAR %llx (pa=%llx) AFSR %llx",
- sc->sc_dev.dv_xname, afar,
- (long long)iommu_extract(sc->sc_is, (vaddr_t)afar), afsr);
+ panic("%s: uncorrectable DMA error AFAR %llx (pa=%llx tte=%llx/%llx) "
+ "AFSR %llx", sc->sc_dev.dv_xname, afar,
+ iommu_extract(sc->sc_is, (vaddr_t)afar),
+ iommu_lookup_tte(sc->sc_is, (vaddr_t)afar),
+ iommu_fetch_tte(sc->sc_is, (paddr_t)afar),
+ afsr);
return (1);
}
@@ -875,12 +884,9 @@ psycho_alloc_dma_tag(struct psycho_pbm *pp)
bzero(dt, sizeof *dt);
dt->_cookie = pp;
dt->_parent = pdt;
-#define PCOPY(x) dt->x = pdt->x
- PCOPY(_dmamap_create);
- PCOPY(_dmamap_destroy);
+ dt->_dmamap_create = psycho_dmamap_create;
+ dt->_dmamap_destroy = psycho_dvmamap_destroy;
dt->_dmamap_load = psycho_dmamap_load;
- PCOPY(_dmamap_load_mbuf);
- PCOPY(_dmamap_load_uio);
dt->_dmamap_load_raw = psycho_dmamap_load_raw;
dt->_dmamap_unload = psycho_dmamap_unload;
dt->_dmamap_sync = psycho_dmamap_sync;
@@ -888,8 +894,6 @@ psycho_alloc_dma_tag(struct psycho_pbm *pp)
dt->_dmamem_free = psycho_dmamem_free;
dt->_dmamem_map = psycho_dmamem_map;
dt->_dmamem_unmap = psycho_dmamem_unmap;
- PCOPY(_dmamem_mmap);
-#undef PCOPY
return (dt);
}
@@ -1117,92 +1121,124 @@ psycho_intr_establish(bus_space_tag_t t, bus_space_tag_t t0, int ihandle,
* hooks into the iommu dvma calls.
*/
int
-psycho_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
- bus_size_t buflen, struct proc *p, int flags)
+psycho_dmamap_create(bus_dma_tag_t t, bus_dma_tag_t t0, bus_size_t size,
+ int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags,
+ bus_dmamap_t *dmamp)
+{
+ struct psycho_pbm *pp = t->_cookie;
+ struct psycho_softc *sc = pp->pp_sc;
+
+ return (iommu_dvmamap_create(t0, sc->sc_is, &pp->pp_sb, size,
+ nsegments, maxsegsz, boundary, flags, dmamp));
+}
+
+void
+psycho_dvmamap_destroy(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
+{
+ iommu_dvmamap_destroy(t0, map);
+}
+
+int
+psycho_dmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, struct proc *p, int flags)
{
struct psycho_pbm *pp = t->_cookie;
struct psycho_softc *sc = pp->pp_sc;
- return (iommu_dvmamap_load(t, sc->sc_is, map, buf, buflen, p, flags));
+ if (pp->pp_sb.sb_flush == NULL)
+ flags &= ~BUS_DMA_STREAMING;
+
+ return (iommu_dvmamap_load(t0, sc->sc_is, map, buf, buflen, p, flags));
}
void
-psycho_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
+psycho_dmamap_unload(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
{
struct psycho_pbm *pp = t->_cookie;
struct psycho_softc *sc = pp->pp_sc;
- iommu_dvmamap_unload(t, sc->sc_is, map);
+ iommu_dvmamap_unload(t0, sc->sc_is, map);
}
int
-psycho_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
+psycho_dmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
struct psycho_pbm *pp = t->_cookie;
struct psycho_softc *sc = pp->pp_sc;
- return (iommu_dvmamap_load_raw(t, sc->sc_is, map, segs, nsegs, flags,
+ if (pp->pp_sb.sb_flush == NULL)
+ flags &= ~BUS_DMA_STREAMING;
+
+ return (iommu_dvmamap_load_raw(t0, sc->sc_is, map, segs, nsegs, flags,
size));
}
void
-psycho_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
+psycho_dmamap_sync(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, bus_addr_t offset,
bus_size_t len, int ops)
{
struct psycho_pbm *pp = t->_cookie;
struct psycho_softc *sc = pp->pp_sc;
+ if (t->_parent == NULL)
+ panic("psycho_dmamap_sync: no parent");
+
+ for (t = t->_parent; t->_dmamap_sync == NULL; t = t->_parent)
+ if (t == NULL)
+ panic("psycho_dmamap_sync: can't find implementation");
+
if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
/* Flush the CPU then the IOMMU */
- bus_dmamap_sync(t->_parent, map, offset, len, ops);
- iommu_dvmamap_sync(t, sc->sc_is, map, offset, len, ops);
+ (*t->_dmamap_sync)(t, t0, map, offset, len,
+ ops);
+ iommu_dvmamap_sync(t0, sc->sc_is, map, offset, len, ops);
}
if (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) {
/* Flush the IOMMU then the CPU */
- iommu_dvmamap_sync(t, sc->sc_is, map, offset, len, ops);
- bus_dmamap_sync(t->_parent, map, offset, len, ops);
+ iommu_dvmamap_sync(t0, sc->sc_is, map, offset, len, ops);
+ (*t->_dmamap_sync)(t, t0, map, offset, len, ops);
}
}
int
-psycho_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
+psycho_dmamem_alloc(bus_dma_tag_t t, bus_dma_tag_t t0, bus_size_t size, bus_size_t alignment,
bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
int flags)
{
struct psycho_pbm *pp = t->_cookie;
struct psycho_softc *sc = pp->pp_sc;
- return (iommu_dvmamem_alloc(t, sc->sc_is, size, alignment, boundary,
+ return (iommu_dvmamem_alloc(t0, sc->sc_is, size, alignment, boundary,
segs, nsegs, rsegs, flags));
}
void
-psycho_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
+psycho_dmamem_free(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs, int nsegs)
{
struct psycho_pbm *pp = t->_cookie;
struct psycho_softc *sc = pp->pp_sc;
- iommu_dvmamem_free(t, sc->sc_is, segs, nsegs);
+ iommu_dvmamem_free(t0, sc->sc_is, segs, nsegs);
}
int
-psycho_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+psycho_dmamem_map(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs, int nsegs,
size_t size, caddr_t *kvap, int flags)
{
struct psycho_pbm *pp = t->_cookie;
struct psycho_softc *sc = pp->pp_sc;
return (iommu_dvmamem_map
- (t, sc->sc_is, segs, nsegs, size, kvap, flags));
+ (t0, sc->sc_is, segs, nsegs, size, kvap, flags));
}
void
-psycho_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
+psycho_dmamem_unmap(bus_dma_tag_t t, bus_dma_tag_t t0, caddr_t kva, size_t size)
{
struct psycho_pbm *pp = t->_cookie;
struct psycho_softc *sc = pp->pp_sc;
- iommu_dvmamem_unmap(t, sc->sc_is, kva, size);
+ iommu_dvmamem_unmap(t0, sc->sc_is, kva, size);
}
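The sync wrappers in psycho, sbus, and schizo all share one shape: climb
the _parent chain until a tag that implements _dmamap_sync is found,
keep t0 pinned to the original tag so the callee can still reach the
right cookie, and order the CPU flush before the IOMMU flush for PRE
ops and after it for POST ops. A distilled sketch of the chain walk
(toy tag type, not the real sparc_bus_dma_tag; the NULL check is folded
into the loop condition here so a missing implementation is caught
before any dereference):

#include <stdio.h>
#include <stdlib.h>

struct tag {
	struct tag *parent;
	void (*sync)(struct tag *, struct tag *);
	const char *name;
};

/* Walk up until some ancestor implements sync, then call it with t0. */
static void
sync_via_parent(struct tag *t, struct tag *t0)
{
	for (t = t->parent; t != NULL && t->sync == NULL; t = t->parent)
		continue;
	if (t == NULL) {
		fprintf(stderr, "can't find implementation\n");
		abort();
	}
	(*t->sync)(t, t0);
}

static void
root_sync(struct tag *t, struct tag *t0)
{
	printf("sync implemented by %s, invoked for %s\n", t->name, t0->name);
}

int
main(void)
{
	struct tag root = { NULL, root_sync, "root" };
	struct tag mid  = { &root, NULL, "mid" };	/* no sync of its own */
	struct tag leaf = { &mid, NULL, "leaf" };

	sync_via_parent(&leaf, &leaf);
	return (0);
}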
diff --git a/sys/arch/sparc64/dev/sbus.c b/sys/arch/sparc64/dev/sbus.c
index c86439995ce..0bfe842cc9c 100644
--- a/sys/arch/sparc64/dev/sbus.c
+++ b/sys/arch/sparc64/dev/sbus.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sbus.c,v 1.15 2003/02/17 01:29:20 henric Exp $ */
+/* $OpenBSD: sbus.c,v 1.16 2003/03/06 08:26:08 henric Exp $ */
/* $NetBSD: sbus.c,v 1.46 2001/10/07 20:30:41 eeh Exp $ */
/*-
@@ -176,19 +176,25 @@ extern struct cfdriver sbus_cd;
/*
* DVMA routines
*/
-int sbus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
- struct proc *, int);
-void sbus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-int sbus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
- int, bus_size_t, int);
-void sbus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
-int sbus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size, bus_size_t alignment,
- bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
- int flags);
-void sbus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs);
-int sbus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
- size_t size, caddr_t *kvap, int flags);
-void sbus_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size);
+int sbus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *);
+void sbus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+int sbus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int);
+void sbus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+int sbus_dmamap_load_raw(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int);
+void sbus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+ bus_size_t, int);
+int sbus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
+ int nsegs, int *rsegs, int flags);
+void sbus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t tag,
+ bus_dma_segment_t *segs, int nsegs);
+int sbus_dmamem_map(bus_dma_tag_t, bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, size_t size, caddr_t *kvap, int flags);
+void sbus_dmamem_unmap(bus_dma_tag_t, bus_dma_tag_t tag, caddr_t kva,
+ size_t size);
/*
* Child devices receive the Sbus interrupt level in their attach
@@ -806,12 +812,9 @@ sbus_alloc_dmatag(struct sbus_softc *sc)
sdt->_cookie = sc;
sdt->_parent = psdt;
-#define PCOPY(x) sdt->x = psdt->x
- PCOPY(_dmamap_create);
- PCOPY(_dmamap_destroy);
+ sdt->_dmamap_create = sbus_dmamap_create;
+ sdt->_dmamap_destroy = sbus_dmamap_destroy;
sdt->_dmamap_load = sbus_dmamap_load;
- PCOPY(_dmamap_load_mbuf);
- PCOPY(_dmamap_load_uio);
sdt->_dmamap_load_raw = sbus_dmamap_load_raw;
sdt->_dmamap_unload = sbus_dmamap_unload;
sdt->_dmamap_sync = sbus_dmamap_sync;
@@ -819,91 +822,114 @@ sbus_alloc_dmatag(struct sbus_softc *sc)
sdt->_dmamem_free = sbus_dmamem_free;
sdt->_dmamem_map = sbus_dmamem_map;
sdt->_dmamem_unmap = sbus_dmamem_unmap;
- PCOPY(_dmamem_mmap);
-#undef PCOPY
sc->sc_dmatag = sdt;
return (sdt);
}
int
-sbus_dmamap_load(bus_dma_tag_t tag, bus_dmamap_t map, void *buf,
- bus_size_t buflen, struct proc *p, int flags)
+sbus_dmamap_create(bus_dma_tag_t t, bus_dma_tag_t t0, bus_size_t size,
+ int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags,
+ bus_dmamap_t *dmamp)
{
- struct sbus_softc *sc = tag->_cookie;
+ struct sbus_softc *sc = t->_cookie;
- return (iommu_dvmamap_load(tag, &sc->sc_is, map, buf, buflen,
+ return (iommu_dvmamap_create(t0, &sc->sc_is, &sc->sc_sb, size,
+ nsegments, maxsegsz, boundary, flags, dmamp));
+}
+
+void
+sbus_dmamap_destroy(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
+{
+ iommu_dvmamap_destroy(t0, map);
+}
+
+int
+sbus_dmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, struct proc *p, int flags)
+{
+ struct sbus_softc *sc = t->_cookie;
+
+ return (iommu_dvmamap_load(t0, &sc->sc_is, map, buf, buflen,
p, flags));
}
int
-sbus_dmamap_load_raw(bus_dma_tag_t tag, bus_dmamap_t map,
+sbus_dmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
- struct sbus_softc *sc = tag->_cookie;
+ struct sbus_softc *sc = t->_cookie;
- return (iommu_dvmamap_load_raw(tag, &sc->sc_is, map, segs,
+ return (iommu_dvmamap_load_raw(t0, &sc->sc_is, map, segs,
nsegs, flags, size));
}
void
-sbus_dmamap_unload(bus_dma_tag_t tag, bus_dmamap_t map)
+sbus_dmamap_unload(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
{
- struct sbus_softc *sc = tag->_cookie;
+ struct sbus_softc *sc = t->_cookie;
- iommu_dvmamap_unload(tag, &sc->sc_is, map);
+ iommu_dvmamap_unload(t0, &sc->sc_is, map);
}
void
-sbus_dmamap_sync(bus_dma_tag_t tag, bus_dmamap_t map, bus_addr_t offset,
- bus_size_t len, int ops)
+sbus_dmamap_sync(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
+ bus_addr_t offset, bus_size_t len, int ops)
{
- struct sbus_softc *sc = tag->_cookie;
+ struct sbus_softc *sc = t->_cookie;
+
+ if (t->_parent == NULL)
+ panic("sbus_dmamap_sync: no parent");
+
+ for (t = t->_parent; t->_dmamap_sync == NULL; t = t->_parent)
+ if (t == NULL)
+ panic("sbus_dmamap_sync: can't find implementation");
if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
/* Flush the CPU then the IOMMU */
- bus_dmamap_sync(tag->_parent, map, offset, len, ops);
- iommu_dvmamap_sync(tag, &sc->sc_is, map, offset, len, ops);
+ (*t->_dmamap_sync)(t, t0, map, offset, len, ops);
+ iommu_dvmamap_sync(t0, &sc->sc_is, map, offset, len, ops);
}
if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) {
/* Flush the IOMMU then the CPU */
- iommu_dvmamap_sync(tag, &sc->sc_is, map, offset, len, ops);
- bus_dmamap_sync(tag->_parent, map, offset, len, ops);
+ iommu_dvmamap_sync(t0, &sc->sc_is, map, offset, len, ops);
+ (*t->_dmamap_sync)(t, t0, map, offset, len, ops);
}
}
int
-sbus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size, bus_size_t alignment,
- bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
- int flags)
+sbus_dmamem_alloc(bus_dma_tag_t t, bus_dma_tag_t t0, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
+ int nsegs, int *rsegs, int flags)
{
- struct sbus_softc *sc = tag->_cookie;
+ struct sbus_softc *sc = t->_cookie;
- return (iommu_dvmamem_alloc(tag, &sc->sc_is, size, alignment, boundary,
+ return (iommu_dvmamem_alloc(t0, &sc->sc_is, size, alignment, boundary,
segs, nsegs, rsegs, flags));
}
void
-sbus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs)
+sbus_dmamem_free(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
+ int nsegs)
{
- struct sbus_softc *sc = (struct sbus_softc *)tag->_cookie;
+ struct sbus_softc *sc = t->_cookie;
- iommu_dvmamem_free(tag, &sc->sc_is, segs, nsegs);
+ iommu_dvmamem_free(t0, &sc->sc_is, segs, nsegs);
}
int
-sbus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
- size_t size, caddr_t *kvap, int flags)
+sbus_dmamem_map(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
+ int nsegs, size_t size, caddr_t *kvap, int flags)
{
- struct sbus_softc *sc = tag->_cookie;
+ struct sbus_softc *sc = t->_cookie;
- return (iommu_dvmamem_map(tag, &sc->sc_is, segs, nsegs, size,
+ return (iommu_dvmamem_map(t0, &sc->sc_is, segs, nsegs, size,
kvap, flags));
}
void
-sbus_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size)
+sbus_dmamem_unmap(bus_dma_tag_t t, bus_dma_tag_t t0, caddr_t kva, size_t size)
{
- struct sbus_softc *sc = (struct sbus_softc *)tag->_cookie;
+ struct sbus_softc *sc = t->_cookie;
- iommu_dvmamem_unmap(tag, &sc->sc_is, kva, size);
+ iommu_dvmamem_unmap(t0, &sc->sc_is, kva, size);
}
diff --git a/sys/arch/sparc64/dev/schizo.c b/sys/arch/sparc64/dev/schizo.c
index 77691e871fb..84c0b603d66 100644
--- a/sys/arch/sparc64/dev/schizo.c
+++ b/sys/arch/sparc64/dev/schizo.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: schizo.c,v 1.10 2003/02/22 19:54:43 jason Exp $ */
+/* $OpenBSD: schizo.c,v 1.11 2003/03/06 08:26:08 henric Exp $ */
/*
* Copyright (c) 2002 Jason L. Wright (jason@thought.net)
@@ -90,19 +90,22 @@ void *_schizo_intr_establish(bus_space_tag_t, bus_space_tag_t, int, int, int,
int (*)(void *), void *);
paddr_t _schizo_bus_mmap(bus_space_tag_t, bus_space_tag_t, bus_addr_t, off_t, int, int);
-int schizo_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
+int schizo_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *);
+void schizo_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+int schizo_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
bus_size_t, struct proc *, int);
-void schizo_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-int schizo_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
+void schizo_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+int schizo_dmamap_load_raw(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dma_segment_t *, int, bus_size_t, int);
-void schizo_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+void schizo_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
bus_size_t, int);
-int schizo_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
- bus_dma_segment_t *, int, int *, int);
-void schizo_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
-int schizo_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, int, size_t,
- caddr_t *, int);
-void schizo_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
+int schizo_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, bus_size_t,
+ bus_size_t, bus_dma_segment_t *, int, int *, int);
+void schizo_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, bus_dma_segment_t *, int);
+int schizo_dmamem_map(bus_dma_tag_t, bus_dma_tag_t, bus_dma_segment_t *, int,
+ size_t, caddr_t *, int);
+void schizo_dmamem_unmap(bus_dma_tag_t, bus_dma_tag_t, caddr_t, size_t);
int
schizo_match(struct device *parent, void *match, void *aux)
@@ -338,12 +341,9 @@ schizo_alloc_dma_tag(struct schizo_pbm *pbm)
bzero(dt, sizeof(*dt));
dt->_cookie = pbm;
dt->_parent = pdt;
-#define PCOPY(x) dt->x = pdt->x
- PCOPY(_dmamap_create);
- PCOPY(_dmamap_destroy);
+ dt->_dmamap_create = schizo_dmamap_create;
+ dt->_dmamap_destroy = schizo_dmamap_destroy;
dt->_dmamap_load = schizo_dmamap_load;
- PCOPY(_dmamap_load_mbuf);
- PCOPY(_dmamap_load_uio);
dt->_dmamap_load_raw = schizo_dmamap_load_raw;
dt->_dmamap_unload = schizo_dmamap_unload;
dt->_dmamap_sync = schizo_dmamap_sync;
@@ -351,8 +351,6 @@ schizo_alloc_dma_tag(struct schizo_pbm *pbm)
dt->_dmamem_free = schizo_dmamem_free;
dt->_dmamem_map = schizo_dmamem_map;
dt->_dmamem_unmap = schizo_dmamem_unmap;
- PCOPY(_dmamem_mmap);
-#undef PCOPY
return (dt);
}
@@ -372,85 +370,112 @@ schizo_alloc_chipset(struct schizo_pbm *pbm, int node, pci_chipset_tag_t pc)
}
int
-schizo_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
- bus_size_t buflen, struct proc *p, int flags)
+schizo_dmamap_create(bus_dma_tag_t t, bus_dma_tag_t t0, bus_size_t size,
+ int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags,
+ bus_dmamap_t *dmamp)
+{
+ struct schizo_pbm *sp = t->_cookie;
+
+ return (iommu_dvmamap_create(t0, &sp->sp_is, &sp->sp_sb, size,
+ nsegments, maxsegsz, boundary, flags, dmamp));
+}
+
+void
+schizo_dmamap_destroy(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
+{
+ iommu_dvmamap_destroy(t0, map);
+}
+
+int
+schizo_dmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, struct proc *p, int flags)
{
struct schizo_pbm *pbm = t->_cookie;
- return (iommu_dvmamap_load(t, &pbm->sp_is, map, buf, buflen, p, flags));
+ return (iommu_dvmamap_load(t0, &pbm->sp_is, map, buf, buflen, p,
+ flags));
}
void
-schizo_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
+schizo_dmamap_unload(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
{
struct schizo_pbm *pbm = t->_cookie;
- iommu_dvmamap_unload(t, &pbm->sp_is, map);
+ iommu_dvmamap_unload(t0, &pbm->sp_is, map);
}
int
-schizo_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
+schizo_dmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
struct schizo_pbm *pbm = t->_cookie;
- return (iommu_dvmamap_load_raw(t, &pbm->sp_is, map, segs, nsegs,
+ return (iommu_dvmamap_load_raw(t0, &pbm->sp_is, map, segs, nsegs,
flags, size));
}
void
-schizo_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
- bus_size_t len, int ops)
+schizo_dmamap_sync(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
+ bus_addr_t offset, bus_size_t len, int ops)
{
struct schizo_pbm *pbm = t->_cookie;
+ if (t->_parent == NULL)
+ panic("schizo_dmamap_sync: no parent");
+
+ for (t = t->_parent; t->_dmamap_sync == NULL; t = t->_parent)
+ if (t == NULL)
+ panic("schizo_dmamap_sync: can't find implementation");
+
+
if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
/* Flush the CPU then the IOMMU */
- bus_dmamap_sync(t->_parent, map, offset, len, ops);
- iommu_dvmamap_sync(t, &pbm->sp_is, map, offset, len, ops);
+ (*t->_dmamap_sync)(t, t0, map, offset, len, ops);
+ iommu_dvmamap_sync(t0, &pbm->sp_is, map, offset, len, ops);
}
if (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) {
/* Flush the IOMMU then the CPU */
- iommu_dvmamap_sync(t, &pbm->sp_is, map, offset, len, ops);
- bus_dmamap_sync(t->_parent, map, offset, len, ops);
+ iommu_dvmamap_sync(t0, &pbm->sp_is, map, offset, len, ops);
+ (*t->_dmamap_sync)(t, t0, map, offset, len, ops);
}
}
int
-schizo_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
- bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
- int flags)
+schizo_dmamem_alloc(bus_dma_tag_t t, bus_dma_tag_t t0, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
+ int nsegs, int *rsegs, int flags)
{
struct schizo_pbm *pbm = t->_cookie;
- return (iommu_dvmamem_alloc(t, &pbm->sp_is, size, alignment, boundary,
+ return (iommu_dvmamem_alloc(t0, &pbm->sp_is, size, alignment, boundary,
segs, nsegs, rsegs, flags));
}
void
-schizo_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
+schizo_dmamem_free(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
+ int nsegs)
{
struct schizo_pbm *pbm = t->_cookie;
- iommu_dvmamem_free(t, &pbm->sp_is, segs, nsegs);
+ iommu_dvmamem_free(t0, &pbm->sp_is, segs, nsegs);
}
int
-schizo_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
- size_t size, caddr_t *kvap, int flags)
+schizo_dmamem_map(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
+ int nsegs, size_t size, caddr_t *kvap, int flags)
{
struct schizo_pbm *pbm = t->_cookie;
- return (iommu_dvmamem_map(t, &pbm->sp_is, segs, nsegs, size,
+ return (iommu_dvmamem_map(t0, &pbm->sp_is, segs, nsegs, size,
kvap, flags));
}
void
-schizo_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
+schizo_dmamem_unmap(bus_dma_tag_t t, bus_dma_tag_t t0, caddr_t kva, size_t size)
{
struct schizo_pbm *pbm = t->_cookie;
- iommu_dvmamem_unmap(t, &pbm->sp_is, kva, size);
+ iommu_dvmamem_unmap(t0, &pbm->sp_is, kva, size);
}
int
diff --git a/sys/arch/sparc64/include/bus.h b/sys/arch/sparc64/include/bus.h
index 20a2e36f925..e30b876c509 100644
--- a/sys/arch/sparc64/include/bus.h
+++ b/sys/arch/sparc64/include/bus.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus.h,v 1.14 2003/02/17 01:29:20 henric Exp $ */
+/* $OpenBSD: bus.h,v 1.15 2003/03/06 08:26:08 henric Exp $ */
/* $NetBSD: bus.h,v 1.31 2001/09/21 15:30:41 wiz Exp $ */
/*-
@@ -96,7 +96,9 @@ extern int bus_space_debug;
#ifndef __SYSTM_H__
#include <sys/systm.h>
#endif
-#define BUS_SPACE_PRINTF(l, s) do { if(bus_space_debug & (l)) printf s; } while(0)
+#define BUS_SPACE_PRINTF(l, s) do { \
+ if(bus_space_debug & (l)) printf s; \
+} while(0)
#define BUS_SPACE_TRACE(t, h, s) do { \
if ( (((bus_space_debug & BSDB_ALL_ACCESS) != 0) && \
(((h).bh_flags & BSHDB_NO_ACCESS) == 0)) || \
@@ -113,7 +115,7 @@ extern int bus_space_debug;
if (bus_space_debug & BSDB_ASSERT) \
bus_space_assert(t, &(h), o, n); \
} while(0)
-#else
+#else /* BUS_SPACE_DEBUG */
#define BUS_SPACE_PRINTF(l, s)
#define BUS_SPACE_TRACE(t, h, s)
#define BUS_SPACE_SET_FLAGS(t, h, f)
@@ -122,7 +124,7 @@ extern int bus_space_debug;
#define BUS_SPACE_SAVE_FLAGS(t, h, s)
#define BUS_SPACE_RESTORE_FLAGS(t, h, s)
#define BUS_SPACE_ASSERT(t, h, o, n)
-#endif
+#endif /* BUS_SPACE_DEBUG */
/*
@@ -223,12 +225,6 @@ struct sparc_bus_space_tag {
};
-#ifdef BUS_SPACE_DEBUG
-void bus_space_assert(bus_space_tag_t,
- const bus_space_handle_t *,
- bus_size_t, int);
-void bus_space_render_tag(bus_space_tag_t, char*, size_t);
-#endif /* BUS_SPACE_DEBUG */
/*
* Bus space function prototypes.
*/
@@ -295,6 +291,14 @@ void *bus_space_vaddr(
bus_space_tag_t,
bus_space_handle_t);
+#ifdef BUS_SPACE_DEBUG
+void bus_space_assert(bus_space_tag_t,
+ const bus_space_handle_t *,
+ bus_size_t, int);
+void bus_space_render_tag(bus_space_tag_t, char*, size_t);
+#endif /* BUS_SPACE_DEBUG */
+
+
#define _BS_PRECALL(t,f) \
while (t->f == NULL) \
t = t->parent;
@@ -405,6 +409,13 @@ typedef struct sparc_bus_dmamap *bus_dmamap_t;
struct sparc_bus_dma_segment {
bus_addr_t ds_addr; /* DVMA address */
bus_size_t ds_len; /* length of transfer */
+ /*
+ * The following is to support bus_dmamem_alloc()'s
+ * odd interface. Only the values in the first
+ * segment are used. This means that 3/5ths of
+ * most segments are useless space (and mbufs use 1024
+ * segments).
+ */
bus_size_t _ds_boundary; /* don't cross this */
bus_size_t _ds_align; /* align to this */
void *_ds_mlist; /* XXX - dmamap_alloc'ed pages */
@@ -425,63 +436,173 @@ struct sparc_bus_dma_tag {
/*
* DMA mapping methods.
*/
- int (*_dmamap_create)(bus_dma_tag_t, bus_size_t, int,
- bus_size_t, bus_size_t, int, bus_dmamap_t *);
- void (*_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
- int (*_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
- bus_size_t, struct proc *, int);
- int (*_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
- struct mbuf *, int);
- int (*_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t,
+ int (*_dmamap_create)(bus_dma_tag_t, bus_dma_tag_t, bus_size_t,
+ int, bus_size_t, bus_size_t, int, bus_dmamap_t *);
+ void (*_dmamap_destroy)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+ int (*_dmamap_load)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+ void *, bus_size_t, struct proc *, int);
+ int (*_dmamap_load_mbuf)(bus_dma_tag_t, bus_dma_tag_t,
+ bus_dmamap_t, struct mbuf *, int);
+ int (*_dmamap_load_uio)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
struct uio *, int);
- int (*_dmamap_load_raw)(bus_dma_tag_t, bus_dmamap_t,
+ int (*_dmamap_load_raw)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dma_segment_t *, int, bus_size_t, int);
- void (*_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
- void (*_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
+ void (*_dmamap_unload)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+ void (*_dmamap_sync)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_addr_t, bus_size_t, int);
/*
* DMA memory utility functions.
*/
- int (*_dmamem_alloc)(bus_dma_tag_t, bus_size_t, bus_size_t,
- bus_size_t, bus_dma_segment_t *, int, int *, int);
- void (*_dmamem_free)(bus_dma_tag_t,
+ int (*_dmamem_alloc)(bus_dma_tag_t, bus_dma_tag_t, bus_size_t,
+ bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *,
+ int);
+ void (*_dmamem_free)(bus_dma_tag_t, bus_dma_tag_t,
bus_dma_segment_t *, int);
- int (*_dmamem_map)(bus_dma_tag_t, bus_dma_segment_t *,
- int, size_t, caddr_t *, int);
- void (*_dmamem_unmap)(bus_dma_tag_t, caddr_t, size_t);
- paddr_t (*_dmamem_mmap)(bus_dma_tag_t, bus_dma_segment_t *,
- int, off_t, int, int);
+ int (*_dmamem_map)(bus_dma_tag_t, bus_dma_tag_t,
+ bus_dma_segment_t *, int, size_t, caddr_t *, int);
+ void (*_dmamem_unmap)(bus_dma_tag_t, bus_dma_tag_t, caddr_t,
+ size_t);
+ paddr_t (*_dmamem_mmap)(bus_dma_tag_t, bus_dma_tag_t,
+ bus_dma_segment_t *, int, off_t, int, int);
};
-#define bus_dmamap_create(t, s, n, m, b, f, p) \
- (*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p))
-#define bus_dmamap_destroy(t, p) \
- (*(t)->_dmamap_destroy)((t), (p))
-#define bus_dmamap_load(t, m, b, s, p, f) \
- (*(t)->_dmamap_load)((t), (m), (b), (s), (p), (f))
-#define bus_dmamap_load_mbuf(t, m, b, f) \
- (*(t)->_dmamap_load_mbuf)((t), (m), (b), (f))
-#define bus_dmamap_load_uio(t, m, u, f) \
- (*(t)->_dmamap_load_uio)((t), (m), (u), (f))
-#define bus_dmamap_load_raw(t, m, sg, n, s, f) \
- (*(t)->_dmamap_load_raw)((t), (m), (sg), (n), (s), (f))
-#define bus_dmamap_unload(t, p) \
- (*(t)->_dmamap_unload)((t), (p))
-#define bus_dmamap_sync(t, p, o, l, ops) \
- (void)((t)->_dmamap_sync ? \
- (*(t)->_dmamap_sync)((t), (p), (o), (l), (ops)) : (void)0)
-
-#define bus_dmamem_alloc(t, s, a, b, sg, n, r, f) \
- (*(t)->_dmamem_alloc)((t), (s), (a), (b), (sg), (n), (r), (f))
-#define bus_dmamem_free(t, sg, n) \
- (*(t)->_dmamem_free)((t), (sg), (n))
-#define bus_dmamem_map(t, sg, n, s, k, f) \
- (*(t)->_dmamem_map)((t), (sg), (n), (s), (k), (f))
-#define bus_dmamem_unmap(t, k, s) \
- (*(t)->_dmamem_unmap)((t), (k), (s))
-#define bus_dmamem_mmap(t, sg, n, o, p, f) \
- (*(t)->_dmamem_mmap)((t), (sg), (n), (o), (p), (f))
+#define _BD_PRECALL(t,f) \
+ while (t->f == NULL) { \
+ t = t->_parent; \
+ }
+#define _BD_CALL(t,f) \
+ (*(t)->f)
+#define _BD_POSTCALL
+
+static inline int
+bus_dmamap_create(bus_dma_tag_t t, bus_size_t s, int n, bus_size_t m,
+ bus_size_t b, int f, bus_dmamap_t *p)
+{
+ int r;
+ const bus_dma_tag_t t0 = t;
+ _BD_PRECALL(t, _dmamap_create);
+ r = _BD_CALL(t, _dmamap_create)(t, t0, s, n, m, b, f, p);
+ _BD_POSTCALL;
+ return (r);
+}
+static inline void
+bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t p)
+{
+ const bus_dma_tag_t t0 = t;
+ _BD_PRECALL(t, _dmamap_destroy);
+ _BD_CALL(t, _dmamap_destroy)(t, t0, p);
+ _BD_POSTCALL;
+}
+static inline int
+bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t m, void *b, bus_size_t s,
+ struct proc *p, int f)
+{
+ const bus_dma_tag_t t0 = t;
+ int r;
+ _BD_PRECALL(t, _dmamap_load);
+ r = _BD_CALL(t, _dmamap_load)(t, t0, m, b, s, p, f);
+ _BD_POSTCALL;
+ return (r);
+}
+static inline int
+bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t m, struct mbuf *b,
+ int f)
+{
+ const bus_dma_tag_t t0 = t;
+ int r;
+ _BD_PRECALL(t, _dmamap_load_mbuf);
+ r = _BD_CALL(t, _dmamap_load_mbuf)(t, t0, m, b, f);
+ _BD_POSTCALL;
+ return (r);
+}
+static inline int
+bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t m, struct uio * u, int f)
+{
+ const bus_dma_tag_t t0 = t;
+ int r;
+ _BD_PRECALL(t, _dmamap_load_uio);
+ r = _BD_CALL(t, _dmamap_load_uio)(t, t0, m, u, f);
+ _BD_POSTCALL;
+ return (r);
+}
+static inline int
+bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t m, bus_dma_segment_t *sg,
+ int n, bus_size_t s, int f)
+{
+ const bus_dma_tag_t t0 = t;
+ int r;
+ _BD_PRECALL(t, _dmamap_load_raw);
+ r = _BD_CALL(t, _dmamap_load_raw)(t, t0, m, sg, n, s, f);
+ _BD_POSTCALL;
+ return (r);
+}
+static inline void
+bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t p)
+{
+ const bus_dma_tag_t t0 = t;
+ _BD_PRECALL(t, _dmamap_unload);
+ _BD_CALL(t, _dmamap_unload)(t, t0, p);
+ _BD_POSTCALL;
+}
+static inline void
+bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t p, bus_addr_t o, bus_size_t l,
+ int ops)
+{
+ const bus_dma_tag_t t0 = t;
+ _BD_PRECALL(t, _dmamap_sync);
+ _BD_CALL(t, _dmamap_sync)(t, t0, p, o, l, ops);
+ _BD_POSTCALL;
+}
+static inline int
+bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t s, bus_size_t a, bus_size_t b,
+ bus_dma_segment_t *sg, int n, int *r, int f)
+{
+ const bus_dma_tag_t t0 = t;
+ int ret;
+ _BD_PRECALL(t, _dmamem_alloc);
+ ret = _BD_CALL(t, _dmamem_alloc)(t, t0, s, a, b, sg, n, r, f);
+ _BD_POSTCALL;
+ return (ret);
+}
+static inline void
+bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *sg, int n)
+{
+ const bus_dma_tag_t t0 = t;
+ _BD_PRECALL(t, _dmamem_free);
+ _BD_CALL(t, _dmamem_free)(t, t0, sg, n);
+ _BD_POSTCALL;
+}
+static inline int
+bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *sg, int n, size_t s,
+ caddr_t *k, int f)
+{
+ const bus_dma_tag_t t0 = t;
+ int r;
+ _BD_PRECALL(t, _dmamem_map);
+ r = _BD_CALL(t, _dmamem_map)(t, t0, sg, n, s, k, f);
+ _BD_POSTCALL;
+ return (r);
+}
+static inline void
+bus_dmamem_unmap(bus_dma_tag_t t, caddr_t k, size_t s)
+{
+ const bus_dma_tag_t t0 = t;
+ _BD_PRECALL(t, _dmamem_unmap);
+ _BD_CALL(t, _dmamem_unmap)(t, t0, k, s);
+ _BD_POSTCALL;
+}
+static inline paddr_t
+bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *sg, int n, off_t o, int p,
+ int f)
+{
+ const bus_dma_tag_t t0 = t;
+	paddr_t r;
+ _BD_PRECALL(t, _dmamem_mmap);
+ r = _BD_CALL(t, _dmamem_mmap)(t, t0, sg, n, o, p, f);
+ _BD_POSTCALL;
+ return (r);
+}
/*
* bus_dmamap_t
@@ -504,8 +625,8 @@ struct sparc_bus_dmamap {
#define _DM_TYPE_SEGS 1
#define _DM_TYPE_UIO 2
#define _DM_TYPE_MBUF 3
- int _dm_type; /* type of mapping: raw, uio, mbuf, etc */
- void *_dm_source; /* source mbuf, uio, etc. needed for unload */
+ int _dm_type; /* mapping type: raw, uio, mbuf, etc */
+ void *_dm_source; /* source mbuf/uio/etc. for unload */
void *_dm_cookie; /* cookie for bus-specific functions */
@@ -514,42 +635,9 @@ struct sparc_bus_dmamap {
*/
bus_size_t dm_mapsize; /* size of the mapping */
int dm_nsegs; /* # valid segments in mapping */
+
bus_dma_segment_t dm_segs[1]; /* segments; variable length */
};
-#ifdef _SPARC_BUS_DMA_PRIVATE
-int _bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
- bus_size_t, int, bus_dmamap_t *);
-void _bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
-int _bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
- bus_size_t, struct proc *, int);
-int _bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
- struct mbuf *, int);
-int _bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
- struct uio *, int);
-int _bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
- bus_dma_segment_t *, int, bus_size_t, int);
-void _bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
- bus_size_t, int);
-
-int _bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
- bus_size_t alignment, bus_size_t boundary,
- bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
-void _bus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
- int nsegs);
-int _bus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs,
- int nsegs, size_t size, caddr_t *kvap, int flags);
-void _bus_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva,
- size_t size);
-paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
- int nsegs, off_t off, int prot, int flags);
-
-int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
- bus_size_t alignment, bus_size_t boundary,
- bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
- vaddr_t low, vaddr_t high);
-#endif /* _SPARC_BUS_DMA_PRIVATE */
-
#endif /* _SPARC_BUS_H_ */
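Both sparc_bus_dmamap and iommu_page_map end in a one-element array, the
pre-C99 idiom for a variable-length trailing array; iommu_iomap_create()
sizes the allocation as sizeof(struct) plus (n - 1) extra elements. A
standalone sketch of the idiom:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long pa;
};

/* The "struct hack": one declared element, the rest added at malloc time. */
struct page_map {
	int maxpage;
	int pagecnt;
	struct entry map[1];
};

static struct page_map *
page_map_create(int n)
{
	struct page_map *pm;

	/* One entry already lives inside the struct, so add n - 1 more. */
	pm = malloc(sizeof(*pm) + (n - 1) * sizeof(pm->map[0]));
	if (pm == NULL)
		return (NULL);
	pm->maxpage = n;
	pm->pagecnt = 0;
	return (pm);
}

int
main(void)
{
	struct page_map *pm = page_map_create(64);

	if (pm == NULL)
		return (1);
	pm->map[63].pa = 0x2000;	/* the last slot is valid storage */
	printf("map of %d entries, slot 63 pa %#lx\n",
	    pm->maxpage, pm->map[63].pa);
	free(pm);
	return (0);
}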
diff --git a/sys/arch/sparc64/sparc64/machdep.c b/sys/arch/sparc64/sparc64/machdep.c
index f9b7f6fb116..4a558e50c9b 100644
--- a/sys/arch/sparc64/sparc64/machdep.c
+++ b/sys/arch/sparc64/sparc64/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.57 2003/02/24 01:00:52 miod Exp $ */
+/* $OpenBSD: machdep.c,v 1.58 2003/03/06 08:26:08 henric Exp $ */
/* $NetBSD: machdep.c,v 1.108 2001/07/24 19:30:14 eeh Exp $ */
/*-
@@ -131,8 +131,46 @@
#include <dev/ic/pckbcvar.h>
#endif
-/* This may be used by macros elsewhere. */
-int bus_space_debug = BSDB_ACCESS | BSDB_ASSERT | BSDB_MAP;
+int _bus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *);
+void _bus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+int _bus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int);
+int _bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int);
+int _bus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int);
+int _bus_dmamap_load_raw(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int);
+void _bus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+void _bus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+ bus_addr_t, bus_size_t, int);
+
+int _bus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
+
+void _bus_dmamem_free(bus_dma_tag_t tag, bus_dma_tag_t,
+ bus_dma_segment_t *segs, int nsegs);
+int _bus_dmamem_map(bus_dma_tag_t tag, bus_dma_tag_t,
+ bus_dma_segment_t *segs, int nsegs, size_t size, caddr_t *kvap,
+ int flags);
+void _bus_dmamem_unmap(bus_dma_tag_t tag, bus_dma_tag_t, caddr_t kva,
+ size_t size);
+paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_tag_t,
+ bus_dma_segment_t *segs, int nsegs, off_t off, int prot, int flags);
+
+int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_dma_tag_t,
+ bus_size_t size, bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
+ vaddr_t low, vaddr_t high);
+
+/*
+ * The "bus_space_debug" flags used by macros elsewhere.
+ * A good set of flags to use when first debugging something is:
+ * int bus_space_debug = BSDB_ACCESS | BSDB_ASSERT | BSDB_MAP;
+ */
+int bus_space_debug = 0;
struct vm_map *exec_map = NULL;
extern vaddr_t avail_end;
@@ -1033,7 +1071,8 @@ stackdump()
if( ((long)fp) & 1 ) {
fp64 = (struct frame64*)(((char *)fp)+BIAS);
/* 64-bit frame */
- printf("%llx(%llx, %llx, %llx, %llx, %llx, %llx, %llx) fp = %llx\n",
+ printf("%llx(%llx, %llx, %llx, %llx, %llx, %llx, %llx) "
+ "fp = %llx\n",
(unsigned long long)fp64->fr_pc,
(unsigned long long)fp64->fr_arg[0],
(unsigned long long)fp64->fr_arg[1],
@@ -1046,10 +1085,11 @@ stackdump()
fp = (struct frame32 *)(u_long)fp64->fr_fp;
} else {
/* 32-bit frame */
- printf(" pc = %x args = (%x, %x, %x, %x, %x, %x, %x) fp = %x\n",
- fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
- fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6],
- fp->fr_fp);
+ printf(" pc = %x args = (%x, %x, %x, %x, %x, %x, %x) "
+ "fp = %x\n", fp->fr_pc, fp->fr_arg[0],
+ fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
+ fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6],
+ fp->fr_fp);
fp = (struct frame32*)(u_long)(u_short)fp->fr_fp;
}
}
@@ -1069,8 +1109,8 @@ cpu_exec_aout_makecmds(p, epp)
* DMA map creation functions.
*/
int
-_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
- bus_dma_tag_t t;
+_bus_dmamap_create(t, t0, size, nsegments, maxsegsz, boundary, flags, dmamp)
+ bus_dma_tag_t t, t0;
bus_size_t size;
int nsegments;
bus_size_t maxsegsz;
@@ -1106,8 +1146,8 @@ _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
map->_dm_segcnt = nsegments;
map->_dm_maxsegsz = maxsegsz;
map->_dm_boundary = boundary;
- map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT|BUS_DMA_COHERENT|
- BUS_DMA_NOWRITE|BUS_DMA_NOCACHE);
+ map->_dm_flags = flags & ~(BUS_DMA_WAITOK | BUS_DMA_NOWAIT |
+ BUS_DMA_COHERENT | BUS_DMA_NOWRITE | BUS_DMA_NOCACHE);
map->dm_mapsize = 0; /* no valid mappings */
map->dm_nsegs = 0;
@@ -1120,10 +1160,21 @@ _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
* DMA map destruction functions.
*/
void
-_bus_dmamap_destroy(t, map)
- bus_dma_tag_t t;
+_bus_dmamap_destroy(t, t0, map)
+ bus_dma_tag_t t, t0;
bus_dmamap_t map;
+
{
+ /*
+ * Unload the map if it is still loaded. This is required
+ * by the specification (well, the manpage). Higher level
+ * drivers, if any, should do this too. By the time the
+ * system gets here, the higher level "destroy" functions
+ * would probably already have clobbered the data needed
+ * to do a proper unload.
+ */
+ if (map->dm_nsegs)
+ bus_dmamap_unload(t0, map);
free(map, M_DEVBUF);
}
@@ -1139,8 +1190,8 @@ _bus_dmamap_destroy(t, map)
* bypass DVMA.
*/
int
-_bus_dmamap_load(t, map, buf, buflen, p, flags)
- bus_dma_tag_t t;
+_bus_dmamap_load(t, t0, map, buf, buflen, p, flags)
+ bus_dma_tag_t t, t0;
bus_dmamap_t map;
void *buf;
bus_size_t buflen;
@@ -1185,7 +1236,9 @@ _bus_dmamap_load(t, map, buf, buflen, p, flags)
map->dm_segs[++i].ds_addr = pa;
map->dm_segs[i].ds_len = NBPG;
}
+ /* Is this what the above comment calls "one segment"? */
map->dm_nsegs = i;
+
/* Mapping is bus dependent */
return (0);
}
@@ -1194,8 +1247,8 @@ _bus_dmamap_load(t, map, buf, buflen, p, flags)
* Like _bus_dmamap_load(), but for mbufs.
*/
int
-_bus_dmamap_load_mbuf(t, map, m, flags)
- bus_dma_tag_t t;
+_bus_dmamap_load_mbuf(t, t0, map, m, flags)
+ bus_dma_tag_t t, t0;
bus_dmamap_t map;
struct mbuf *m;
int flags;
@@ -1206,7 +1259,7 @@ _bus_dmamap_load_mbuf(t, map, m, flags)
/* Record mbuf for *_unload */
map->_dm_type = _DM_TYPE_MBUF;
- map->_dm_source = (void *)m;
+ map->_dm_source = m;
i = 0;
len = 0;
@@ -1224,10 +1277,11 @@ _bus_dmamap_load_mbuf(t, map, m, flags)
buflen -= incr;
vaddr += incr;
- if (i > 0 && pa == (segs[i-1].ds_addr + segs[i-1].ds_len)
- && ((segs[i-1].ds_len + incr) < map->_dm_maxsegsz)) {
+ if (i > 0 && pa == (segs[i - 1].ds_addr +
+ segs[i - 1].ds_len) && ((segs[i - 1].ds_len + incr)
+ < map->_dm_maxsegsz)) {
/* Hey, waddyaknow, they're contiguous */
- segs[i-1].ds_len += incr;
+ segs[i - 1].ds_len += incr;
continue;
}
segs[i].ds_addr = pa;
@@ -1246,7 +1300,7 @@ _bus_dmamap_load_mbuf(t, map, m, flags)
}
}
- return (bus_dmamap_load_raw(t, map, segs, i,
+ return (bus_dmamap_load_raw(t0, map, segs, i,
(bus_size_t)len, flags));
}
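The coalescing test in the mbuf and uio loaders is the same in both: if
the new chunk starts exactly where the previous segment ends and the
merged length stays under _dm_maxsegsz, extend the previous segment
instead of starting a new one. The test in isolation, with a made-up
segment size limit:

#include <stdio.h>

#define MAXSEGSZ 0x4000		/* stand-in for map->_dm_maxsegsz */

struct seg {
	unsigned long addr;
	unsigned long len;
};

/* Append a chunk, merging with the tail segment when contiguous. */
static int
append_chunk(struct seg *segs, int i, unsigned long pa, unsigned long incr)
{
	if (i > 0 && pa == segs[i - 1].addr + segs[i - 1].len &&
	    segs[i - 1].len + incr < MAXSEGSZ) {
		segs[i - 1].len += incr;	/* contiguous: extend */
		return (i);
	}
	segs[i].addr = pa;			/* start a new segment */
	segs[i].len = incr;
	return (i + 1);
}

int
main(void)
{
	struct seg segs[4];
	int n = 0;

	n = append_chunk(segs, n, 0x10000, 0x2000);
	n = append_chunk(segs, n, 0x12000, 0x1000);	/* merges */
	n = append_chunk(segs, n, 0x20000, 0x2000);	/* doesn't */
	printf("%d segments, first len %#lx\n", n, segs[0].len);
	return (0);
}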
@@ -1254,8 +1308,8 @@ _bus_dmamap_load_mbuf(t, map, m, flags)
* Like _bus_dmamap_load(), but for uios.
*/
int
-_bus_dmamap_load_uio(t, map, uio, flags)
- bus_dma_tag_t t;
+_bus_dmamap_load_uio(t, t0, map, uio, flags)
+ bus_dma_tag_t t, t0;
bus_dmamap_t map;
struct uio *uio;
int flags;
@@ -1293,10 +1347,11 @@ _bus_dmamap_load_uio(t, map, uio, flags)
buflen -= incr;
vaddr += incr;
- if (i > 0 && pa == (segs[i-1].ds_addr + segs[i-1].ds_len)
- && ((segs[i-1].ds_len + incr) < map->_dm_maxsegsz)) {
+ if (i > 0 && pa == (segs[i - 1].ds_addr +
+ segs[i - 1].ds_len) && ((segs[i - 1].ds_len + incr)
+ < map->_dm_maxsegsz)) {
/* Hey, waddyaknow, they're contiguous */
- segs[i-1].ds_len += incr;
+ segs[i - 1].ds_len += incr;
continue;
}
segs[i].ds_addr = pa;
@@ -1315,7 +1370,7 @@ _bus_dmamap_load_uio(t, map, uio, flags)
}
}
- return (bus_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
+ return (bus_dmamap_load_raw(t0, map, segs, i, (bus_size_t)len, flags));
}
/*
@@ -1323,8 +1378,8 @@ _bus_dmamap_load_uio(t, map, uio, flags)
* bus_dmamem_alloc().
*/
int
-_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
- bus_dma_tag_t t;
+_bus_dmamap_load_raw(t, t0, map, segs, nsegs, size, flags)
+ bus_dma_tag_t t, t0;
bus_dmamap_t map;
bus_dma_segment_t *segs;
int nsegs;
@@ -1340,8 +1395,8 @@ _bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
* bus-specific DMA map unload functions.
*/
void
-_bus_dmamap_unload(t, map)
- bus_dma_tag_t t;
+_bus_dmamap_unload(t, t0, map)
+ bus_dma_tag_t t, t0;
bus_dmamap_t map;
{
int i;
@@ -1349,7 +1404,7 @@ _bus_dmamap_unload(t, map)
struct pglist *mlist;
paddr_t pa;
- for (i=0; i<map->dm_nsegs; i++) {
+ for (i = 0; i < map->dm_nsegs; i++) {
if ((mlist = map->dm_segs[i]._ds_mlist) == NULL) {
/*
* We were asked to load random VAs and lost the
@@ -1379,8 +1434,8 @@ _bus_dmamap_unload(t, map)
* by bus-specific DMA map synchronization functions.
*/
void
-_bus_dmamap_sync(t, map, offset, len, ops)
- bus_dma_tag_t t;
+_bus_dmamap_sync(t, t0, map, offset, len, ops)
+ bus_dma_tag_t t, t0;
bus_dmamap_t map;
bus_addr_t offset;
bus_size_t len;
@@ -1437,8 +1492,8 @@ extern paddr_t vm_first_phys, vm_num_phys;
* by bus-specific DMA memory allocation functions.
*/
int
-_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
- bus_dma_tag_t t;
+_bus_dmamem_alloc(t, t0, size, alignment, boundary, segs, nsegs, rsegs, flags)
+ bus_dma_tag_t t, t0;
bus_size_t size, alignment, boundary;
bus_dma_segment_t *segs;
int nsegs;
@@ -1503,8 +1558,8 @@ _bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
* bus-specific DMA memory free functions.
*/
void
-_bus_dmamem_free(t, segs, nsegs)
- bus_dma_tag_t t;
+_bus_dmamem_free(t, t0, segs, nsegs)
+ bus_dma_tag_t t, t0;
bus_dma_segment_t *segs;
int nsegs;
{
@@ -1524,8 +1579,8 @@ _bus_dmamem_free(t, segs, nsegs)
* bus-specific DMA memory map functions.
*/
int
-_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
- bus_dma_tag_t t;
+_bus_dmamem_map(t, t0, segs, nsegs, size, kvap, flags)
+ bus_dma_tag_t t, t0;
bus_dma_segment_t *segs;
int nsegs;
size_t size;
@@ -1533,7 +1588,6 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
int flags;
{
vaddr_t va, sva;
- struct pglist *mlist;
int r, cbit;
size_t oversize;
u_long align;
@@ -1569,7 +1623,6 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
*kvap = (caddr_t)va;
- mlist = segs[0]._ds_mlist;
return (0);
}
@@ -1579,8 +1632,8 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
* bus-specific DMA memory unmapping functions.
*/
void
-_bus_dmamem_unmap(t, kva, size)
- bus_dma_tag_t t;
+_bus_dmamem_unmap(t, t0, kva, size)
+ bus_dma_tag_t t, t0;
caddr_t kva;
size_t size;
{
@@ -1599,8 +1652,8 @@ _bus_dmamem_unmap(t, kva, size)
* bus-specific DMA mmap(2)'ing functions.
*/
paddr_t
-_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
- bus_dma_tag_t t;
+_bus_dmamem_mmap(t, t0, segs, nsegs, off, prot, flags)
+ bus_dma_tag_t t, t0;
bus_dma_segment_t *segs;
int nsegs;
off_t off;
@@ -1680,7 +1733,7 @@ sparc_bus_map(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t addr,
io_space = extent_create("IOSPACE",
(u_long)IODEV_BASE, (u_long)IODEV_END, M_DEVBUF, 0, 0,
EX_NOWAIT);
- size = round_page(size);
+
if (size == 0) {
char buf[80];
bus_space_render_tag(t0, buf, sizeof buf);
@@ -1718,6 +1771,8 @@ sparc_bus_map(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t addr,
return (0);
}
+ size = round_page(size);
+
if (LITTLE_ASI(t0->sasi) && !LITTLE_ASI(t0->asi))
pm_flags |= PMAP_LITTLE;