author		Miod Vallat <miod@cvs.openbsd.org>	2015-09-05 21:13:25 +0000
committer	Miod Vallat <miod@cvs.openbsd.org>	2015-09-05 21:13:25 +0000
commit		0d014dea44612316aed2ff47e882601e7a05e885
tree		dd6fa1fbfec72ec60d03772d83afc6229b45098b /sys/arch
parent		5d914cb37ea64db77ffb8fbd644c576e9880def6
Give up trying to map DMA descriptors in uncached memory on ECC flavours
of the IP22 motherboard (IP26, IP28). Instead, do not ask for a
BUS_DMA_COHERENT mapping, but perform explicit cache operations. This
removes the need for the memory controller to switch between `fast' and
`slow' mode every time a DMA descriptor is updated. Tested on IP22 and
IP28.
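The patch replaces the copy-based hpc_read_dma_desc()/hpc_write_dma_desc()
pair with hpc_sync_dma_desc()/hpc_update_dma_desc(), chosen once per system
when the first hpc attaches. A minimal compilable sketch of that dispatch,
not the kernel source; the ecc argument stands in for the kernel's ip22_ecc
flag and all other names are illustrative:

    #include <stddef.h>

    struct hpc_dma_desc;    /* opaque in this sketch */

    /* Parity MC: descriptors are uncached, nothing to maintain. */
    static void sync_par(struct hpc_dma_desc *d)   { (void)d; }
    static void update_par(struct hpc_dma_desc *d) { (void)d; }
    /* ECC MC: these would invalidate / write back the D-cache. */
    static void sync_ecc(struct hpc_dma_desc *d)   { (void)d; }
    static void update_ecc(struct hpc_dma_desc *d) { (void)d; }

    static void (*hpc_sync_fn)(struct hpc_dma_desc *);
    static void (*hpc_update_fn)(struct hpc_dma_desc *);

    void
    hpc_dma_helpers_init(int ecc)
    {
        if (hpc_sync_fn != NULL)
            return;             /* a sibling hpc already chose */
        hpc_sync_fn   = ecc ? sync_ecc : sync_par;
        hpc_update_fn = ecc ? update_ecc : update_par;
    }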
Diffstat (limited to 'sys/arch')
 sys/arch/sgi/hpc/hpc.c      | 80
 sys/arch/sgi/hpc/hpcreg.h   | 11
 sys/arch/sgi/hpc/hpcvar.h   |  7
 sys/arch/sgi/hpc/if_sq.c    | 61
 sys/arch/sgi/hpc/if_sqvar.h | 24
 sys/arch/sgi/sgi/bus_dma.c  |  8
 6 files changed, 95 insertions(+), 96 deletions(-)
diff --git a/sys/arch/sgi/hpc/hpc.c b/sys/arch/sgi/hpc/hpc.c
index 25f00a57ca3..458ac49a601 100644
--- a/sys/arch/sgi/hpc/hpc.c
+++ b/sys/arch/sgi/hpc/hpc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: hpc.c,v 1.16 2013/09/28 14:00:00 miod Exp $ */
+/* $OpenBSD: hpc.c,v 1.17 2015/09/05 21:13:24 miod Exp $ */
/* $NetBSD: hpc.c,v 1.66 2011/07/01 18:53:46 dyoung Exp $ */
/* $NetBSD: ioc.c,v 1.9 2011/07/01 18:53:47 dyoung Exp $ */
@@ -363,17 +363,14 @@ void hpc_blink_ioc(void *);
int hpc_read_eeprom(int, bus_space_tag_t, bus_space_handle_t, uint8_t *,
size_t);
-struct hpc_dma_desc *hpc_read_dma_desc_par(struct hpc_dma_desc *,
- struct hpc_dma_desc *);
-struct hpc_dma_desc *hpc_read_dma_desc_ecc(struct hpc_dma_desc *,
- struct hpc_dma_desc *);
-void hpc_write_dma_desc_par(struct hpc_dma_desc *, struct hpc_dma_desc *);
-void hpc_write_dma_desc_ecc(struct hpc_dma_desc *, struct hpc_dma_desc *);
+void hpc_sync_dma_desc_par(struct hpc_dma_desc *);
+void hpc_sync_dma_desc_ecc(struct hpc_dma_desc *);
+void hpc_update_dma_desc_par(struct hpc_dma_desc *);
+void hpc_update_dma_desc_ecc(struct hpc_dma_desc *);
/* globals since they depend upon the system type, not the hpc version */
-struct hpc_dma_desc *(*hpc_read_dma_desc_fn)(struct hpc_dma_desc *,
- struct hpc_dma_desc *);
-void (*hpc_write_dma_desc_fn)(struct hpc_dma_desc *, struct hpc_dma_desc *);
+void (*hpc_sync_dma_desc_fn)(struct hpc_dma_desc *);
+void (*hpc_update_dma_desc_fn)(struct hpc_dma_desc *);
const struct cfattach hpc_ca = {
sizeof(struct hpc_softc), hpc_match, hpc_attach
@@ -436,13 +433,13 @@ hpc_attach(struct device *parent, struct device *self, void *aux)
sc->sc_dmat = ga->ga_dmat;
/* setup HPC DMA helpers if not done already */
- if (hpc_read_dma_desc_fn == NULL) {
+ if (hpc_sync_dma_desc_fn == NULL) {
if (ip22_ecc) {
- hpc_read_dma_desc_fn = hpc_read_dma_desc_ecc;
- hpc_write_dma_desc_fn = hpc_write_dma_desc_ecc;
+ hpc_sync_dma_desc_fn = hpc_sync_dma_desc_ecc;
+ hpc_update_dma_desc_fn = hpc_update_dma_desc_ecc;
} else {
- hpc_read_dma_desc_fn = hpc_read_dma_desc_par;
- hpc_write_dma_desc_fn = hpc_write_dma_desc_par;
+ hpc_sync_dma_desc_fn = hpc_sync_dma_desc_par;
+ hpc_update_dma_desc_fn = hpc_update_dma_desc_par;
}
}
@@ -975,50 +972,53 @@ hpc_read_eeprom(int hpctype, bus_space_tag_t t, bus_space_handle_t h,
}
/*
- * Routines to copy and update HPC DMA descriptors in uncached memory.
+ * Routines to update HPC DMA descriptors.
*/
-struct hpc_dma_desc *
-hpc_read_dma_desc(struct hpc_dma_desc *src, struct hpc_dma_desc *store)
+void
+hpc_sync_dma_desc(struct hpc_dma_desc *desc)
{
- return (*hpc_read_dma_desc_fn)(src, store);
+ (*hpc_sync_dma_desc_fn)(desc);
}
void
-hpc_write_dma_desc(struct hpc_dma_desc *dst, struct hpc_dma_desc *src)
+hpc_update_dma_desc(struct hpc_dma_desc *desc)
{
- (*hpc_write_dma_desc_fn)(dst, src);
+ (*hpc_update_dma_desc_fn)(desc);
}
-/* parity MC flavour: no copy */
-struct hpc_dma_desc *
-hpc_read_dma_desc_par(struct hpc_dma_desc *src, struct hpc_dma_desc *store)
+/*
+ * Parity MC flavour: descriptors are in non-cacheable memory, to which
+ * accesses are allowed. No cache operation is needed.
+ */
+
+void
+hpc_sync_dma_desc_par(struct hpc_dma_desc *desc)
{
- return src;
+ /* nothing to do */
}
void
-hpc_write_dma_desc_par(struct hpc_dma_desc *dst, struct hpc_dma_desc *src)
+hpc_update_dma_desc_par(struct hpc_dma_desc *desc)
{
+ /* nothing to do */
}
-/* ECC MC flavour: copy, and update in slow mode */
-struct hpc_dma_desc *
-hpc_read_dma_desc_ecc(struct hpc_dma_desc *src, struct hpc_dma_desc *store)
+/*
+ * ECC MC flavour: descriptors are in cacheable memory, and need to be
+ * evicted from cache before reading, and flushed from cache after updating.
+ */
+
+void
+hpc_sync_dma_desc_ecc(struct hpc_dma_desc *desc)
{
- bcopy(src, store, sizeof(struct hpc_dma_desc));
- return store;
+ Mips_HitInvalidateDCache(curcpu(),
+ (vaddr_t)desc, sizeof(struct hpc_dma_desc));
}
void
-hpc_write_dma_desc_ecc(struct hpc_dma_desc *dst, struct hpc_dma_desc *src)
+hpc_update_dma_desc_ecc(struct hpc_dma_desc *desc)
{
- register_t sr;
- int mode;
-
- sr = disableintr();
- mode = ip22_slow_mode();
- bcopy(src, dst, sizeof(struct hpc_dma_desc));
- ip22_restore_mode(mode);
- setsr(sr);
+ Mips_HitSyncDCache(curcpu(),
+ (vaddr_t)desc, sizeof(struct hpc_dma_desc));
}
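For the ECC flavour the helpers map directly onto MIPS hit-type cache
operations: invalidate the descriptor's lines before the CPU reads what the
HPC wrote, write dirty lines back after the CPU updates the descriptor. A
compilable sketch of that contract, with the cache primitives stubbed out
(the real code calls Mips_HitInvalidateDCache() and Mips_HitSyncDCache(),
as in the hunk above):

    #include <stddef.h>

    struct hpc_dma_desc { unsigned hdd_ctl; };  /* trimmed for the sketch */

    /* Stand-ins for the hit-invalidate / hit-writeback primitives. */
    static void dcache_hit_inv(void *va, size_t len) { (void)va; (void)len; }
    static void dcache_hit_wb(void *va, size_t len)  { (void)va; (void)len; }

    void
    sync_desc_ecc(struct hpc_dma_desc *desc)    /* before the CPU reads */
    {
        dcache_hit_inv(desc, sizeof(*desc));    /* drop the stale cached copy */
    }

    void
    update_desc_ecc(struct hpc_dma_desc *desc)  /* after the CPU writes */
    {
        dcache_hit_wb(desc, sizeof(*desc));     /* make the update visible to DMA */
    }

Compared to the old hpc_write_dma_desc_ecc(), no interrupt masking or
slow-mode switch is needed: the descriptor lives in ordinary cached memory
and only cache maintenance crosses the coherency boundary.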
diff --git a/sys/arch/sgi/hpc/hpcreg.h b/sys/arch/sgi/hpc/hpcreg.h
index bce6a2d2245..7ca01674968 100644
--- a/sys/arch/sgi/hpc/hpcreg.h
+++ b/sys/arch/sgi/hpc/hpcreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: hpcreg.h,v 1.1 2012/03/28 20:44:23 miod Exp $ */
+/* $OpenBSD: hpcreg.h,v 1.2 2015/09/05 21:13:24 miod Exp $ */
/* $NetBSD: hpcreg.h,v 1.20 2011/01/25 12:21:04 tsutsui Exp $ */
/*
@@ -44,14 +44,21 @@ struct hpc_dma_desc {
uint32_t hdd_bufptr; /* Physical address of buffer */
uint32_t hdd_ctl; /* Control flags and byte count */
uint32_t hdd_descptr; /* Physical address of next descr. */
+#if defined(CPU_R8000) || defined(CPU_R10000)
+ uint32_t hdd_pad[29]; /* Pad out to largest cache line */
+#else
uint32_t hdd_pad; /* Pad out to quadword alignment */
+#endif
};
+#define HPC1_DMA_BOUNDARY 0x1000
+#define HPC3_DMA_BOUNDARY 0x2000
+
/*
* The hdd_bufptr and hdd_ctl fields are swapped between HPC1 and
* HPC3. These fields are referenced by macro for readability.
*/
-#define hpc1_hdd_ctl hdd_bufptr
+#define hpc1_hdd_ctl hdd_bufptr
#define hpc1_hdd_bufptr hdd_ctl
#define hpc3_hdd_ctl hdd_ctl
#define hpc3_hdd_bufptr hdd_bufptr
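The enlarged pad is what makes per-descriptor cache maintenance safe: each
descriptor must own an entire cache line, or a hit-invalidate on one
descriptor would also discard a neighbour's pending update. Assuming the
128-byte largest line that the 29-word pad implies (R8000/R10000 class),
the invariant can be checked at compile time:

    #include <assert.h>
    #include <stdint.h>

    #define HPC_CACHELINE 128   /* assumed largest line size */

    struct hpc_dma_desc {
        uint32_t hdd_bufptr;    /* physical address of buffer */
        uint32_t hdd_ctl;       /* control flags and byte count */
        uint32_t hdd_descptr;   /* physical address of next descriptor */
        uint32_t hdd_pad[29];   /* 3 + 29 words == 128 bytes */
    };

    /* One descriptor per line; sharing a line would let cache ops on
     * one descriptor clobber its neighbour. */
    static_assert(sizeof(struct hpc_dma_desc) == HPC_CACHELINE,
        "hpc_dma_desc must fill exactly one cache line");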
diff --git a/sys/arch/sgi/hpc/hpcvar.h b/sys/arch/sgi/hpc/hpcvar.h
index 6c0f7a86aed..1891d63c8d1 100644
--- a/sys/arch/sgi/hpc/hpcvar.h
+++ b/sys/arch/sgi/hpc/hpcvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: hpcvar.h,v 1.8 2012/05/27 14:27:08 miod Exp $ */
+/* $OpenBSD: hpcvar.h,v 1.9 2015/09/05 21:13:24 miod Exp $ */
/* $NetBSD: hpcvar.h,v 1.12 2011/01/25 12:21:04 tsutsui Exp $ */
/*
@@ -116,8 +116,7 @@ void hpc_intr_enable(void *);
*/
struct hpc_dma_desc;
-struct hpc_dma_desc *hpc_read_dma_desc(struct hpc_dma_desc *src,
- struct hpc_dma_desc *store);
-void hpc_write_dma_desc(struct hpc_dma_desc *dst, struct hpc_dma_desc *src);
+void hpc_sync_dma_desc(struct hpc_dma_desc *desc);
+void hpc_update_dma_desc(struct hpc_dma_desc *desc);
extern bus_space_t hpc3bus_tag;
diff --git a/sys/arch/sgi/hpc/if_sq.c b/sys/arch/sgi/hpc/if_sq.c
index 621fe6fb237..c0eda056c5a 100644
--- a/sys/arch/sgi/hpc/if_sq.c
+++ b/sys/arch/sgi/hpc/if_sq.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_sq.c,v 1.14 2015/06/24 09:40:53 mpi Exp $ */
+/* $OpenBSD: if_sq.c,v 1.15 2015/09/05 21:13:24 miod Exp $ */
/* $NetBSD: if_sq.c,v 1.42 2011/07/01 18:53:47 dyoung Exp $ */
/*
@@ -223,15 +223,9 @@ sq_attach(struct device *parent, struct device *self, void *aux)
goto fail_0;
}
- /*
- * Note that we need to pass BUS_DMA_BUS1 in order to get this
- * allocation to succeed on ECC MC systems. This code is
- * uncached-write safe, as all updates of the DMA descriptors are
- * handled in RCU style with hpc_{read,write}_dma_desc().
- */
if ((rc = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
sizeof(struct sq_control), (caddr_t *)&sc->sc_control,
- BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_BUS1)) != 0) {
+ BUS_DMA_NOWAIT | (ip22_ecc ? 0 : BUS_DMA_COHERENT))) != 0) {
printf(": unable to map control data, error = %d\n", rc);
goto fail_1;
}
@@ -656,7 +650,7 @@ sq_start(struct ifnet *ifp)
{
struct sq_softc *sc = ifp->if_softc;
struct mbuf *m0, *m;
- struct hpc_dma_desc *txd, txd_store;
+ struct hpc_dma_desc *txd;
bus_dmamap_t dmamap;
uint32_t status;
int err, len, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;
@@ -789,8 +783,8 @@ sq_start(struct ifnet *ifp)
for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
seg < dmamap->dm_nsegs;
seg++, nexttx = SQ_NEXTTX(nexttx)) {
- txd = hpc_read_dma_desc(sc->sc_txdesc + nexttx,
- &txd_store);
+ txd = sc->sc_txdesc + nexttx;
+ hpc_sync_dma_desc(txd);
if (sc->hpc_regs->revision == 3) {
txd->hpc3_hdd_bufptr =
dmamap->dm_segs[seg].ds_addr;
@@ -801,19 +795,20 @@ sq_start(struct ifnet *ifp)
txd->hpc1_hdd_ctl = dmamap->dm_segs[seg].ds_len;
}
txd->hdd_descptr = SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
- hpc_write_dma_desc(sc->sc_txdesc + nexttx, txd);
+ hpc_update_dma_desc(txd);
lasttx = nexttx;
totlen += dmamap->dm_segs[seg].ds_len;
}
/* Last descriptor gets end-of-packet */
KASSERT(lasttx != -1);
- /* txd = hpc_read_dma_desc(sc->sc_txdesc + lasttx, &txd_store); */
+ /* txd = sc->sc_txdesc + lasttx; */
+ /* hpc_sync_dma_desc(txd); */
if (sc->hpc_regs->revision == 3)
txd->hpc3_hdd_ctl |= HPC3_HDD_CTL_EOPACKET;
else
txd->hpc1_hdd_ctl |= HPC1_HDD_CTL_EOPACKET;
- hpc_write_dma_desc(sc->sc_txdesc + lasttx, txd);
+ hpc_update_dma_desc(txd);
SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));
@@ -869,7 +864,8 @@ sq_start(struct ifnet *ifp)
* addition to HPC3_HDD_CTL_INTR to interrupt.
*/
KASSERT(lasttx != -1);
- txd = hpc_read_dma_desc(sc->sc_txdesc + lasttx, &txd_store);
+ txd = sc->sc_txdesc + lasttx;
+ hpc_sync_dma_desc(txd);
if (sc->hpc_regs->revision == 3) {
txd->hpc3_hdd_ctl |=
HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
@@ -877,7 +873,7 @@ sq_start(struct ifnet *ifp)
txd->hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
txd->hpc1_hdd_bufptr |= HPC1_HDD_CTL_EOCHAIN;
}
- hpc_write_dma_desc(sc->sc_txdesc + lasttx, txd);
+ hpc_update_dma_desc(txd);
SQ_CDTXSYNC(sc, lasttx, 1,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -898,8 +894,8 @@ sq_start(struct ifnet *ifp)
if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);
- txd = hpc_read_dma_desc(sc->sc_txdesc +
- SQ_PREVTX(firsttx), &txd_store);
+ txd = sc->sc_txdesc + SQ_PREVTX(firsttx);
+ hpc_sync_dma_desc(txd);
/*
* NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
* HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
@@ -908,8 +904,7 @@ sq_start(struct ifnet *ifp)
if (sc->hpc_regs->revision != 3)
txd->hpc1_hdd_ctl &= ~HPC1_HDD_CTL_INTR;
- hpc_write_dma_desc(sc->sc_txdesc + SQ_PREVTX(firsttx),
- txd);
+ hpc_update_dma_desc(txd);
SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
} else if (sc->hpc_regs->revision == 3) {
@@ -1109,7 +1104,7 @@ sq_rxintr(struct sq_softc *sc)
struct ifnet *ifp = &sc->sc_ac.ac_if;
struct mbuf_list ml = MBUF_LIST_INITIALIZER();
struct mbuf* m;
- struct hpc_dma_desc *rxd, rxd_store;
+ struct hpc_dma_desc *rxd;
int i, framelen;
uint8_t pktstat;
uint32_t status;
@@ -1117,18 +1112,16 @@ sq_rxintr(struct sq_softc *sc)
int new_end, orig_end;
for (i = sc->sc_nextrx; ; i = SQ_NEXTRX(i)) {
+ rxd = sc->sc_rxdesc + i;
SQ_CDRXSYNC(sc, i,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
/*
* If this is a CPU-owned buffer, we're at the end of the list.
*/
if (sc->hpc_regs->revision == 3)
- ctl_reg =
- sc->sc_rxdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_OWN;
+ ctl_reg = rxd->hpc3_hdd_ctl & HPC3_HDD_CTL_OWN;
else
- ctl_reg =
- sc->sc_rxdesc[i].hpc1_hdd_ctl & HPC1_HDD_CTL_OWN;
+ ctl_reg = rxd->hpc1_hdd_ctl & HPC1_HDD_CTL_OWN;
if (ctl_reg) {
#if defined(SQ_DEBUG)
@@ -1145,10 +1138,10 @@ sq_rxintr(struct sq_softc *sc)
framelen = m->m_ext.ext_size - 3;
if (sc->hpc_regs->revision == 3)
framelen -=
- HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
+ HPC3_HDD_CTL_BYTECNT(rxd->hpc3_hdd_ctl);
else
framelen -=
- HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);
+ HPC1_HDD_CTL_BYTECNT(rxd->hpc1_hdd_ctl);
/* Now sync the actual packet data */
bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
@@ -1204,16 +1197,14 @@ sq_rxintr(struct sq_softc *sc)
*/
new_end = SQ_PREVRX(i);
- rxd = hpc_read_dma_desc(sc->sc_rxdesc + new_end, &rxd_store);
+ rxd = sc->sc_rxdesc + new_end;
rxd->hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
- hpc_write_dma_desc(sc->sc_rxdesc + new_end, rxd);
SQ_CDRXSYNC(sc, new_end,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
orig_end = SQ_PREVRX(sc->sc_nextrx);
- rxd = hpc_read_dma_desc(sc->sc_rxdesc + orig_end, &rxd_store);
+ rxd = sc->sc_rxdesc + orig_end;
rxd->hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
- hpc_write_dma_desc(sc->sc_rxdesc + orig_end, rxd);
SQ_CDRXSYNC(sc, orig_end,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -1382,6 +1373,7 @@ sq_txring_hpc3(struct sq_softc *sc)
* descriptors are left over.
*/
struct ifnet *ifp = &sc->sc_ac.ac_if;
+ struct hpc_dma_desc *txd;
int i;
uint32_t status = 0;
@@ -1394,12 +1386,13 @@ sq_txring_hpc3(struct sq_softc *sc)
*/
status = sq_hpc_read(sc, HPC3_ENETX_CTL);
+ txd = sc->sc_txdesc + i;
SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ hpc_sync_dma_desc(txd);
/* Check for used descriptor and restart DMA chain if needed */
- if ((sc->sc_txdesc[i].hpc3_hdd_ctl &
- HPC3_HDD_CTL_XMITDONE) == 0) {
+ if ((txd->hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE) == 0) {
if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
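Every descriptor access in if_sq.c now follows the same read-modify-write
discipline: point directly into the ring, hpc_sync_dma_desc() before
reading, hpc_update_dma_desc() after writing. A self-contained sketch of
that shape; the control field and bit value are simplified stand-ins:

    struct hpc_dma_desc { unsigned hdd_ctl; };  /* simplified */

    /* Prototypes matching the new hpc.c helpers. */
    void hpc_sync_dma_desc(struct hpc_dma_desc *);
    void hpc_update_dma_desc(struct hpc_dma_desc *);

    #define HDD_CTL_EOPACKET 0x80000000u        /* illustrative bit value */

    void
    mark_end_of_packet(struct hpc_dma_desc *ring, int lasttx)
    {
        struct hpc_dma_desc *txd = ring + lasttx;

        hpc_sync_dma_desc(txd);                 /* invalidate stale lines */
        txd->hdd_ctl |= HDD_CTL_EOPACKET;       /* CPU-side modification */
        hpc_update_dma_desc(txd);               /* write back for the HPC */
    }

On parity systems both helper calls are no-ops and the pointer still aims
at uncached descriptor memory, so one code path serves both flavours.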
diff --git a/sys/arch/sgi/hpc/if_sqvar.h b/sys/arch/sgi/hpc/if_sqvar.h
index 9af015b2351..1dcea3c658c 100644
--- a/sys/arch/sgi/hpc/if_sqvar.h
+++ b/sys/arch/sgi/hpc/if_sqvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_sqvar.h,v 1.4 2012/05/28 17:03:36 miod Exp $ */
+/* $OpenBSD: if_sqvar.h,v 1.5 2015/09/05 21:13:24 miod Exp $ */
/* $NetBSD: sqvar.h,v 1.12 2011/01/25 13:12:39 tsutsui Exp $ */
/*
@@ -160,10 +160,12 @@ struct sq_softc {
#define SQ_CDTXADDR(sc, x) ((sc)->sc_cddma + SQ_CDTXOFF((x)))
#define SQ_CDRXADDR(sc, x) ((sc)->sc_cddma + SQ_CDRXOFF((x)))
-#if 0 /* not necessary as this memory is mapped uncached */
static inline void
SQ_CDTXSYNC(struct sq_softc *sc, int __x, int __n, int ops)
{
+ if (!ip22_ecc)
+ return;
+
/* If it will wrap around, sync to the end of the ring. */
if ((__x + __n) > SQ_NTXDESC) {
bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cdmap,
@@ -179,20 +181,20 @@ SQ_CDTXSYNC(struct sq_softc *sc, int __x, int __n, int ops)
}
#define SQ_CDRXSYNC(sc, x, ops) \
- bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cdmap, \
- SQ_CDRXOFF((x)), sizeof(struct hpc_dma_desc), (ops))
-#else
-#define SQ_CDTXSYNC(sc, x, n, ops) do { } while (0)
-#define SQ_CDRXSYNC(sc, x, ops) do { } while (0)
-#endif
+do { \
+ if (ip22_ecc) \
+ bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cdmap, \
+ SQ_CDRXOFF((x)), sizeof(struct hpc_dma_desc), (ops)); \
+} while (0)
static inline void
SQ_INIT_RXDESC(struct sq_softc *sc, unsigned int x)
{
- struct hpc_dma_desc *__rxd, rxd_store;
+ struct hpc_dma_desc *__rxd;
struct mbuf *__m = (sc)->sc_rxmbuf[(x)];
- __rxd = hpc_read_dma_desc(&(sc)->sc_rxdesc[(x)], &rxd_store);
+ __rxd = &(sc)->sc_rxdesc[(x)];
+ hpc_sync_dma_desc(__rxd);
__m->m_data = __m->m_ext.ext_buf;
if (sc->hpc_regs->revision == 3) {
__rxd->hpc3_hdd_bufptr =
@@ -208,6 +210,6 @@ SQ_INIT_RXDESC(struct sq_softc *sc, unsigned int x)
HPC1_HDD_CTL_INTR | HPC1_HDD_CTL_EOPACKET;
}
__rxd->hdd_descptr = SQ_CDRXADDR((sc), SQ_NEXTRX((x)));
- hpc_write_dma_desc(&(sc)->sc_rxdesc[(x)], __rxd);
+ hpc_update_dma_desc(__rxd);
SQ_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
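Because the descriptors are cacheable only on ECC systems, the header's
sync helpers can no longer be compiled away as before; they test ip22_ecc
at run time instead. The do { } while (0) wrapper keeps the macro a single
statement, so it still parses inside an unbraced if/else. Sketched with a
stand-in sync routine:

    extern int ip22_ecc;        /* set at boot on IP26/IP28 */

    void desc_sync_stub(unsigned off, unsigned len, int ops);  /* stand-in */

    #define DESC_SYNC(off, len, ops)                            \
    do {                                                        \
        if (ip22_ecc)                                           \
            desc_sync_stub((off), (len), (ops));                \
    } while (0)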
diff --git a/sys/arch/sgi/sgi/bus_dma.c b/sys/arch/sgi/sgi/bus_dma.c
index c0d1519ee35..0bd28cba10f 100644
--- a/sys/arch/sgi/sgi/bus_dma.c
+++ b/sys/arch/sgi/sgi/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.39 2014/11/16 12:30:58 deraadt Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.40 2015/09/05 21:13:24 miod Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -460,11 +460,9 @@ _dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
/*
* On ECC MC systems, which do not allow uncached writes to memory
* during regular operation, fail requests for uncached (coherent)
- * memory, unless the caller tells us it is aware of this and will
- * do the right thing, by passing BUS_DMA_BUS1 as well.
+ * memory.
*/
- if ((flags & (BUS_DMA_COHERENT | BUS_DMA_BUS1)) == BUS_DMA_COHERENT &&
- ip22_ecc)
+ if ((flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE)) && ip22_ecc)
return EINVAL;
#endif
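The bus_dma.c hunk turns the old BUS_DMA_BUS1 opt-out into a flat refusal:
on ECC systems any uncached mapping request, coherent or explicitly
nocache, fails with EINVAL, and callers such as sq_attach() now simply omit
BUS_DMA_COHERENT when ip22_ecc is set. A sketch of the check, using
illustrative flag values rather than OpenBSD's real BUS_DMA_* constants:

    #include <errno.h>

    #define DMA_COHERENT 0x01   /* stand-ins, not the real */
    #define DMA_NOCACHE  0x02   /* BUS_DMA_* values */

    int
    ecc_uncached_map_check(int flags, int ecc)
    {
        /* Uncached writes are forbidden on ECC MC systems during
         * normal (fast) operation, so refuse to create the mapping. */
        if ((flags & (DMA_COHERENT | DMA_NOCACHE)) != 0 && ecc)
            return EINVAL;
        return 0;
    }

A caller that previously passed BUS_DMA_BUS1 instead builds its flags as
BUS_DMA_NOWAIT | (ip22_ecc ? 0 : BUS_DMA_COHERENT) and performs the cache
maintenance itself, mirroring the sq_attach() change above.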