diff options
author | Miod Vallat <miod@cvs.openbsd.org> | 2015-09-18 20:50:03 +0000 |
---|---|---|
committer | Miod Vallat <miod@cvs.openbsd.org> | 2015-09-18 20:50:03 +0000 |
commit | 6ced8ba69a01d039dc40cc7cb5019240035c39ef (patch) | |
tree | 887259eb84bda6ff87cb8ab4f0eee69c5a450c4e /sys/arch | |
parent | 85a1b1c2acc80ebdbd63c881b502c3d7b6ea8ade (diff) |
Go back to the previous approach when managing individual HPC DMA descriptors:
provide again an optional storage for a copy of the descriptor in the `sync'
(fetch) function, and use the returned address afterwards.
On IP22 systems (in the broader sense of the term, thus IP20/IP22/IP24),
descriptors will remain in uncached memory and no local copies need to be made.
On IP28 systems, descriptors will remain in cached memory (so as to avoid
switching to `slow mode'), but a local copy will be performed with the necessary
cache eviction work, so that speculative code execution on R10000 will not
touch the real descriptor.
With this in place, all the explicit descriptor cache operations in if_sq,
some of them being redundant or operating on the wrong number of
descriptors, can be removed, with the HPC DMA wrappers taking care of doing
the right thing.
Tested on IP22 and IP28. IP26 still unhappy but no worse than before.
Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/sgi/hpc/hpc.c | 64 | ||||
-rw-r--r-- | sys/arch/sgi/hpc/hpcvar.h | 7 | ||||
-rw-r--r-- | sys/arch/sgi/hpc/if_sq.c | 109 | ||||
-rw-r--r-- | sys/arch/sgi/hpc/if_sqvar.h | 52 |
4 files changed, 108 insertions, 124 deletions
diff --git a/sys/arch/sgi/hpc/hpc.c b/sys/arch/sgi/hpc/hpc.c index 458ac49a601..c3a2b72ab17 100644 --- a/sys/arch/sgi/hpc/hpc.c +++ b/sys/arch/sgi/hpc/hpc.c @@ -1,4 +1,4 @@ -/* $OpenBSD: hpc.c,v 1.17 2015/09/05 21:13:24 miod Exp $ */ +/* $OpenBSD: hpc.c,v 1.18 2015/09/18 20:50:02 miod Exp $ */ /* $NetBSD: hpc.c,v 1.66 2011/07/01 18:53:46 dyoung Exp $ */ /* $NetBSD: ioc.c,v 1.9 2011/07/01 18:53:47 dyoung Exp $ */ @@ -88,6 +88,7 @@ #include <machine/autoconf.h> #include <machine/bus.h> +#include <mips64/cache.h> #include <machine/cpu.h> #include <sgi/gio/gioreg.h> @@ -363,14 +364,17 @@ void hpc_blink_ioc(void *); int hpc_read_eeprom(int, bus_space_tag_t, bus_space_handle_t, uint8_t *, size_t); -void hpc_sync_dma_desc_par(struct hpc_dma_desc *); -void hpc_sync_dma_desc_ecc(struct hpc_dma_desc *); -void hpc_update_dma_desc_par(struct hpc_dma_desc *); -void hpc_update_dma_desc_ecc(struct hpc_dma_desc *); +struct hpc_dma_desc *hpc_sync_dma_desc_par(struct hpc_dma_desc *, + struct hpc_dma_desc *); +struct hpc_dma_desc *hpc_sync_dma_desc_ecc(struct hpc_dma_desc *, + struct hpc_dma_desc *); +void hpc_update_dma_desc_par(struct hpc_dma_desc *, struct hpc_dma_desc *); +void hpc_update_dma_desc_ecc(struct hpc_dma_desc *, struct hpc_dma_desc *); /* globals since they depend upon the system type, not the hpc version */ -void (*hpc_sync_dma_desc_fn)(struct hpc_dma_desc *); -void (*hpc_update_dma_desc_fn)(struct hpc_dma_desc *); +struct hpc_dma_desc *(*hpc_sync_dma_desc_fn)(struct hpc_dma_desc *, + struct hpc_dma_desc *); +void (*hpc_update_dma_desc_fn)(struct hpc_dma_desc *, struct hpc_dma_desc *); const struct cfattach hpc_ca = { sizeof(struct hpc_softc), hpc_match, hpc_attach @@ -975,16 +979,16 @@ hpc_read_eeprom(int hpctype, bus_space_tag_t t, bus_space_handle_t h, * Routines to update HPC DMA descriptors. 
*/ -void -hpc_sync_dma_desc(struct hpc_dma_desc *desc) +struct hpc_dma_desc * +hpc_sync_dma_desc(struct hpc_dma_desc *desc, struct hpc_dma_desc *store) { - (*hpc_sync_dma_desc_fn)(desc); + return (*hpc_sync_dma_desc_fn)(desc, store); } void -hpc_update_dma_desc(struct hpc_dma_desc *desc) +hpc_update_dma_desc(struct hpc_dma_desc *desc, struct hpc_dma_desc *store) { - (*hpc_update_dma_desc_fn)(desc); + (*hpc_update_dma_desc_fn)(desc, store); } /* @@ -992,33 +996,49 @@ hpc_update_dma_desc(struct hpc_dma_desc *desc) * accesses are allowed. No cache operation is needed. */ -void -hpc_sync_dma_desc_par(struct hpc_dma_desc *desc) +struct hpc_dma_desc * +hpc_sync_dma_desc_par(struct hpc_dma_desc *desc, struct hpc_dma_desc *store) { /* nothing to do */ + return desc; } void -hpc_update_dma_desc_par(struct hpc_dma_desc *desc) +hpc_update_dma_desc_par(struct hpc_dma_desc *desc, struct hpc_dma_desc *store) { /* nothing to do */ + KDASSERT(desc == store); } /* * ECC MC flavour: descriptor are in cacheable memory, and need to be * evicted from cache before reading, and flushed from cache after updating. + * + * In addition, on R10000 systems, an actual copy of the descriptor needs + * to be performed, to prevent speculative execution from writing to the + * cached descriptor. 
*/ -void -hpc_sync_dma_desc_ecc(struct hpc_dma_desc *desc) +struct hpc_dma_desc * +hpc_sync_dma_desc_ecc(struct hpc_dma_desc *desc, struct hpc_dma_desc *store) { - Mips_HitInvalidateDCache(curcpu(), - (vaddr_t)desc, sizeof(struct hpc_dma_desc)); + Mips_IOSyncDCache(curcpu(), + (vaddr_t)desc, sizeof(struct hpc_dma_desc), CACHE_SYNC_R); + + store->hdd_bufptr = desc->hdd_bufptr; + store->hdd_ctl = desc->hdd_ctl; + store->hdd_descptr = desc->hdd_descptr; + + return store; } void -hpc_update_dma_desc_ecc(struct hpc_dma_desc *desc) +hpc_update_dma_desc_ecc(struct hpc_dma_desc *desc, struct hpc_dma_desc *store) { - Mips_HitSyncDCache(curcpu(), - (vaddr_t)desc, sizeof(struct hpc_dma_desc)); + desc->hdd_bufptr = store->hdd_bufptr; + desc->hdd_ctl = store->hdd_ctl; + desc->hdd_descptr = store->hdd_descptr; + + Mips_IOSyncDCache(curcpu(), + (vaddr_t)desc, sizeof(struct hpc_dma_desc), CACHE_SYNC_X); } diff --git a/sys/arch/sgi/hpc/hpcvar.h b/sys/arch/sgi/hpc/hpcvar.h index 1891d63c8d1..ce938f05dbc 100644 --- a/sys/arch/sgi/hpc/hpcvar.h +++ b/sys/arch/sgi/hpc/hpcvar.h @@ -1,4 +1,4 @@ -/* $OpenBSD: hpcvar.h,v 1.9 2015/09/05 21:13:24 miod Exp $ */ +/* $OpenBSD: hpcvar.h,v 1.10 2015/09/18 20:50:02 miod Exp $ */ /* $NetBSD: hpcvar.h,v 1.12 2011/01/25 12:21:04 tsutsui Exp $ */ /* @@ -116,7 +116,8 @@ void hpc_intr_enable(void *); */ struct hpc_dma_desc; -void hpc_sync_dma_desc(struct hpc_dma_desc *desc); -void hpc_update_dma_desc(struct hpc_dma_desc *desc); +struct hpc_dma_desc *hpc_sync_dma_desc(struct hpc_dma_desc *, + struct hpc_dma_desc *); +void hpc_update_dma_desc(struct hpc_dma_desc *, struct hpc_dma_desc *); extern bus_space_t hpc3bus_tag; diff --git a/sys/arch/sgi/hpc/if_sq.c b/sys/arch/sgi/hpc/if_sq.c index a9171499d74..26ca048e362 100644 --- a/sys/arch/sgi/hpc/if_sq.c +++ b/sys/arch/sgi/hpc/if_sq.c @@ -1,4 +1,4 @@ -/* $OpenBSD: if_sq.c,v 1.16 2015/09/14 11:18:49 stsp Exp $ */ +/* $OpenBSD: if_sq.c,v 1.17 2015/09/18 20:50:02 miod Exp $ */ /* $NetBSD: if_sq.c,v 1.42 
2011/07/01 18:53:47 dyoung Exp $ */ /* @@ -650,7 +650,7 @@ sq_start(struct ifnet *ifp) { struct sq_softc *sc = ifp->if_softc; struct mbuf *m0, *m; - struct hpc_dma_desc *txd; + struct hpc_dma_desc *txd, *active, store; bus_dmamap_t dmamap; uint32_t status; int err, len, totlen, nexttx, firsttx, lasttx = -1, ofree, seg; @@ -784,31 +784,33 @@ sq_start(struct ifnet *ifp) seg < dmamap->dm_nsegs; seg++, nexttx = SQ_NEXTTX(nexttx)) { txd = sc->sc_txdesc + nexttx; - hpc_sync_dma_desc(txd); + active = hpc_sync_dma_desc(txd, &store); if (sc->hpc_regs->revision == 3) { - txd->hpc3_hdd_bufptr = + active->hpc3_hdd_bufptr = dmamap->dm_segs[seg].ds_addr; - txd->hpc3_hdd_ctl = dmamap->dm_segs[seg].ds_len; + active->hpc3_hdd_ctl = + dmamap->dm_segs[seg].ds_len; } else { - txd->hpc1_hdd_bufptr = + active->hpc1_hdd_bufptr = dmamap->dm_segs[seg].ds_addr; - txd->hpc1_hdd_ctl = dmamap->dm_segs[seg].ds_len; + active->hpc1_hdd_ctl = + dmamap->dm_segs[seg].ds_len; } - txd->hdd_descptr = SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx)); - hpc_update_dma_desc(txd); + active->hdd_descptr = SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx)); + hpc_update_dma_desc(txd, active); lasttx = nexttx; totlen += dmamap->dm_segs[seg].ds_len; } /* Last descriptor gets end-of-packet */ KASSERT(lasttx != -1); - /* txd = sc->sc_txdesc + lasttx; */ - /* hpc_sync_dma_desc(txd); */ + txd = sc->sc_txdesc + lasttx; + active = hpc_sync_dma_desc(txd, &store); if (sc->hpc_regs->revision == 3) - txd->hpc3_hdd_ctl |= HPC3_HDD_CTL_EOPACKET; + active->hpc3_hdd_ctl |= HPC3_HDD_CTL_EOPACKET; else - txd->hpc1_hdd_ctl |= HPC1_HDD_CTL_EOPACKET; - hpc_update_dma_desc(txd); + active->hpc1_hdd_ctl |= HPC1_HDD_CTL_EOPACKET; + hpc_update_dma_desc(txd, active); SQ_DPRINTF(("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen)); @@ -816,27 +818,25 @@ sq_start(struct ifnet *ifp) if (ifp->if_flags & IFF_DEBUG) { printf(" transmit chain:\n"); for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) { + active = 
hpc_sync_dma_desc(&sc->sc_txdesc[seg], + &store); printf(" descriptor %d:\n", seg); printf(" hdd_bufptr: 0x%08x\n", (sc->hpc_regs->revision == 3) ? - sc->sc_txdesc[seg].hpc3_hdd_bufptr : - sc->sc_txdesc[seg].hpc1_hdd_bufptr); + active->hpc3_hdd_bufptr : + active->hpc1_hdd_bufptr); printf(" hdd_ctl: 0x%08x\n", (sc->hpc_regs->revision == 3) ? - sc->sc_txdesc[seg].hpc3_hdd_ctl: - sc->sc_txdesc[seg].hpc1_hdd_ctl); + active->hpc3_hdd_ctl: + active->hpc1_hdd_ctl); printf(" hdd_descptr: 0x%08x\n", - sc->sc_txdesc[seg].hdd_descptr); + active->hdd_descptr); if (seg == lasttx) break; } } - /* Sync the descriptors we're using. */ - SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - /* Store a pointer to the packet so we can free it later */ sc->sc_txmbuf[sc->sc_nexttx] = m0; @@ -865,17 +865,15 @@ sq_start(struct ifnet *ifp) */ KASSERT(lasttx != -1); txd = sc->sc_txdesc + lasttx; - hpc_sync_dma_desc(txd); + active = hpc_sync_dma_desc(txd, &store); if (sc->hpc_regs->revision == 3) { - txd->hpc3_hdd_ctl |= + active->hpc3_hdd_ctl |= HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN; } else { - txd->hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR; - txd->hpc1_hdd_bufptr |= HPC1_HDD_CTL_EOCHAIN; + active->hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR; + active->hpc1_hdd_bufptr |= HPC1_HDD_CTL_EOCHAIN; } - hpc_update_dma_desc(txd); - SQ_CDTXSYNC(sc, lasttx, 1, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + hpc_update_dma_desc(txd, active); /* * There is a potential race condition here if the HPC @@ -895,18 +893,16 @@ sq_start(struct ifnet *ifp) SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status); txd = sc->sc_txdesc + SQ_PREVTX(firsttx); - hpc_sync_dma_desc(txd); + active = hpc_sync_dma_desc(txd, &store); /* * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN */ - txd->hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN; + active->hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN; if (sc->hpc_regs->revision != 3) - txd->hpc1_hdd_ctl &= ~HPC1_HDD_CTL_INTR; + 
active->hpc1_hdd_ctl &= ~HPC1_HDD_CTL_INTR; - hpc_update_dma_desc(txd); - SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + hpc_update_dma_desc(txd, active); } else if (sc->hpc_regs->revision == 3) { SQ_TRACE(SQ_START_DMA, sc, firsttx, status); @@ -1104,7 +1100,7 @@ sq_rxintr(struct sq_softc *sc) struct ifnet *ifp = &sc->sc_ac.ac_if; struct mbuf_list ml = MBUF_LIST_INITIALIZER(); struct mbuf* m; - struct hpc_dma_desc *rxd; + struct hpc_dma_desc *rxd, *active, store; int i, framelen; uint8_t pktstat; uint32_t status; @@ -1113,15 +1109,14 @@ sq_rxintr(struct sq_softc *sc) for (i = sc->sc_nextrx; ; i = SQ_NEXTRX(i)) { rxd = sc->sc_rxdesc + i; - SQ_CDRXSYNC(sc, i, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + active = hpc_sync_dma_desc(rxd, &store); /* * If this is a CPU-owned buffer, we're at the end of the list. */ if (sc->hpc_regs->revision == 3) - ctl_reg = rxd->hpc3_hdd_ctl & HPC3_HDD_CTL_OWN; + ctl_reg = active->hpc3_hdd_ctl & HPC3_HDD_CTL_OWN; else - ctl_reg = rxd->hpc1_hdd_ctl & HPC1_HDD_CTL_OWN; + ctl_reg = active->hpc1_hdd_ctl & HPC1_HDD_CTL_OWN; if (ctl_reg) { #if defined(SQ_DEBUG) @@ -1138,10 +1133,10 @@ sq_rxintr(struct sq_softc *sc) framelen = m->m_ext.ext_size - 3; if (sc->hpc_regs->revision == 3) framelen -= - HPC3_HDD_CTL_BYTECNT(rxd->hpc3_hdd_ctl); + HPC3_HDD_CTL_BYTECNT(active->hpc3_hdd_ctl); else framelen -= - HPC1_HDD_CTL_BYTECNT(rxd->hpc1_hdd_ctl); + HPC1_HDD_CTL_BYTECNT(active->hpc1_hdd_ctl); /* Now sync the actual packet data */ bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0, @@ -1191,22 +1186,21 @@ sq_rxintr(struct sq_softc *sc) /* If anything happened, move ring start/end pointers to new spot */ if (i != sc->sc_nextrx) { + new_end = SQ_PREVRX(i); + rxd = sc->sc_rxdesc + new_end; + active = hpc_sync_dma_desc(rxd, &store); /* * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN */ - - new_end = SQ_PREVRX(i); - rxd = sc->sc_rxdesc + new_end; - rxd->hpc3_hdd_ctl |= 
HPC3_HDD_CTL_EOCHAIN; - SQ_CDRXSYNC(sc, new_end, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + active->hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN; + hpc_update_dma_desc(rxd, active); orig_end = SQ_PREVRX(sc->sc_nextrx); rxd = sc->sc_rxdesc + orig_end; - rxd->hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN; - SQ_CDRXSYNC(sc, orig_end, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + active = hpc_sync_dma_desc(rxd, &store); + active->hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN; + hpc_update_dma_desc(rxd, active); sc->sc_nextrx = i; } @@ -1320,9 +1314,6 @@ sq_txring_hpc1(struct sq_softc *sc) if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall) break; - SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - /* Sync the packet data, unload DMA map, free mbuf */ bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE); @@ -1373,7 +1364,7 @@ sq_txring_hpc3(struct sq_softc *sc) * descriptors are left over. */ struct ifnet *ifp = &sc->sc_ac.ac_if; - struct hpc_dma_desc *txd; + struct hpc_dma_desc *txd, *active, store; int i; uint32_t status = 0; @@ -1387,12 +1378,10 @@ sq_txring_hpc3(struct sq_softc *sc) status = sq_hpc_read(sc, HPC3_ENETX_CTL); txd = sc->sc_txdesc + i; - SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - hpc_sync_dma_desc(txd); + active = hpc_sync_dma_desc(txd, &store); /* Check for used descriptor and restart DMA chain if needed */ - if ((txd->hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE) == 0) { + if ((active->hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE) == 0) { if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) { SQ_TRACE(SQ_RESTART_DMA, sc, i, status); diff --git a/sys/arch/sgi/hpc/if_sqvar.h b/sys/arch/sgi/hpc/if_sqvar.h index 1dcea3c658c..ab83035e577 100644 --- a/sys/arch/sgi/hpc/if_sqvar.h +++ b/sys/arch/sgi/hpc/if_sqvar.h @@ -1,4 +1,4 @@ -/* $OpenBSD: if_sqvar.h,v 1.5 2015/09/05 21:13:24 miod Exp $ */ +/* $OpenBSD: if_sqvar.h,v 1.6 2015/09/18 20:50:02 miod Exp 
$ */ /* $NetBSD: sqvar.h,v 1.12 2011/01/25 13:12:39 tsutsui Exp $ */ /* @@ -161,55 +161,29 @@ struct sq_softc { #define SQ_CDRXADDR(sc, x) ((sc)->sc_cddma + SQ_CDRXOFF((x))) static inline void -SQ_CDTXSYNC(struct sq_softc *sc, int __x, int __n, int ops) -{ - if (!ip22_ecc) - return; - - /* If it will wrap around, sync to the end of the ring. */ - if ((__x + __n) > SQ_NTXDESC) { - bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cdmap, - SQ_CDTXOFF(__x), sizeof(struct hpc_dma_desc) * - (SQ_NTXDESC - __x), (ops)); - __n -= (SQ_NTXDESC - __x); - __x = 0; - } - - /* Now sync whatever is left. */ - bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cdmap, - SQ_CDTXOFF(__x), sizeof(struct hpc_dma_desc) * __n, (ops)); -} - -#define SQ_CDRXSYNC(sc, x, ops) \ -do { \ - if (ip22_ecc) \ - bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cdmap, \ - SQ_CDRXOFF((x)), sizeof(struct hpc_dma_desc), (ops)); \ -} while (0) - -static inline void SQ_INIT_RXDESC(struct sq_softc *sc, unsigned int x) { - struct hpc_dma_desc *__rxd; + struct hpc_dma_desc *__rxd, *__active, __store; struct mbuf *__m = (sc)->sc_rxmbuf[(x)]; __rxd = &(sc)->sc_rxdesc[(x)]; - hpc_sync_dma_desc(__rxd); + __active = hpc_sync_dma_desc(__rxd, &__store); __m->m_data = __m->m_ext.ext_buf; if (sc->hpc_regs->revision == 3) { - __rxd->hpc3_hdd_bufptr = + __active->hpc3_hdd_bufptr = (sc)->sc_rxmap[(x)]->dm_segs[0].ds_addr; - __rxd->hpc3_hdd_ctl = __m->m_ext.ext_size | HPC3_HDD_CTL_OWN | - HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOPACKET | + __active->hpc3_hdd_ctl = __m->m_ext.ext_size | + HPC3_HDD_CTL_OWN | HPC3_HDD_CTL_INTR | + HPC3_HDD_CTL_EOPACKET | ((x) == (SQ_NRXDESC - 1) ? HPC3_HDD_CTL_EOCHAIN : 0); } else { - __rxd->hpc1_hdd_bufptr = + __active->hpc1_hdd_bufptr = (sc)->sc_rxmap[(x)]->dm_segs[0].ds_addr | ((x) == (SQ_NRXDESC - 1) ? 
HPC1_HDD_CTL_EOCHAIN : 0); - __rxd->hpc1_hdd_ctl = __m->m_ext.ext_size | HPC1_HDD_CTL_OWN | - HPC1_HDD_CTL_INTR | HPC1_HDD_CTL_EOPACKET; + __active->hpc1_hdd_ctl = __m->m_ext.ext_size | + HPC1_HDD_CTL_OWN | HPC1_HDD_CTL_INTR | + HPC1_HDD_CTL_EOPACKET; } - __rxd->hdd_descptr = SQ_CDRXADDR((sc), SQ_NEXTRX((x))); - hpc_update_dma_desc(__rxd); - SQ_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + __active->hdd_descptr = SQ_CDRXADDR((sc), SQ_NEXTRX((x))); + hpc_update_dma_desc(__rxd, __active); } |