path: root/sys/dev/pci/ubsec.c
author    Jason Wright <jason@cvs.openbsd.org>    2001-05-30 02:26:15 +0000
committer Jason Wright <jason@cvs.openbsd.org>    2001-05-30 02:26:15 +0000
commit    9cc78d5f10ed758e335a1785a4aed68f5c604470 (patch)
tree      409347132518fce33d3f96bd64ae9e622c6ba947 /sys/dev/pci/ubsec.c
parent    7834b023b6facb7df667f01d4e3d1580eb557524 (diff)
Un-bus_dma(9) mcr1 context and mcr operations (causes problems under load).
Reimplement mcr2 bus_dma handling.
Diffstat (limited to 'sys/dev/pci/ubsec.c')
-rw-r--r--    sys/dev/pci/ubsec.c    237
1 files changed, 98 insertions, 139 deletions
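
The mcr1 path in the diff below drops its bus_dma(9) plumbing in favor of plain kernel malloc(9) plus vtophys(), so there is no per-record DMA map to create or sync. A minimal sketch of the resulting pattern, assuming the driver's own types and macros; ubsec_mcr1_kick_sketch is a hypothetical name, not a function in the file:

	/*
	 * Hedged sketch, not the driver's exact code: after this commit
	 * the mcr1 command records live in ordinary kernel memory and the
	 * chip is handed a physical address obtained with vtophys().
	 */
	static int
	ubsec_mcr1_kick_sketch(struct ubsec_softc *sc, struct ubsec_q *q)
	{
		struct ubsec_mcr *mcr;

		mcr = (struct ubsec_mcr *)malloc(sizeof(struct ubsec_mcr),
		    M_DEVBUF, M_NOWAIT);
		if (mcr == NULL)
			return (ENOMEM);
		bzero(mcr, sizeof(struct ubsec_mcr));
		q->q_mcr = mcr;

		/* no bus_dmamap_sync() needed; the chip gets the paddr directly */
		WRITE_REG(sc, BS_MCR1, (u_int32_t)vtophys(q->q_mcr));
		return (0);
	}
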
diff --git a/sys/dev/pci/ubsec.c b/sys/dev/pci/ubsec.c
index 238329abbed..f951f12c92e 100644
--- a/sys/dev/pci/ubsec.c
+++ b/sys/dev/pci/ubsec.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ubsec.c,v 1.53 2001/05/23 14:42:52 jason Exp $ */
+/* $OpenBSD: ubsec.c,v 1.54 2001/05/30 02:26:14 jason Exp $ */
/*
* Copyright (c) 2000 Jason L. Wright (jason@thought.net)
@@ -84,15 +84,15 @@ int ubsec_intr __P((void *));
int ubsec_newsession __P((u_int32_t *, struct cryptoini *));
int ubsec_freesession __P((u_int64_t));
int ubsec_process __P((struct cryptop *));
-void ubsec_callback __P((struct ubsec_q *));
+void ubsec_callback __P((struct ubsec_softc *, struct ubsec_q *));
int ubsec_feed __P((struct ubsec_softc *));
void ubsec_mcopy __P((struct mbuf *, struct mbuf *, int, int));
void ubsec_callback2 __P((struct ubsec_softc *, struct ubsec_q2 *));
int ubsec_feed2 __P((struct ubsec_softc *));
void ubsec_rng __P((void *));
-void ubsec_dma_free __P((struct ubsec_softc *, struct ubsec_dma_alloc *));
int ubsec_dma_malloc __P((struct ubsec_softc *, bus_size_t,
struct ubsec_dma_alloc *, int));
+void ubsec_dma_free __P((struct ubsec_softc *, struct ubsec_dma_alloc *));
#define READ_REG(sc,r) \
bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
@@ -102,10 +102,6 @@ int ubsec_dma_malloc __P((struct ubsec_softc *, bus_size_t,
#define SWAP32(x) (x) = swap32((x))
-#ifdef __alpha__
-#define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va))
-#endif
-
int
ubsec_probe(parent, match, aux)
struct device *parent;
@@ -204,12 +200,11 @@ ubsec_attach(parent, self, aux)
if (sc->sc_flags & UBS_FLAGS_KEY) {
sc->sc_statmask |= BS_STAT_MCR2_DONE;
- if (hz > 100)
+ timeout_set(&sc->sc_rngto, ubsec_rng, sc);
+ if (hz >= 100)
sc->sc_rnghz = hz / 100;
else
sc->sc_rnghz = 1;
-
- timeout_set(&sc->sc_rngto, ubsec_rng, sc);
timeout_add(&sc->sc_rngto, sc->sc_rnghz);
printf(", rng");
}
@@ -230,7 +225,6 @@ ubsec_intr(arg)
struct ubsec_q *q;
struct ubsec_q2 *q2;
struct ubsec_mcr *mcr;
- struct ubsec_dma_alloc mcr_dma;
int npkts = 0, i;
stat = READ_REG(sc, BS_STAT);
@@ -247,18 +241,12 @@ ubsec_intr(arg)
if ((stat & BS_STAT_MCR1_DONE)) {
while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
q = SIMPLEQ_FIRST(&sc->sc_qchip);
-
- bus_dmamap_sync(sc->sc_dmat, q->q_mcr_dma.dma_map,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
- if ((q->q_mcr->mcr_flags & UBS_MCR_DONE) == 0) {
- bus_dmamap_sync(sc->sc_dmat, q->q_mcr_dma.dma_map,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
- break;
- }
#ifdef UBSEC_DEBUG
printf("mcr_flags %x %x %x\n", q->q_mcr,
q->q_mcr->mcr_flags, READ_REG(sc, BS_ERR));
#endif
+ if ((q->q_mcr->mcr_flags & UBS_MCR_DONE) == 0)
+ break;
npkts++;
SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next);
#ifdef UBSEC_DEBUG
@@ -266,8 +254,7 @@ ubsec_intr(arg)
q->q_mcr->mcr_flags);
#endif
mcr = q->q_mcr;
- mcr_dma = q->q_mcr_dma;
- ubsec_callback(q);
+ ubsec_callback(sc, q);
/*
* search for further sc_qchip ubsec_q's that share
@@ -283,14 +270,14 @@ ubsec_intr(arg)
#endif
SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip,
q, q_next);
- ubsec_callback(q);
+ ubsec_callback(sc, q);
} else {
printf("HUH!\n");
break;
}
}
- ubsec_dma_free(sc, &mcr_dma);
+ free(mcr, M_DEVBUF);
}
#ifdef UBSEC_DEBUG
if (npkts > 1)
@@ -337,23 +324,23 @@ ubsec_feed(sc)
#endif
struct ubsec_q *q;
struct ubsec_mcr *mcr;
- struct ubsec_dma_alloc mcr_dma;
int npkts, i, l;
- u_int8_t *v, *mcr2;
+ void *v, *mcr2;
npkts = sc->sc_nqueue;
if (npkts > 5)
npkts = 5;
if (npkts < 2)
goto feed1;
+ goto feed1;
if (READ_REG(sc, BS_STAT) & BS_STAT_MCR1_FULL)
return (0);
- if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr) +
- ((npkts-1) * sizeof(struct ubsec_mcr_add)), &mcr_dma, 0))
+ mcr = (struct ubsec_mcr *)malloc(sizeof(struct ubsec_mcr) +
+ (npkts-1) * sizeof(struct ubsec_mcr_add), M_DEVBUF, M_NOWAIT);
+ if (mcr == NULL)
goto feed1;
- mcr = (struct ubsec_mcr *)mcr_dma.dma_vaddr;
#ifdef UBSEC_DEBUG
printf("merging %d records\n", npkts);
@@ -365,7 +352,7 @@ ubsec_feed(sc)
}
#endif
- for (mcr2 = (u_int8_t *)mcr, i = 0; i < npkts; i++) {
+ for (mcr2 = mcr, i = 0; i < npkts; i++) {
q = SIMPLEQ_FIRST(&sc->sc_queue);
SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next);
--sc->sc_nqueue;
@@ -376,10 +363,10 @@ ubsec_feed(sc)
* a shortened one
*/
if (i == 0) {
- v = (u_int8_t *)q->q_mcr;
+ v = q->q_mcr;
l = sizeof(struct ubsec_mcr);
} else {
- v = ((u_int8_t *)q->q_mcr) + sizeof(struct ubsec_mcr) -
+ v = ((void *)q->q_mcr) + sizeof(struct ubsec_mcr) -
sizeof(struct ubsec_mcr_add);
l = sizeof(struct ubsec_mcr_add);
}
@@ -389,14 +376,11 @@ ubsec_feed(sc)
bcopy(v, mcr2, l);
mcr2 += l;
- ubsec_dma_free(sc, &q->q_mcr_dma);
- bcopy(&mcr_dma, &q->q_mcr_dma, sizeof(mcr_dma));
+ free(q->q_mcr, M_DEVBUF);
q->q_mcr = mcr;
}
mcr->mcr_pkts = npkts;
- bus_dmamap_sync(sc->sc_dmat, mcr_dma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- WRITE_REG(sc, BS_MCR1, mcr_dma.dma_map->dm_segs[0].ds_addr);
+ WRITE_REG(sc, BS_MCR1, (u_int32_t)vtophys(mcr));
return (0);
feed1:
@@ -404,10 +388,9 @@ feed1:
if (READ_REG(sc, BS_STAT) & BS_STAT_MCR1_FULL)
break;
q = SIMPLEQ_FIRST(&sc->sc_queue);
- WRITE_REG(sc, BS_MCR1, q->q_mcr_dma.dma_map->dm_segs[0].ds_addr);
+ WRITE_REG(sc, BS_MCR1, (u_int32_t)vtophys(q->q_mcr));
#ifdef UBSEC_DEBUG
- printf("feed: q->chip %08x %08x\n", q,
- q->q_mcr_dma.dma_map->dm_segs[0].ds_addr);
+ printf("feed: q->chip %08x %08x\n", q, (u_int32_t)vtophys(q->q_mcr));
#endif
SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next);
--sc->sc_nqueue;
@@ -598,16 +581,13 @@ ubsec_process(crp)
int sskip, dskip, stheend, dtheend;
int16_t coffset;
struct ubsec_session *ses;
- struct ubsec_pktctx ctx;
if (crp == NULL || crp->crp_callback == NULL)
return (EINVAL);
card = UBSEC_CARD(crp->crp_sid);
- if (card >= ubsec_cd.cd_ndevs || ubsec_cd.cd_devs[card] == NULL) {
- err = EINVAL;
- goto errout;
- }
+ if (card >= ubsec_cd.cd_ndevs || ubsec_cd.cd_devs[card] == NULL)
+ return (EINVAL);
sc = ubsec_cd.cd_devs[card];
@@ -641,17 +621,16 @@ ubsec_process(crp)
goto errout; /* XXX only handle mbufs right now */
}
- if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &q->q_mcr_dma, 0)) {
+ q->q_mcr = (struct ubsec_mcr *)malloc(sizeof(struct ubsec_mcr),
+ M_DEVBUF, M_NOWAIT);
+ if (q->q_mcr == NULL) {
err = ENOMEM;
goto errout;
}
- q->q_mcr = (struct ubsec_mcr *)q->q_mcr_dma.dma_vaddr;
bzero(q->q_mcr, sizeof(struct ubsec_mcr));
- bzero(&ctx, sizeof(ctx));
q->q_mcr->mcr_pkts = 1;
q->q_mcr->mcr_flags = 0;
- q->q_sc = sc;
q->q_crp = crp;
crd1 = crp->crp_desc;
@@ -700,59 +679,70 @@ ubsec_process(crp)
if (enccrd) {
encoffset = enccrd->crd_skip;
- ctx.pc_flags |= UBS_PKTCTX_ENC_3DES;
+ q->q_ctx.pc_flags |= UBS_PKTCTX_ENC_3DES;
if (enccrd->crd_flags & CRD_F_ENCRYPT) {
q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;
if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
+ bcopy(enccrd->crd_iv, q->q_ctx.pc_iv, 8);
else {
- ctx.pc_iv[0] = ses->ses_iv[0];
- ctx.pc_iv[1] = ses->ses_iv[1];
+ q->q_ctx.pc_iv[0] = ses->ses_iv[0];
+ q->q_ctx.pc_iv[1] = ses->ses_iv[1];
}
if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
if (crp->crp_flags & CRYPTO_F_IMBUF)
m_copyback(q->q_src_m, enccrd->crd_inject,
- 8, (caddr_t)ctx.pc_iv);
+ 8, (caddr_t)q->q_ctx.pc_iv);
else if (crp->crp_flags & CRYPTO_F_IOV) {
if (crp->crp_iv == NULL) {
err = EINVAL;
goto errout;
}
bcopy(crp->crp_iv,
- (caddr_t)ctx.pc_iv, 8);
+ (caddr_t)q->q_ctx.pc_iv, 8);
}
}
} else {
- ctx.pc_flags |= UBS_PKTCTX_INBOUND;
+ q->q_ctx.pc_flags |= UBS_PKTCTX_INBOUND;
if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
+ bcopy(enccrd->crd_iv, q->q_ctx.pc_iv, 8);
else if (crp->crp_flags & CRYPTO_F_IMBUF)
m_copydata(q->q_src_m, enccrd->crd_inject,
- 8, (caddr_t)ctx.pc_iv);
+ 8, (caddr_t)q->q_ctx.pc_iv);
else if (crp->crp_flags & CRYPTO_F_IOV) {
if (crp->crp_iv == NULL) {
err = EINVAL;
goto errout;
}
- bcopy(crp->crp_iv, (caddr_t)ctx.pc_iv, 8);
+ bcopy(crp->crp_iv, (caddr_t)q->q_ctx.pc_iv, 8);
}
}
- SWAP32(ctx.pc_iv[0]);
- SWAP32(ctx.pc_iv[1]);
+ q->q_ctx.pc_deskey[0] = ses->ses_deskey[0];
+ q->q_ctx.pc_deskey[1] = ses->ses_deskey[1];
+ q->q_ctx.pc_deskey[2] = ses->ses_deskey[2];
+ q->q_ctx.pc_deskey[3] = ses->ses_deskey[3];
+ q->q_ctx.pc_deskey[4] = ses->ses_deskey[4];
+ q->q_ctx.pc_deskey[5] = ses->ses_deskey[5];
+ SWAP32(q->q_ctx.pc_iv[0]);
+ SWAP32(q->q_ctx.pc_iv[1]);
}
if (maccrd) {
macoffset = maccrd->crd_skip;
if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
- ctx.pc_flags |= UBS_PKTCTX_AUTH_MD5;
+ q->q_ctx.pc_flags |= UBS_PKTCTX_AUTH_MD5;
else
- ctx.pc_flags |= UBS_PKTCTX_AUTH_SHA1;
+ q->q_ctx.pc_flags |= UBS_PKTCTX_AUTH_SHA1;
+
+ for (i = 0; i < 5; i++) {
+ q->q_ctx.pc_hminner[i] = ses->ses_hminner[i];
+ q->q_ctx.pc_hmouter[i] = ses->ses_hmouter[i];
+ }
}
if (enccrd && maccrd) {
@@ -789,14 +779,14 @@ ubsec_process(crp)
cpoffset = cpskip + dtheend;
coffset = 0;
}
- ctx.pc_offset = coffset >> 2;
+ q->q_ctx.pc_offset = coffset >> 2;
if (crp->crp_flags & CRYPTO_F_IMBUF)
q->q_src_l = mbuf2pages(q->q_src_m, &q->q_src_npa, q->q_src_packp,
q->q_src_packl, MAX_SCATTER, &nicealign);
else if (crp->crp_flags & CRYPTO_F_IOV)
- q->q_src_l = iov2pages(q->q_src_io, &q->q_src_npa, q->q_src_packp,
- q->q_src_packl, MAX_SCATTER, &nicealign);
+ q->q_src_l = iov2pages(q->q_src_io, &q->q_src_npa,
+ q->q_src_packp, q->q_src_packl, MAX_SCATTER, &nicealign);
if (q->q_src_l == 0) {
err = ENOMEM;
goto errout;
@@ -850,8 +840,7 @@ ubsec_process(crp)
j++;
}
#ifdef UBSEC_DEBUG
- printf(" buf[%x]: %d@%x -> %x\n",
- q->q_mcr_dma.dma_map->dm_segs[0].ds_addr,
+ printf(" buf[%x]: %d@%x -> %x\n", vtophys(q->q_mcr),
q->q_mcr->mcr_ipktbuf.pb_len,
q->q_mcr->mcr_ipktbuf.pb_addr,
q->q_mcr->mcr_ipktbuf.pb_next);
@@ -989,7 +978,7 @@ ubsec_process(crp)
}
#ifdef UBSEC_DEBUG
printf(" buf[%d, %x]: %d@%x -> %x\n", 0,
- q->q_mcr_dma.dma_map->dm_segs[0].ds_addr,
+ vtophys(q->q_mcr),
q->q_mcr->mcr_opktbuf.pb_len,
q->q_mcr->mcr_opktbuf.pb_addr,
q->q_mcr->mcr_opktbuf.pb_next);
@@ -1003,51 +992,24 @@ ubsec_process(crp)
#endif
}
- /*
- * Put computed context into dma safe memory
- */
- if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktctx_long),
- &q->q_ctx_dma, 0)) {
- err = ENOMEM;
- goto errout;
- }
- q->q_mcr->mcr_cmdctxp = q->q_ctx_dma.dma_map->dm_segs[0].ds_addr;
if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
- struct ubsec_pktctx_long *cx;
-
- cx = (struct ubsec_pktctx_long *)q->q_ctx_dma.dma_vaddr;
- cx->pc_len = sizeof(struct ubsec_pktctx_long);
- cx->pc_type = UBS_PKTCTX_TYPE_IPSEC;
- cx->pc_flags = ctx.pc_flags;
- cx->pc_offset = ctx.pc_offset;
-
- if (enccrd)
- for (i = 0; i < 6; i++)
- cx->pc_deskey[i] = ses->ses_deskey[i];
- if (maccrd) {
- for (i = 0; i < 5; i++) {
- cx->pc_hminner[i] = ses->ses_hminner[i];
- cx->pc_hmouter[i] = ses->ses_hmouter[i];
- }
- }
- cx->pc_iv[0] = ctx.pc_iv[0];
- cx->pc_iv[1] = ctx.pc_iv[1];
- } else {
- struct ubsec_pktctx *cx;
-
- cx = (struct ubsec_pktctx *)q->q_ctx_dma.dma_vaddr;
- bcopy(&ctx, q->q_ctx_dma.dma_vaddr, sizeof(ctx));
- if (enccrd)
- for (i = 0; i < 6; i++)
- cx->pc_deskey[i] = ses->ses_deskey[i];
- if (maccrd) {
- for (i = 0; i < 5; i++) {
- ctx.pc_hminner[i] = ses->ses_hminner[i];
- ctx.pc_hmouter[i] = ses->ses_hmouter[i];
- }
- }
- }
- bus_dmamap_sync(sc->sc_dmat, q->q_ctx_dma.dma_map, BUS_DMASYNC_PREREAD);
+ q->q_mcr->mcr_cmdctxp = vtophys(&q->q_ctxl);
+
+ /* transform small context into long context */
+ q->q_ctxl.pc_len = sizeof(struct ubsec_pktctx_long);
+ q->q_ctxl.pc_type = UBS_PKTCTX_TYPE_IPSEC;
+ q->q_ctxl.pc_flags = q->q_ctx.pc_flags;
+ q->q_ctxl.pc_offset = q->q_ctx.pc_offset;
+ for (i = 0; i < 6; i++)
+ q->q_ctxl.pc_deskey[i] = q->q_ctx.pc_deskey[i];
+ for (i = 0; i < 5; i++)
+ q->q_ctxl.pc_hminner[i] = q->q_ctx.pc_hminner[i];
+ for (i = 0; i < 5; i++)
+ q->q_ctxl.pc_hmouter[i] = q->q_ctx.pc_hmouter[i];
+ q->q_ctxl.pc_iv[0] = q->q_ctx.pc_iv[0];
+ q->q_ctxl.pc_iv[1] = q->q_ctx.pc_iv[1];
+ } else
+ q->q_mcr->mcr_cmdctxp = vtophys(&q->q_ctx);
s = splnet();
SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
@@ -1058,11 +1020,9 @@ ubsec_process(crp)
errout:
if (q != NULL) {
- if (q->q_ctx_dma.dma_map != NULL)
- ubsec_dma_free(sc, &q->q_ctx_dma);
- if (q->q_mcr_dma.dma_map != NULL)
- ubsec_dma_free(sc, &q->q_mcr_dma);
- if (q->q_dst_m && q->q_src_m != q->q_dst_m)
+ if (q->q_mcr != NULL)
+ free(q->q_mcr, M_DEVBUF);
+ if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
m_freem(q->q_dst_m);
free(q, M_DEVBUF);
}
@@ -1072,16 +1032,13 @@ errout:
}
void
-ubsec_callback(q)
+ubsec_callback(sc, q)
+ struct ubsec_softc *sc;
struct ubsec_q *q;
{
struct cryptop *crp = (struct cryptop *)q->q_crp;
struct cryptodesc *crd;
- bus_dmamap_sync(q->q_sc->sc_dmat, q->q_ctx_dma.dma_map,
- BUS_DMASYNC_POSTREAD);
- ubsec_dma_free(q->q_sc, &q->q_ctx_dma);
-
if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
m_freem(q->q_src_m);
crp->crp_buf = (caddr_t)q->q_dst_m;
@@ -1096,11 +1053,12 @@ ubsec_callback(q)
if (crp->crp_flags & CRYPTO_F_IMBUF)
m_copydata((struct mbuf *)crp->crp_buf,
crd->crd_skip + crd->crd_len - 8, 8,
- (caddr_t)q->q_sc->sc_sessions[q->q_sesn].ses_iv);
- else if (crp->crp_flags & CRYPTO_F_IOV)
+ (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+ else if (crp->crp_flags & CRYPTO_F_IOV) {
cuio_copydata((struct uio *)crp->crp_buf,
crd->crd_skip + crd->crd_len - 8, 8,
- (caddr_t)q->q_sc->sc_sessions[q->q_sesn].ses_iv);
+ (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+ }
break;
}
}
@@ -1177,7 +1135,13 @@ ubsec_feed2(sc)
if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
break;
q = SIMPLEQ_FIRST(&sc->sc_queue2);
- WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_map->dm_segs[0].ds_addr);
+
+ bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map,
+ BUS_DMASYNC_PREREAD);
+
+ WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q, q_next);
--sc->sc_nqueue2;
SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
@@ -1236,21 +1200,19 @@ ubsec_rng(vsc)
if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
goto out;
- if (rng->rng_q.q_mcr.dma_vaddr == NULL) {
+ if (rng->rng_q.q_mcr.dma_map == NULL) {
if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
&rng->rng_q.q_mcr, 0))
goto out;
if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
&rng->rng_q.q_ctx, 0)) {
ubsec_dma_free(sc, &rng->rng_q.q_mcr);
- rng->rng_q.q_mcr.dma_vaddr = NULL;
goto out;
}
if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ,
&rng->rng_buf, 0)) {
ubsec_dma_free(sc, &rng->rng_q.q_ctx);
ubsec_dma_free(sc, &rng->rng_q.q_mcr);
- rng->rng_q.q_mcr.dma_vaddr = NULL;
goto out;
}
}
@@ -1260,11 +1222,11 @@ ubsec_rng(vsc)
mcr->mcr_pkts = 1;
mcr->mcr_flags = 0;
- mcr->mcr_cmdctxp = rng->rng_q.q_ctx.dma_map->dm_segs[0].ds_addr;
+ mcr->mcr_cmdctxp = rng->rng_q.q_ctx.dma_paddr;
mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
mcr->mcr_ipktbuf.pb_len = 0;
mcr->mcr_reserved = mcr->mcr_pktlen = 0;
- mcr->mcr_opktbuf.pb_addr = rng->rng_buf.dma_map->dm_segs[0].ds_addr;
+ mcr->mcr_opktbuf.pb_addr = rng->rng_buf.dma_paddr;
mcr->mcr_opktbuf.pb_len = ((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
UBS_PKTBUF_LEN;
mcr->mcr_opktbuf.pb_next = 0;
@@ -1272,10 +1234,6 @@ ubsec_rng(vsc)
ctx->rbp_len = sizeof(struct ubsec_ctx_rngbypass);
ctx->rbp_op = UBS_CTXOP_RNGBYPASS;
- bus_dmamap_sync(sc->sc_dmat, rng->rng_q.q_mcr.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- bus_dmamap_sync(sc->sc_dmat, rng->rng_q.q_ctx.dma_map,
- BUS_DMASYNC_PREREAD);
bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map,
BUS_DMASYNC_PREWRITE);
@@ -1302,10 +1260,10 @@ ubsec_dma_malloc(sc, size, dma, mapflags)
struct ubsec_dma_alloc *dma;
int mapflags;
{
- int r;
+ int r;
- if ((r = bus_dmamem_alloc(sc->sc_dmat, size, 4, 0,
- &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
+ if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
+ &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0)
goto fail_0;
if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
@@ -1321,6 +1279,7 @@ ubsec_dma_malloc(sc, size, dma, mapflags)
goto fail_3;
dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
+ dma->dma_size = size;
return (0);
fail_3:
@@ -1340,7 +1299,7 @@ ubsec_dma_free(sc, dma)
struct ubsec_dma_alloc *dma;
{
bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
- bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
+ bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}
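
The mcr2 side, by contrast, keeps DMA-safe memory from ubsec_dma_malloc() and now syncs its maps in ubsec_feed2() before writing a dma_paddr to BS_MCR2. The allocation sequence is only partially visible in the hunks above; the sketch below fills it in with the standard bus_dma(9) alloc/map/create/load order, assuming the struct ubsec_dma_alloc fields seen in the diff (dma_seg, dma_nseg, dma_vaddr, dma_map, dma_paddr, dma_size). ubsec_dma_malloc_sketch is a hypothetical name, and the real function's flags and error unwinding may differ.

	/*
	 * Minimal sketch of the bus_dma(9) sequence behind ubsec_dma_malloc(),
	 * as suggested by the hunks above; error unwinding is abbreviated.
	 */
	static int
	ubsec_dma_malloc_sketch(struct ubsec_softc *sc, bus_size_t size,
	    struct ubsec_dma_alloc *dma, int mapflags)
	{
		int r;

		r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
		    &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT);
		if (r != 0)
			return (r);

		r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
		    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT);
		if (r != 0)
			goto fail_free;

		r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
		    BUS_DMA_NOWAIT, &dma->dma_map);
		if (r != 0)
			goto fail_unmap;

		r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
		    size, NULL, BUS_DMA_NOWAIT);
		if (r != 0)
			goto fail_destroy;

		/* the commit records the size so ubsec_dma_free() can unmap it */
		dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
		dma->dma_size = size;
		return (0);

	fail_destroy:
		bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
	fail_unmap:
		bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
	fail_free:
		bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
		return (r);
	}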