author     Theo de Raadt <deraadt@cvs.openbsd.org>   1999-02-21 00:05:16 +0000
committer  Theo de Raadt <deraadt@cvs.openbsd.org>   1999-02-21 00:05:16 +0000
commit     0714901d0a6f95da7b79f015e05fb6536678985f (patch)
tree       bcdc0107e836ace3d8664bc652a39a0363cffa1f /sys/dev/pci/aeon.c
parent     3816fe98406dbbc16c848ee4fc7d24ac802f1229 (diff)
use src/dst descriptor chaining out of an mbuf
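
The one-line log sums up the change: aeon_crypto() no longer hands the chip a single flat source/dest buffer, but walks the request's mbuf chain and loads one source and one dest descriptor per physically contiguous chunk (the packp[]/packl[] arrays in the new code, whose own comment still calls the walk "very broken"). Below is a minimal sketch of that mbuf-to-segment walk, assuming the usual kernel vtophys()/PAGE_SIZE environment; the helper name and signature are illustrative only and are not part of this commit.

#include <sys/param.h>
#include <sys/mbuf.h>
#include <vm/vm.h>		/* vtophys(), as included by aeon.c itself */

/*
 * Build a [physical address, length] segment list from an mbuf chain,
 * splitting any mbuf whose data spans physically discontiguous pages.
 * Returns the segment count, or -1 if more than maxseg segments would
 * be needed.  (Illustrative only -- the driver open-codes this inside
 * aeon_crypto().)
 */
static int
aeon_mbuf_segments(struct mbuf *m0, long *packp, int *packl, int maxseg)
{
	struct mbuf *m;
	caddr_t va;
	int len, off, chunk, nseg = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		va = m->m_data;
		len = m->m_len;
		while (len > 0) {
			off = (long)va & PAGE_MASK;
			chunk = PAGE_SIZE - off;
			if (chunk > len)
				chunk = len;
			if (nseg > 0 && packp[nseg - 1] + packl[nseg - 1] ==
			    vtophys(va)) {
				/* physically contiguous: extend last segment */
				packl[nseg - 1] += chunk;
			} else {
				if (nseg == maxseg)
					return (-1);
				packp[nseg] = vtophys(va);
				packl[nseg] = chunk;
				nseg++;
			}
			va += chunk;
			len -= chunk;
		}
	}
	return (nseg);
}

Each resulting (packp[i], packl[i]) pair then becomes one srcr/dstr descriptor, with AEON_D_LAST set only on the final chunk -- which is what the loop over nchunks in the new aeon_crypto() below does.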
Diffstat (limited to 'sys/dev/pci/aeon.c')
-rw-r--r--	sys/dev/pci/aeon.c	638
1 file changed, 283 insertions(+), 355 deletions(-)
diff --git a/sys/dev/pci/aeon.c b/sys/dev/pci/aeon.c
index ddb83650ec2..92de95e014c 100644
--- a/sys/dev/pci/aeon.c
+++ b/sys/dev/pci/aeon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: aeon.c,v 1.1 1999/02/19 02:52:19 deraadt Exp $ */
+/* $OpenBSD: aeon.c,v 1.2 1999/02/21 00:05:14 deraadt Exp $ */
/*
* Invertex AEON driver
@@ -14,12 +14,12 @@
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
@@ -39,6 +39,7 @@
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
+#include <sys/mbuf.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
@@ -60,18 +61,6 @@
int aeon_probe __P((struct device *, void *, void *));
void aeon_attach __P((struct device *, struct device *, void *));
-void aeon_reset_board __P((struct aeon_softc *));
-int aeon_enable_crypto __P((struct aeon_softc *));
-void aeon_init_dma __P((struct aeon_softc *));
-void aeon_init_pci_registers __P((struct aeon_softc *));
-int aeon_ram_setting_okay __P((struct aeon_softc *));
-int aeon_intr __P((void *));
-u_int32_t aeon_write_command __P((const struct aeon_command_buf_data *,
- u_int8_t *));
-int aeon_build_command __P((const struct aeon_command * cmd,
- struct aeon_command_buf_data *));
-void aeon_intr_process_ring __P((struct aeon_softc *, struct aeon_dma *));
-
struct cfattach aeon_ca = {
sizeof(struct aeon_softc), aeon_probe, aeon_attach,
};
@@ -80,13 +69,25 @@ struct cfdriver aeon_cd = {
0, "aeon", DV_DULL
};
+void aeon_reset_board __P((struct aeon_softc *));
+int aeon_enable_crypto __P((struct aeon_softc *));
+void aeon_init_dma __P((struct aeon_softc *));
+void aeon_init_pci_registers __P((struct aeon_softc *));
+int aeon_checkram __P((struct aeon_softc *));
+int aeon_intr __P((void *));
+u_int32_t aeon_write_command __P((const struct aeon_command_buf_data *,
+ u_int8_t *));
+int aeon_build_command __P((const struct aeon_command * cmd,
+ struct aeon_command_buf_data *));
+void aeon_intr_process_ring __P((struct aeon_softc *, struct aeon_dma *));
+
+
/*
* Used for round robin crypto requests
*/
int aeon_num_devices = 0;
struct aeon_softc *aeon_devices[AEON_MAX_DEVICES];
-
int
aeon_probe(parent, match, aux)
struct device *parent;
@@ -101,9 +102,6 @@ aeon_probe(parent, match, aux)
return (0);
}
-/*
- * Purpose: One time initialization for the device performed at bootup.
- */
void
aeon_attach(parent, self, aux)
struct device *parent, *self;
@@ -167,8 +165,8 @@ aeon_attach(parent, self, aux)
aeon_init_dma(sc);
aeon_init_pci_registers(sc);
- if (aeon_ram_setting_okay(sc) != 0)
- sc->is_dram_model = 1;
+ if (aeon_checkram(sc) != 0)
+ sc->sc_drammodel = 1;
/*
* Reinitialize again, since the DRAM/SRAM detection shifted our ring
@@ -202,8 +200,8 @@ aeon_attach(parent, self, aux)
}
/*
- * Purpose: Resets the board. Values in the regesters are left as is
- * from the reset (i.e. initial values are assigned elsewhere).
+ * Resets the board. Values in the regesters are left as is
+ * from the reset (i.e. initial values are assigned elsewhere).
*/
void
aeon_reset_board(sc)
@@ -241,13 +239,9 @@ aeon_reset_board(sc)
}
/*
- * Purpose: Checks to see if crypto is already enabled. If crypto
- * isn't enable, "aeon_enable_crypto" is called to enable it.
- * The check is important, as enabling crypto twice will lock
- * the board.
- *
- * Returns: 0 value on success, -1 if we were not able to unlock the
- * cryptographic engine.
+ * Checks to see if crypto is already enabled. If crypto isn't enable,
+ * "aeon_enable_crypto" is called to enable it. The check is important,
+ * as enabling crypto twice will lock the board.
*/
int
aeon_enable_crypto(sc)
@@ -262,7 +256,7 @@ aeon_enable_crypto(sc)
WRITE_REG_0(sc, AEON_RAM_CONFIG,
READ_REG_0(sc, AEON_RAM_CONFIG) | (0x1 << 5));
- encryption_level = READ_REG_0(sc, AEON_ENCRYPTION_LEVEL);
+ encryption_level = READ_REG_0(sc, AEON_CRYPTLEVEL);
/*
* Make sure we don't re-unlock. Two unlocks kills chip until the
@@ -302,69 +296,50 @@ aeon_enable_crypto(sc)
}
/*
- * Purpose: Give initial values to the registers listed in the
- * "Register Space" section of the AEON Software Development
- * reference manual.
+ * Give initial values to the registers listed in the "Register Space"
+ * section of the AEON Software Development reference manual.
*/
void
aeon_init_pci_registers(sc)
struct aeon_softc *sc;
{
- u_int32_t ram_config;
-
- /*
- * Write fixed values needed by the Initialization registers
- */
+ /* write fixed values needed by the Initialization registers */
WRITE_REG_0(sc, AEON_INIT_1, 0x2);
WRITE_REG_0(sc, AEON_INIT_2, 0x400);
WRITE_REG_0(sc, AEON_INIT_3, 0x200);
- /*
- * Write all 4 ring address registers
- */
- WRITE_REG_1(sc, AEON_COMMAND_RING_ADDR,
- vtophys(sc->sc_dma->command_ring));
- WRITE_REG_1(sc, AEON_SOURCE_RING_ADDR,
- vtophys(sc->sc_dma->source_ring));
- WRITE_REG_1(sc, AEON_DEST_RING_ADDR,
- vtophys(sc->sc_dma->dest_ring));
- WRITE_REG_1(sc, AEON_RESULT_RING_ADDR,
- vtophys(sc->sc_dma->result_ring));
+ /* write all 4 ring address registers */
+ WRITE_REG_1(sc, AEON_CMDR_ADDR, vtophys(sc->sc_dma->cmdr));
+ WRITE_REG_1(sc, AEON_SRCR_ADDR, vtophys(sc->sc_dma->srcr));
+ WRITE_REG_1(sc, AEON_DSTR_ADDR, vtophys(sc->sc_dma->dstr));
+ WRITE_REG_1(sc, AEON_RESR_ADDR, vtophys(sc->sc_dma->resr));
- /*
- * Write status register
- */
+ /* write status register */
WRITE_REG_1(sc, AEON_STATUS, AEON_INIT_STATUS_REG);
+ WRITE_REG_1(sc, AEON_IRQEN, AEON_INIT_INTERRUPT_ENABLE_REG);
- /*
- * Write registers which had thier initial values defined
- * elsewhere. The "Encryption level register" is the only
- * documented register not initialized by this routine (it's read
- * only).
- */
- WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE, AEON_INIT_INTERRUPT_ENABLE_REG);
-
- ram_config = AEON_INIT_RAM_CONFIG_REG
+#if 0
#if BYTE_ORDER == BIG_ENDIAN
- | (0x1 << 7)
+ (0x1 << 7) |
+#endif
#endif
- | (sc->is_dram_model << 4);
- WRITE_REG_0(sc, AEON_RAM_CONFIG, ram_config);
+ WRITE_REG_0(sc, AEON_RAM_CONFIG, AEON_INIT_RAM_CONFIG_REG |
+ sc->sc_drammodel << 4);
+
WRITE_REG_0(sc, AEON_EXPAND, AEON_INIT_EXPAND_REG);
WRITE_REG_1(sc, AEON_DMA_CFG, AEON_INIT_DMA_CONFIG_REG);
}
/*
- * Purpose: There are both DRAM and SRAM models of the aeon board.
- * A bit in the "ram configuration register" needs to be
- * set according to the model. The driver will guess one
- * way or the other -- and then call this routine to verify.
- * Returns:
- * 0: RAM setting okay
- * -1: Current RAM setting in error
+ * There are both DRAM and SRAM models of the aeon board.
+ * A bit in the "ram configuration register" needs to be
+ * set according to the model. The driver will guess one
+ * way or the other -- and then call this routine to verify.
+ *
+ * 0: RAM setting okay, -1: Current RAM setting in error
*/
int
-aeon_ram_setting_okay(sc)
+aeon_checkram(sc)
struct aeon_softc *sc;
{
aeon_base_command_t write_command = {(0x3 << 13), 0, 8, 0};
@@ -372,166 +347,128 @@ aeon_ram_setting_okay(sc)
u_int8_t data[8] = {'1', '2', '3', '4', '5', '6', '7', '8'};
u_int8_t *source_buf, *dest_buf;
struct aeon_dma *dma = sc->sc_dma;
+ const u_int32_t masks = AEON_D_VALID | AEON_D_LAST |
+ AEON_D_MASKDONEIRQ;
- const u_int32_t masks = AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST |
- AEON_DESCRIPT_MASK_DONE_IRQ;
-
-#if (AEON_DESCRIPT_RING_SIZE < 3)
+#if (AEON_D_RSIZE < 3)
#error "descriptor ring size too small DRAM/SRAM check"
#endif
/*
- * We steal the 8 bytes needed for both the source and dest buffers
- * from the 3rd slot that the DRAM/SRAM test won't use.
+ * We steal the 8 bytes needed for both the source and dest buffers
+ * from the 3rd slot that the DRAM/SRAM test won't use.
*/
source_buf = sc->sc_dma->command_bufs[2];
dest_buf = sc->sc_dma->result_bufs[2];
- /*
- * Build write command
- */
+ /* build write command */
*(aeon_base_command_t *) sc->sc_dma->command_bufs[0] = write_command;
bcopy(data, source_buf, sizeof(data));
- dma->source_ring[0].pointer = vtophys(source_buf);
- dma->dest_ring[0].pointer = vtophys(dest_buf);
+ dma->srcr[0].p = vtophys(source_buf);
+ dma->dstr[0].p = vtophys(dest_buf);
- dma->command_ring[0].length = 16 | masks;
- dma->source_ring[0].length = 8 | masks;
- dma->dest_ring[0].length = 8 | masks;
- dma->result_ring[0].length = AEON_MAX_RESULT_LENGTH | masks;
+ dma->cmdr[0].l = 16 | masks;
+ dma->srcr[0].l = 8 | masks;
+ dma->dstr[0].l = 8 | masks;
+ dma->resr[0].l = AEON_MAX_RESULT | masks;
- /*
- * Let write command execute
- */
- DELAY(1000);
-
- if (dma->result_ring[0].length & AEON_DESCRIPT_VALID)
+ DELAY(1000); /* let write command execute */
+ if (dma->resr[0].l & AEON_D_VALID)
printf("%s: SRAM/DRAM detection error -- result[0] valid still set\n",
sc->sc_dv.dv_xname);
- /*
- * Build read command
- */
+ /* Build read command */
*(aeon_base_command_t *) sc->sc_dma->command_bufs[1] = read_command;
- dma->source_ring[1].pointer = vtophys(source_buf);
- dma->dest_ring[1].pointer = vtophys(dest_buf);
-
- dma->command_ring[1].length = 16 | masks;
- dma->source_ring[1].length = 8 | masks;
- dma->dest_ring[1].length = 8 | masks;
- dma->result_ring[1].length = AEON_MAX_RESULT_LENGTH | masks;
-
- /*
- * Let read command execute
- */
- DELAY(1000);
+ dma->srcr[1].p = vtophys(source_buf);
+ dma->dstr[1].p = vtophys(dest_buf);
+ dma->cmdr[1].l = 16 | masks;
+ dma->srcr[1].l = 8 | masks;
+ dma->dstr[1].l = 8 | masks;
+ dma->resr[1].l = AEON_MAX_RESULT | masks;
- if (dma->result_ring[1].length & AEON_DESCRIPT_VALID)
+ DELAY(1000); /* let read command execute */
+ if (dma->resr[1].l & AEON_D_VALID)
printf("%s: SRAM/DRAM detection error -- result[1] valid still set\n",
sc->sc_dv.dv_xname);
-
return (memcmp(dest_buf, data, sizeof(data)) == 0) ? 0 : -1;
}
/*
- * Purpose: Initialize the descriptor rings.
+ * Initialize the descriptor rings.
*/
void
aeon_init_dma(sc)
struct aeon_softc *sc;
{
- int i;
struct aeon_dma *dma = sc->sc_dma;
+ int i;
- /*
- * Initialize static pointer values.
- */
- for (i = 0; i < AEON_DESCRIPT_RING_SIZE; i++) {
- dma->command_ring[i].pointer = vtophys(dma->command_bufs[i]);
- dma->result_ring[i].pointer = vtophys(dma->result_bufs[i]);
+ /* initialize static pointer values */
+ for (i = 0; i < AEON_D_RSIZE; i++) {
+ dma->cmdr[i].p = vtophys(dma->command_bufs[i]);
+ dma->resr[i].p = vtophys(dma->result_bufs[i]);
}
-
- dma->command_ring[AEON_DESCRIPT_RING_SIZE].pointer =
- vtophys(dma->command_ring);
-
- dma->source_ring[AEON_DESCRIPT_RING_SIZE].pointer =
- vtophys(dma->source_ring);
-
- dma->dest_ring[AEON_DESCRIPT_RING_SIZE].pointer =
- vtophys(dma->dest_ring);
-
- dma->result_ring[AEON_DESCRIPT_RING_SIZE].pointer =
- vtophys(dma->result_ring);
+ dma->cmdr[AEON_D_RSIZE].p = vtophys(dma->cmdr);
+ dma->srcr[AEON_D_RSIZE].p = vtophys(dma->srcr);
+ dma->dstr[AEON_D_RSIZE].p = vtophys(dma->dstr);
+ dma->resr[AEON_D_RSIZE].p = vtophys(dma->resr);
}
/*
- * Purpose: Writes out the raw command buffer space. Returns the
- * command buffer size.
+ * Writes out the raw command buffer space. Returns the
+ * command buffer size.
*/
u_int32_t
-aeon_write_command(
- const struct aeon_command_buf_data * cmd_data,
- u_int8_t * command_buf
-)
+aeon_write_command(const struct aeon_command_buf_data *cmd_data,
+ u_int8_t *command_buf)
{
u_int8_t *command_buf_pos = command_buf;
const aeon_base_command_t *base_cmd = &cmd_data->base_cmd;
const aeon_mac_command_t *mac_cmd = &cmd_data->mac_cmd;
const aeon_crypt_command_t *crypt_cmd = &cmd_data->crypt_cmd;
-
int using_mac = base_cmd->masks & AEON_BASE_CMD_MAC;
int using_crypt = base_cmd->masks & AEON_BASE_CMD_CRYPT;
- /*
- * Write base command structure
- */
+ /* write base command structure */
*((aeon_base_command_t *) command_buf_pos) = *base_cmd;
command_buf_pos += sizeof(aeon_base_command_t);
- /*
- * Write MAC command structure
- */
+ /* Write MAC command structure */
if (using_mac) {
*((aeon_mac_command_t *) command_buf_pos) = *mac_cmd;
command_buf_pos += sizeof(aeon_mac_command_t);
}
- /*
- * Write encryption command structure
- */
+
+ /* Write encryption command structure */
if (using_crypt) {
*((aeon_crypt_command_t *) command_buf_pos) = *crypt_cmd;
command_buf_pos += sizeof(aeon_crypt_command_t);
}
- /*
- * Write MAC key
- */
- if (mac_cmd->masks & AEON_MAC_CMD_NEW_KEY) {
- bcopy(cmd_data->mac_key, command_buf_pos, AEON_MAC_KEY_LENGTH);
+
+ /* write MAC key */
+ if (mac_cmd->masks & AEON_MAC_NEW_KEY) {
+ bcopy(cmd_data->mac, command_buf_pos, AEON_MAC_KEY_LENGTH);
command_buf_pos += AEON_MAC_KEY_LENGTH;
}
- /*
- * Write crypto key
- */
+
+ /* Write crypto key */
if (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_KEY) {
u_int32_t alg = crypt_cmd->masks & AEON_CRYPT_CMD_ALG_MASK;
u_int32_t key_len = (alg == AEON_CRYPT_CMD_ALG_DES) ?
AEON_DES_KEY_LENGTH : AEON_3DES_KEY_LENGTH;
- bcopy(cmd_data->crypt_key, command_buf_pos, key_len);
+ bcopy(cmd_data->ck, command_buf_pos, key_len);
command_buf_pos += key_len;
}
- /*
- * Write crypto iv
- */
+
+ /* Write crypto iv */
if (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_IV) {
- bcopy(cmd_data->initial_vector, command_buf_pos, AEON_IV_LENGTH);
+ bcopy(cmd_data->iv, command_buf_pos, AEON_IV_LENGTH);
command_buf_pos += AEON_IV_LENGTH;
}
- /*
- * Write 8 bytes of zero's if we're not sending crypt or MAC
- * structures
- */
+
+ /* Write 8 zero bytes we're not sending crypt or MAC structures */
if (!(base_cmd->masks & AEON_BASE_CMD_MAC) &&
!(base_cmd->masks & AEON_BASE_CMD_CRYPT)) {
*((u_int32_t *) command_buf_pos) = 0;
@@ -539,25 +476,20 @@ aeon_write_command(
*((u_int32_t *) command_buf_pos) = 0;
command_buf_pos += 4;
}
-#if 0
- if ((command_buf_pos - command_buf) > AEON_MAX_COMMAND_LENGTH)
- printf("aeon: Internal Error -- Command buffer overflow.\n");
-#endif
+ if ((command_buf_pos - command_buf) > AEON_MAX_COMMAND)
+ printf("aeon: Internal Error -- Command buffer overflow.\n");
return command_buf_pos - command_buf;
-
}
/*
- * Purpose: Check command input and build up structure to write
- * the command buffer later. Returns 0 on success and
- * -1 if given bad command input was given.
+ * Check command input and build up structure to write
+ * the command buffer later. Returns 0 on success and
+ * -1 if given bad command input was given.
*/
int
-aeon_build_command(
- const struct aeon_command * cmd,
- struct aeon_command_buf_data * cmd_buf_data
-)
+aeon_build_command(const struct aeon_command *cmd,
+ struct aeon_command_buf_data * cmd_buf_data)
{
#define AEON_COMMAND_CHECKING
@@ -566,14 +498,12 @@ aeon_build_command(
aeon_mac_command_t *mac_cmd = &cmd_buf_data->mac_cmd;
aeon_crypt_command_t *crypt_cmd = &cmd_buf_data->crypt_cmd;
u_int mac_length;
-
#ifdef AEON_COMMAND_CHECKING
int dest_diff;
#endif
bzero(cmd_buf_data, sizeof(struct aeon_command_buf_data));
-
#ifdef AEON_COMMAND_CHECKING
if (!(!!(flags & AEON_DECODE) ^ !!(flags & AEON_ENCODE))) {
printf("aeon: encode/decode setting error\n");
@@ -591,7 +521,7 @@ aeon_build_command(
/*
- * Compute the mac value length -- leave at zero if not MAC'ing
+ * Compute the mac value length -- leave at zero if not MAC'ing
*/
mac_length = 0;
if (AEON_USING_MAC(flags)) {
@@ -600,12 +530,12 @@ aeon_build_command(
}
#ifdef AEON_COMMAND_CHECKING
/*
- * Check for valid src/dest buf sizes
+ * Check for valid src/dest buf sizes
*/
/*
- * XXX XXX We need to include header counts into all these
- * checks!!!!
+ * XXX XXX We need to include header counts into all these
+ * checks!!!!
*/
if (cmd->source_length <= mac_length) {
@@ -620,14 +550,8 @@ aeon_build_command(
}
#endif
-
- /**
- ** Building up base command
- **
- **/
-
/*
- * Set MAC bit
+ * Set MAC bit
*/
if (AEON_USING_MAC(flags))
base_cmd->masks |= AEON_BASE_CMD_MAC;
@@ -637,20 +561,20 @@ aeon_build_command(
base_cmd->masks |= AEON_BASE_CMD_CRYPT;
/*
- * Set Decode bit
+ * Set Decode bit
*/
if (flags & AEON_DECODE)
base_cmd->masks |= AEON_BASE_CMD_DECODE;
/*
- * Set total source and dest counts. These values are the same as the
- * values set in the length field of the source and dest descriptor rings.
+ * Set total source and dest counts. These values are the same as the
+ * values set in the length field of the source and dest descriptor rings.
*/
base_cmd->total_source_count = cmd->source_length;
base_cmd->total_dest_count = cmd->dest_length;
/*
- * XXX -- We need session number range checking...
+ * XXX -- We need session number range checking...
*/
base_cmd->session_num = cmd->session_num;
@@ -661,7 +585,7 @@ aeon_build_command(
if (AEON_USING_MAC(flags)) {
/*
- * Set the MAC algorithm and trunc setting
+ * Set the MAC algorithm and trunc setting
*/
mac_cmd->masks |= (flags & AEON_MAC_MD5) ?
AEON_MAC_CMD_ALG_MD5 : AEON_MAC_CMD_ALG_SHA1;
@@ -669,36 +593,33 @@ aeon_build_command(
mac_cmd->masks |= AEON_MAC_CMD_TRUNC;
/*
- * We always use HMAC mode, assume MAC values are appended to the
- * source buffer on decodes and we append them to the dest buffer
- * on encodes, and order auth/encryption engines as needed by
- * IPSEC
+ * We always use HMAC mode, assume MAC values are appended to the
+ * source buffer on decodes and we append them to the dest buffer
+ * on encodes, and order auth/encryption engines as needed by
+ * IPSEC
*/
mac_cmd->masks |= AEON_MAC_CMD_MODE_HMAC | AEON_MAC_CMD_APPEND |
AEON_MAC_CMD_POS_IPSEC;
/*
- * Setup to send new MAC key if needed.
+ * Setup to send new MAC key if needed.
*/
- if (flags & AEON_MAC_CMD_NEW_KEY) {
+ if (flags & AEON_MAC_NEW_KEY) {
mac_cmd->masks |= AEON_MAC_CMD_NEW_KEY;
- cmd_buf_data->mac_key = cmd->mac_key;
+ cmd_buf_data->mac = cmd->mac;
}
/*
- * Set the mac header skip and source count.
+ * Set the mac header skip and source count.
*/
+ mac_cmd->header_skip = cmd->mac_header_skip;
mac_cmd->source_count = cmd->source_length - cmd->mac_header_skip;
if (flags & AEON_DECODE)
mac_cmd->source_count -= mac_length;
}
- /**
- ** Building up crypto command
- **
- **/
- if (AEON_USING_CRYPT(flags)) {
+ if (AEON_USING_CRYPT(flags)) {
/*
- * Set the encryption algorithm bits.
+ * Set the encryption algorithm bits.
*/
crypt_cmd->masks |= (flags & AEON_CRYPT_DES) ?
AEON_CRYPT_CMD_ALG_DES : AEON_CRYPT_CMD_ALG_3DES;
@@ -708,14 +629,14 @@ aeon_build_command(
crypt_cmd->masks |= AEON_CRYPT_CMD_MODE_CBC | AEON_CRYPT_CMD_NEW_IV;
/*
- * Setup to send new encrypt key if needed.
+ * Setup to send new encrypt key if needed.
*/
if (flags & AEON_CRYPT_CMD_NEW_KEY) {
crypt_cmd->masks |= AEON_CRYPT_CMD_NEW_KEY;
- cmd_buf_data->crypt_key = cmd->crypt_key;
+ cmd_buf_data->ck = cmd->ck;
}
/*
- * Set the encrypt header skip and source count.
+ * Set the encrypt header skip and source count.
*/
crypt_cmd->header_skip = cmd->crypt_header_skip;
crypt_cmd->source_count = cmd->source_length - cmd->crypt_header_skip;
@@ -731,56 +652,45 @@ aeon_build_command(
}
#endif
}
- cmd_buf_data->initial_vector = cmd->initial_vector;
+ cmd_buf_data->iv = cmd->iv;
-#if 0
+#if 1
printf("aeon: command parameters"
" -- session num %u"
" -- base t.s.c: %u"
" -- base t.d.c: %u"
" -- mac h.s. %u s.c. %u"
" -- crypt h.s. %u s.c. %u\n",
- base_cmd->session_num,
- base_cmd->total_source_count,
- base_cmd->total_dest_count,
- mac_cmd->header_skip,
- mac_cmd->source_count,
- crypt_cmd->header_skip,
- crypt_cmd->source_count
- );
+ base_cmd->session_num, base_cmd->total_source_count,
+ base_cmd->total_dest_count, mac_cmd->header_skip,
+ mac_cmd->source_count, crypt_cmd->header_skip,
+ crypt_cmd->source_count);
#endif
return 0; /* success */
}
-
-/*
- * Function: aeon_process_command
- */
int
-aeon_crypto(struct aeon_command * cmd)
+aeon_crypto(struct aeon_command *cmd)
{
- u_int32_t command_length;
-
- u_int32_t local_ring_pos;
- int err;
- int oldint;
+ u_int32_t cmdlen;
static u_int32_t current_device = 0;
- struct aeon_softc *sc;
- struct aeon_dma *dma;
- const u_int32_t masks = AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST |
- AEON_DESCRIPT_MASK_DONE_IRQ;
-
- struct aeon_command_buf_data cmd_buf_data;
+ struct aeon_softc *sc;
+ struct aeon_dma *dma;
+ struct aeon_command_buf_data cmd_buf_data;
+ int cmdi, srci, dsti, resi;
+ int error, s;
+#define MAX_SCATTER 10
+ long packp[MAX_SCATTER];
+ int packl[MAX_SCATTER];
+ struct mbuf *m0, *m = cmd->m;
+ int nchunks, i;
if (aeon_build_command(cmd, &cmd_buf_data) != 0)
return AEON_CRYPTO_BAD_INPUT;
- /*
- * Turning off interrupts
- */
- oldint = splimp();
+ s = splimp();
/* Pick the aeon board to send the data to. Right now we use a round
* robin approach. */
@@ -789,7 +699,7 @@ aeon_crypto(struct aeon_command * cmd)
current_device = 0;
dma = sc->sc_dma;
-#if 0
+#if 1
printf("%s: Entering command"
" -- Status Reg 0x%08x"
" -- Interrupt Enable Reg 0x%08x"
@@ -797,214 +707,232 @@ aeon_crypto(struct aeon_command * cmd)
" -- source length %u"
" -- dest length %u\n",
sc->sc_dv.dv_xname,
- READ_REG_1(sc, AEON_STATUS),
- READ_REG_1(sc, AEON_INTERRUPT_ENABLE),
- dma->slots_in_use,
- cmd->source_length,
- cmd->dest_length
- );
+ READ_REG_1(sc, AEON_STATUS), READ_REG_1(sc, AEON_IRQEN),
+ dma->slots_in_use, cmd->source_length, cmd->dest_length);
#endif
+ /*
+ * Generate a [pa,len] array from an mbuf.
+ * This is very broken
+ */
+ nchunks = 0;
+ for (m0 = m; m; m = m->m_next) {
+ void *va;
+ long pg, npg;
+ int len, off;
- if (dma->slots_in_use == AEON_DESCRIPT_RING_SIZE) {
-
- if (cmd->flags & AEON_DMA_FULL_NOBLOCK)
- return AEON_CRYPTO_RINGS_FULL;
+ va = m->m_data;
+ len = m->m_len;
- do {
-#ifdef AEON_DEBUG
- printf("%s: Waiting for unused ring.\n",
- sc->sc_dv.dv_xname);
-#endif
- /* sleep for minimum timeout */
- tsleep((caddr_t) dma, PZERO, "QFULL", 1);
+ packl[nchunks] = len;
+ packp[nchunks] = vtophys(va);
+ pg = packp[nchunks] & ~PAGE_MASK;
+ off = (long)va & PAGE_MASK;
- } while (dma->slots_in_use == AEON_DESCRIPT_RING_SIZE);
+ while (len + off > PAGE_SIZE) {
+ va = va + PAGE_SIZE - off;
+ npg = vtophys(va);
+ if (npg != pg) {
+ nchunks++;
+ break;
+ }
+ packl[nchunks] = PAGE_SIZE - off;
+ off = 0;
+ ++nchunks;
+ packl[nchunks] = len - (PAGE_SIZE - off);
+ len -= packl[nchunks];
+ packp[nchunks] = vtophys(va);
+ }
}
- dma->slots_in_use++;
-
- if (dma->ring_pos == AEON_DESCRIPT_RING_SIZE) {
- local_ring_pos = 0;
- dma->ring_pos = 1;
- } else {
- local_ring_pos = dma->ring_pos++;
+ /*
+ * need 1 cmd, and 1 res
+ * need N src, and N dst
+ */
+ while (dma->cmdu+1 == AEON_D_RSIZE || dma->srcu+nchunks == AEON_D_RSIZE ||
+ dma->dstu+nchunks == AEON_D_RSIZE || dma->resu+1 == AEON_D_RSIZE) {
+ if (cmd->flags & AEON_DMA_FULL_NOBLOCK)
+ splx(s);
+ return (AEON_CRYPTO_RINGS_FULL);
+ tsleep((caddr_t) dma, PZERO, "aeonring", 1);
}
+ dma->cmdu += 1;
+ dma->resu += 1;
+ if (dma->cmdi == AEON_D_RSIZE) {
+ cmdi = 0, dma->cmdi = 1;
+ dma->cmdr[AEON_D_RSIZE].l = AEON_D_VALID | AEON_D_LAST |
+ AEON_D_MASKDONEIRQ | AEON_D_JUMP;
+ } else
+ cmdi = dma->cmdi++;
- command_length =
- aeon_write_command(&cmd_buf_data, dma->command_bufs[local_ring_pos]);
+ if (dma->resi == AEON_D_RSIZE) {
+ resi = 0, dma->resi = 1;
+ dma->resr[AEON_D_RSIZE].l = AEON_D_VALID | AEON_D_LAST |
+ AEON_D_MASKDONEIRQ | AEON_D_JUMP;
+ } else
+ resi = dma->resi++;
- dma->aeon_commands[local_ring_pos] = cmd;
+ cmdlen = aeon_write_command(&cmd_buf_data, dma->command_bufs[cmdi]);
+ dma->aeon_commands[cmdi] = cmd;
/*
- * If we wrapped to the begining of the ring, validate the jump
- * descriptor. (Not needed on the very first time command -- but it
- * doesn't hurt.)
+ * .p for command/result already set
*/
- if (local_ring_pos == 0) {
- const u_int32_t jmp_masks = masks | AEON_DESCRIPT_JUMP;
+ dma->cmdr[cmdi].l = cmdlen | AEON_D_VALID | AEON_D_LAST |
+ AEON_D_MASKDONEIRQ;
- dma->command_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
- dma->source_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
- dma->dest_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
- dma->result_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
- }
- /*
- * "pointer" values for command and result descriptors are already set
- */
- dma->command_ring[local_ring_pos].length = command_length | masks;
+ for (i = 0; i < nchunks; i++) {
+ int last = 0;
+
+ if (i == nchunks-1)
+ last = AEON_D_LAST;
- dma->source_ring[local_ring_pos].pointer = vtophys(cmd->source_buf);
- dma->source_ring[local_ring_pos].length = cmd->source_length | masks;
+ if (dma->srci == AEON_D_RSIZE) {
+ srci = 0, dma->srci = 1;
+ dma->srcr[AEON_D_RSIZE].l = AEON_D_VALID |
+ AEON_D_MASKDONEIRQ | AEON_D_JUMP;
+ } else
+ srci = dma->srci++;
+ dma->srcu++;
- dma->dest_ring[local_ring_pos].pointer = vtophys(cmd->dest_buf);
- dma->dest_ring[local_ring_pos].length = cmd->dest_length | masks;
+ dma->srcr[srci].p = vtophys(packp[i]);
+ dma->srcr[srci].l = packl[i] | AEON_D_VALID |
+ AEON_D_MASKDONEIRQ | last;
+ if (dma->dsti == AEON_D_RSIZE) {
+ dsti = 0, dma->dsti = 1;
+ dma->dstr[AEON_D_RSIZE].l = AEON_D_VALID |
+ AEON_D_MASKDONEIRQ | AEON_D_JUMP;
+ } else
+ dsti = dma->dsti++;
+ dma->dstu++;
+
+ dma->dstr[dsti].p = vtophys(packp[i]);
+ dma->dstr[dsti].l = packl[i] | AEON_D_VALID |
+ AEON_D_MASKDONEIRQ | last;
+ }
/*
- * Unlike other descriptors, we don't mask done interrupt from
- * result descriptor.
+ * Unlike other descriptors, we don't mask done interrupt from
+ * result descriptor.
*/
- dma->result_ring[local_ring_pos].length =
- AEON_MAX_RESULT_LENGTH | AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST;
+ dma->resr[resi].l = AEON_MAX_RESULT | AEON_D_VALID | AEON_D_LAST;
/*
- * We don't worry about missing an interrupt (which a waiting
- * on command interrupt salvages us from), unless there is more
- * than one command in the queue.
+ * We don't worry about missing an interrupt (which a waiting
+ * on command interrupt salvages us from), unless there is more
+ * than one command in the queue.
*/
if (dma->slots_in_use > 1) {
- WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE,
+ WRITE_REG_1(sc, AEON_IRQEN,
AEON_INTR_ON_RESULT_DONE | AEON_INTR_ON_COMMAND_WAITING);
}
+
/*
- * If not given a callback routine, we block until the dest data is
- * ready. (Setting interrupt timeout at 3 seconds.)
+ * If not given a callback routine, we block until the dest data is
+ * ready. (Setting interrupt timeout at 3 seconds.)
*/
if (cmd->dest_ready_callback == NULL) {
-#if 0
printf("%s: no callback -- we're sleeping\n",
sc->sc_dv.dv_xname);
-#endif
- err = tsleep((caddr_t) & dma->result_ring[local_ring_pos], PZERO, "CRYPT",
+ error = tsleep((caddr_t) & dma->resr[resi], PZERO, "CRYPT",
hz * 3);
- if (err != 0)
+ if (error != 0)
printf("%s: timed out waiting for interrupt"
" -- tsleep() exited with %d\n",
- sc->sc_dv.dv_xname, err);
+ sc->sc_dv.dv_xname, error);
}
-#if 0
+
printf("%s: command executed"
" -- Status Register 0x%08x"
" -- Interrupt Enable Reg 0x%08x\n",
sc->sc_dv.dv_xname,
- READ_REG_1(sc, AEON_STATUS),
- READ_REG_1(sc, AEON_INTERRUPT_ENABLE));
-#endif
-
- /*
- * Turning interupts back on
- */
- splx(oldint);
+ READ_REG_1(sc, AEON_STATUS), READ_REG_1(sc, AEON_IRQEN));
+ splx(s);
return 0; /* success */
}
/*
- * Part of interrupt handler--cleans out done jobs from rings
+ * Part of interrupt handler--cleans out done jobs from rings
*/
void
-aeon_intr_process_ring(sc, dma)
- struct aeon_softc *sc;
- struct aeon_dma *dma;
+aeon_intr_process_ring(struct aeon_softc *sc, struct aeon_dma *dma)
{
- if (dma->slots_in_use > AEON_DESCRIPT_RING_SIZE)
+ if (dma->slots_in_use > AEON_D_RSIZE)
printf("%s: Internal Error -- ring overflow\n",
sc->sc_dv.dv_xname);
while (dma->slots_in_use > 0) {
- u_int32_t wake_pos = dma->wakeup_ring_pos;
+ u_int32_t wake_pos = dma->wakeup_rpos;
struct aeon_command *cmd = dma->aeon_commands[wake_pos];
- /*
- * If still valid, stop processing
- */
- if (dma->result_ring[wake_pos].length & AEON_DESCRIPT_VALID)
+ /* if still valid, stop processing */
+ if (dma->resr[wake_pos].l & AEON_D_VALID)
break;
if (AEON_USING_MAC(cmd->flags) && (cmd->flags & AEON_DECODE)) {
u_int8_t *result_buf = dma->result_bufs[wake_pos];
+
cmd->result_status = (result_buf[8] & 0x2) ? AEON_MAC_BAD : 0;
printf("%s: byte index 8 of result 0x%02x\n",
sc->sc_dv.dv_xname, (u_int32_t) result_buf[8]);
}
- /*
- * Position is done, notify producer with wakup or callback
- */
- if (cmd->dest_ready_callback == NULL) {
- wakeup((caddr_t) &dma->result_ring[wake_pos]);
- } else {
+
+ /* position is done, notify producer with wakup or callback */
+ if (cmd->dest_ready_callback == NULL)
+ wakeup((caddr_t) &dma->resr[wake_pos]);
+ else
cmd->dest_ready_callback(cmd);
- }
- if (++dma->wakeup_ring_pos == AEON_DESCRIPT_RING_SIZE)
- dma->wakeup_ring_pos = 0;
+ if (++dma->wakeup_rpos == AEON_D_RSIZE)
+ dma->wakeup_rpos = 0;
dma->slots_in_use--;
}
-
}
-/*
- * Purpose: Interrupt handler. The argument passed is the device
- * structure for the board that generated the interrupt.
- * XXX: Remove hardcoded status checking/setting values.
- */
int
aeon_intr(arg)
void *arg;
{
struct aeon_softc *sc = arg;
struct aeon_dma *dma = sc->sc_dma;
- int r;
+ int r = 0;
-#if 0
+#if 1
printf("%s: Processing Interrupt"
" -- Status Reg 0x%08x"
" -- Interrupt Enable Reg 0x%08x"
" -- slots in use %u\n",
sc->sc_dv.dv_xname,
- READ_REG_1(sc, AEON_STATUS),
- READ_REG_1(sc, AEON_INTERRUPT_ENABLE),
- dma->slots_in_use
- );
+ READ_REG_1(sc, AEON_STATUS), READ_REG_1(sc, AEON_IRQEN),
+ dma->slots_in_use);
#endif
if (dma->slots_in_use == 0 && (READ_REG_1(sc, AEON_STATUS) & (1 << 2))) {
/*
* If no slots to process and we received a "waiting on
- * result" interrupt, we disable the "waiting on result" (by
- * clearing it).
+ * result" interrupt, we disable the "waiting on result"
+ * (by clearing it).
*/
- WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE,
- AEON_INTR_ON_RESULT_DONE);
- r = 1;
+ WRITE_REG_1(sc, AEON_IRQEN, AEON_INTR_ON_RESULT_DONE);
+ r = 1;
} else {
aeon_intr_process_ring(sc, dma);
r = 1;
}
-#if 0
+#if 1
printf("%s: exiting interrupt handler -- slots in use %u\n",
sc->sc_dv.dv_xname, dma->slots_in_use);
#endif
/*
- * Clear "result done" and "waiting on command ring" flags in status
- * register. If we still have slots to process and we received a
- * waiting interrupt, this will interupt us again.
+ * Clear "result done" and "waiting on command ring" flags in status
+ * register. If we still have slots to process and we received a
+ * waiting interrupt, this will interupt us again.
*/
WRITE_REG_1(sc, AEON_STATUS, (1 << 20) | (1 << 2));
return (r);
}
-
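
The descriptor chaining above also depends on the jump descriptor kept in slot AEON_D_RSIZE of each ring: whenever a producer index (cmdi/srci/dsti/resi) reaches that slot, the code re-arms the jump back to slot 0 and wraps. A sketch of that claim-next-slot step follows; the struct and helper names are hypothetical (the diff only shows the .p/.l descriptor members), and the driver repeats this logic inline for each ring rather than calling a helper.

struct aeon_desc {			/* member names as used in the diff */
	volatile u_int32_t p;		/* physical address */
	volatile u_int32_t l;		/* length and AEON_D_* flag bits */
};

/*
 * Claim the next producer slot in a ring of AEON_D_RSIZE entries.
 * Entry AEON_D_RSIZE is a jump descriptor pointing the chip back to
 * entry 0; on wrap it is re-validated so the chip follows it.
 */
static int
aeon_ring_next(struct aeon_desc *ring, int *idx)
{
	int slot;

	if (*idx == AEON_D_RSIZE) {
		ring[AEON_D_RSIZE].l = AEON_D_VALID | AEON_D_LAST |
		    AEON_D_MASKDONEIRQ | AEON_D_JUMP;
		slot = 0;
		*idx = 1;
	} else
		slot = (*idx)++;
	return (slot);
}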