summaryrefslogtreecommitdiff
path: root/sys/dev
diff options
context:
space:
mode:
authorTheo de Raadt <deraadt@cvs.openbsd.org>1999-02-19 02:52:21 +0000
committerTheo de Raadt <deraadt@cvs.openbsd.org>1999-02-19 02:52:21 +0000
commitc610dcf2362fdd8769ee5ab5581422f649d94597 (patch)
tree73ed8b665c6c39cac7c29c81e4cc062d777a8fe7 /sys/dev
parent491baeb0c006021a661e2f6bf5e243b75ae5cd3c (diff)
invertex aeon driver base
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/pci/aeon.c1010
-rw-r--r--sys/dev/pci/aeonreg.h313
-rw-r--r--sys/dev/pci/aeonvar.h287
-rw-r--r--sys/dev/pci/hifn7751.c1010
-rw-r--r--sys/dev/pci/hifn7751reg.h313
-rw-r--r--sys/dev/pci/hifn7751var.h287
6 files changed, 3220 insertions, 0 deletions
diff --git a/sys/dev/pci/aeon.c b/sys/dev/pci/aeon.c
new file mode 100644
index 00000000000..ddb83650ec2
--- /dev/null
+++ b/sys/dev/pci/aeon.c
@@ -0,0 +1,1010 @@
+/* $OpenBSD: aeon.c,v 1.1 1999/02/19 02:52:19 deraadt Exp $ */
+
+/*
+ * Invertex AEON driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ * Copyright (c) 1999 Theo de Raadt
+ *
+ * This driver is based on a previous driver by Invertex, for which they
+ * requested: Please send any comments, feedback, bug-fixes, or feature
+ * requests to software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/errno.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <machine/pmap.h>
+#include <sys/device.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+#include <dev/pci/aeonvar.h>
+#include <dev/pci/aeonreg.h>
+
+#define AEON_DEBUG
+
+/*
+ * Prototypes and count for the pci_device structure
+ */
+int aeon_probe __P((struct device *, void *, void *));
+void aeon_attach __P((struct device *, struct device *, void *));
+
+void aeon_reset_board __P((struct aeon_softc *));
+int aeon_enable_crypto __P((struct aeon_softc *));
+void aeon_init_dma __P((struct aeon_softc *));
+void aeon_init_pci_registers __P((struct aeon_softc *));
+int aeon_ram_setting_okay __P((struct aeon_softc *));
+int aeon_intr __P((void *));
+u_int32_t aeon_write_command __P((const struct aeon_command_buf_data *,
+ u_int8_t *));
+int aeon_build_command __P((const struct aeon_command * cmd,
+ struct aeon_command_buf_data *));
+void aeon_intr_process_ring __P((struct aeon_softc *, struct aeon_dma *));
+
+/* Autoconf glue: attach structure and driver definition. */
+struct cfattach aeon_ca = {
+	sizeof(struct aeon_softc), aeon_probe, aeon_attach,
+};
+
+struct cfdriver aeon_cd = {
+	0, "aeon", DV_DULL
+};
+
+/*
+ * Table of all attached boards, used to distribute crypto requests
+ * round-robin (see aeon_crypto()).
+ */
+int aeon_num_devices = 0;
+struct aeon_softc *aeon_devices[AEON_MAX_DEVICES];
+
+
+/*
+ * Match the Invertex AEON board by PCI vendor and product ID.
+ * Returns 1 on a match, 0 otherwise.
+ */
+int
+aeon_probe(parent, match, aux)
+	struct device *parent;
+	void *match;
+	void *aux;
+{
+	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
+
+	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INVERTEX)
+		return (0);
+	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_INVERTEX_AEON)
+		return (0);
+	return (1);
+}
+
+/*
+ * Purpose: One time initialization for the device performed at bootup:
+ *          enable PCI bus mastering, map both register windows,
+ *          allocate the contiguous DMA descriptor area, unlock the
+ *          crypto engine, detect DRAM vs. SRAM models, and establish
+ *          the interrupt handler.
+ */
+void
+aeon_attach(parent, self, aux)
+	struct device *parent, *self;
+	void *aux;
+{
+	struct aeon_softc *sc = (struct aeon_softc *)self;
+	struct pci_attach_args *pa = aux;
+	pci_chipset_tag_t pc = pa->pa_pc;
+	pci_intr_handle_t ih;
+	const char *intrstr = NULL;
+	bus_addr_t iobase;
+	bus_size_t iosize;
+	u_int32_t cmd;
+
+	/* Enable I/O, memory and bus-master access, then read back to
+	 * verify the device actually accepted memory mapping. */
+	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
+	cmd |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
+	    PCI_COMMAND_MASTER_ENABLE;
+	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
+	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
+
+	if (!(cmd & PCI_COMMAND_MEM_ENABLE)) {
+		printf(": failed to enable memory mapping\n");
+		return;
+	}
+
+	/* Map BAR0 and BAR1 register windows. */
+	if (pci_mem_find(pc, pa->pa_tag, PCI_BASE_ADDRESS_0, &iobase, &iosize,
+	    NULL)) {
+		printf(": can't find mem space\n");
+		return;
+	}
+	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sc_sh0)) {
+		printf(": can't map mem space\n");
+		return;
+	}
+	sc->sc_st0 = pa->pa_memt;
+
+	if (pci_mem_find(pc, pa->pa_tag, PCI_BASE_ADDRESS_1, &iobase, &iosize,
+	    NULL)) {
+		printf(": can't find mem space\n");
+		return;
+	}
+	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sc_sh1)) {
+		printf(": can't map mem space\n");
+		return;
+	}
+	sc->sc_st1 = pa->pa_memt;
+	printf(" mem %x %x", sc->sc_sh0, sc->sc_sh1);
+
+	/*
+	 * The descriptor rings must live in physically contiguous
+	 * memory the chip can DMA to.  Check for allocation failure
+	 * before touching the memory (the old code bzero'd a possible
+	 * NULL pointer).
+	 */
+	sc->sc_dma = (struct aeon_dma *)vm_page_alloc_contig(sizeof(*sc->sc_dma),
+	    0x100000, 0xffffffff, PAGE_SIZE);
+	if (sc->sc_dma == NULL) {
+		printf(": can't allocate dma memory\n");
+		return;
+	}
+	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
+
+	aeon_reset_board(sc);
+
+	if (aeon_enable_crypto(sc) != 0) {
+		printf("%s: crypto enabling failed\n",
+		    sc->sc_dv.dv_xname);
+		return;
+	}
+
+	aeon_init_dma(sc);
+	aeon_init_pci_registers(sc);
+
+	if (aeon_ram_setting_okay(sc) != 0)
+		sc->is_dram_model = 1;
+
+	/*
+	 * Reinitialize again, since the DRAM/SRAM detection shifted our ring
+	 * pointers and may have changed the value we send to the RAM Config
+	 * Register.
+	 */
+	aeon_reset_board(sc);
+	aeon_init_dma(sc);
+	aeon_init_pci_registers(sc);
+
+	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
+	    pa->pa_intrline, &ih)) {
+		printf(": couldn't map interrupt\n");
+		return;
+	}
+	intrstr = pci_intr_string(pc, ih);
+	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, aeon_intr, sc,
+	    self->dv_xname);
+	if (sc->sc_ih == NULL) {
+		printf(": couldn't establish interrupt\n");
+		if (intrstr != NULL)
+			printf(" at %s", intrstr);
+		printf("\n");
+		return;
+	}
+
+	/*
+	 * Register the board for round-robin dispatch.  Bound the index
+	 * so a machine with more than AEON_MAX_DEVICES boards cannot
+	 * overflow aeon_devices[] (the old code had no check).
+	 */
+	if (aeon_num_devices >= AEON_MAX_DEVICES) {
+		printf(": too many aeon devices\n");
+		return;
+	}
+	aeon_devices[aeon_num_devices] = sc;
+	aeon_num_devices++;
+
+	printf(": %s\n", intrstr);
+}
+
+/*
+ * Purpose: Resets the board.  Register values are left as they are
+ *          from the reset (i.e. initial values are assigned elsewhere).
+ */
+void
+aeon_reset_board(sc)
+	struct aeon_softc *sc;
+{
+	/*
+	 * Disable DMA polling: keep the "no board reset" / "no DMA
+	 * reset" bits and the mandatory bit set, and zero every other
+	 * field (including the polling field).
+	 */
+	WRITE_REG_1(sc, AEON_DMA_CFG, AEON_DMA_CFG_NOBOARDRESET |
+	    AEON_DMA_CFG_NODMARESET | AEON_DMA_CFG_NEED);
+
+	/*
+	 * Now that polling has been disabled, we have to wait 1 ms
+	 * before resetting the board.
+	 */
+	DELAY(1000);
+
+	/* Reset the board.  We do this by writing zeros to the DMA reset
+	 * field, the BRD reset field, and the mandatory 1 at position 2.
+	 * Every other field is set to zero.
+	 */
+	WRITE_REG_1(sc, AEON_DMA_CFG, AEON_DMA_CFG_NEED);
+
+	/*
+	 * Wait another millisecond for the board to reset.
+	 */
+	DELAY(1000);
+
+	/*
+	 * Turn off the reset!  (No joke.)
+	 */
+	WRITE_REG_1(sc, AEON_DMA_CFG, AEON_DMA_CFG_NOBOARDRESET |
+	    AEON_DMA_CFG_NODMARESET | AEON_DMA_CFG_NEED);
+}
+
+/*
+ * Purpose: Checks to see if crypto is already enabled.  If crypto
+ *          isn't enabled, the unlock procedure is run.  The check is
+ *          important, as enabling crypto twice will lock the board
+ *          until the next reboot.
+ *
+ * Returns: 0 on success.  (Historically this returned -1 when the
+ *          engine could not be unlocked, but the unlock sequence has
+ *          been removed from this revision, so every visible path
+ *          returns 0.)
+ */
+int
+aeon_enable_crypto(sc)
+	struct aeon_softc *sc;
+{
+	u_int32_t encryption_level;
+
+	/*
+	 * The RAM config register's encrypt level bit needs to be set before
+	 * every read performed on the encryption level register.
+	 */
+	WRITE_REG_0(sc, AEON_RAM_CONFIG,
+	    READ_REG_0(sc, AEON_RAM_CONFIG) | (0x1 << 5));
+
+	encryption_level = READ_REG_0(sc, AEON_ENCRYPTION_LEVEL);
+
+	/*
+	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
+	 * next reboot.  0x1020 and 0x1120 are the already-unlocked levels.
+	 */
+	if (encryption_level == 0x1020 || encryption_level == 0x1120) {
+#ifdef AEON_DEBUG
+		printf("%s: Strong Crypto already enabled!\n",
+		    sc->sc_dv.dv_xname);
+#endif
+		return 0;	/* success */
+	}
+
+	/**
+	 **
+	 **  Rest of unlock procedure removed.
+	 **
+	 **
+	 **/
+
+	/* Report the advertised capability level on the console. */
+	switch(encryption_level) {
+	case 0x3020:
+		printf(" no encr/auth");
+		break;
+	case 0x1020:
+		printf(" DES");
+		break;
+	case 0x1120:
+		printf(" FULL");
+		break;
+	default:
+		printf(" disabled");
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Purpose: Give initial values to the registers listed in the
+ *          "Register Space" section of the AEON Software Development
+ *          reference manual.
+ */
+void
+aeon_init_pci_registers(sc)
+	struct aeon_softc *sc;
+{
+	u_int32_t ram_config;
+
+	/*
+	 * Write fixed values needed by the Initialization registers
+	 */
+	WRITE_REG_0(sc, AEON_INIT_1, 0x2);
+	WRITE_REG_0(sc, AEON_INIT_2, 0x400);
+	WRITE_REG_0(sc, AEON_INIT_3, 0x200);
+
+	/*
+	 * Write the physical addresses of all 4 descriptor rings
+	 */
+	WRITE_REG_1(sc, AEON_COMMAND_RING_ADDR,
+	    vtophys(sc->sc_dma->command_ring));
+	WRITE_REG_1(sc, AEON_SOURCE_RING_ADDR,
+	    vtophys(sc->sc_dma->source_ring));
+	WRITE_REG_1(sc, AEON_DEST_RING_ADDR,
+	    vtophys(sc->sc_dma->dest_ring));
+	WRITE_REG_1(sc, AEON_RESULT_RING_ADDR,
+	    vtophys(sc->sc_dma->result_ring));
+
+	/*
+	 * Write status register
+	 */
+	WRITE_REG_1(sc, AEON_STATUS, AEON_INIT_STATUS_REG);
+
+	/*
+	 * Write registers which had their initial values defined
+	 * elsewhere.  The "Encryption level register" is the only
+	 * documented register not initialized by this routine (it's read
+	 * only).
+	 */
+	WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE, AEON_INIT_INTERRUPT_ENABLE_REG);
+
+	/* Bit 7 selects big-endian operation, bit 4 the DRAM model. */
+	ram_config = AEON_INIT_RAM_CONFIG_REG
+#if BYTE_ORDER == BIG_ENDIAN
+	    | (0x1 << 7)
+#endif
+	    | (sc->is_dram_model << 4);
+	WRITE_REG_0(sc, AEON_RAM_CONFIG, ram_config);
+	WRITE_REG_0(sc, AEON_EXPAND, AEON_INIT_EXPAND_REG);
+	WRITE_REG_1(sc, AEON_DMA_CFG, AEON_INIT_DMA_CONFIG_REG);
+}
+
+/*
+ * Purpose: There are both DRAM and SRAM models of the aeon board.
+ *          A bit in the "ram configuration register" needs to be
+ *          set according to the model.  The driver will guess one
+ *          way or the other -- and then call this routine to verify:
+ *          it writes 8 bytes to chip RAM and reads them back; a
+ *          mismatch means the current guess is wrong.
+ * Returns:
+ *           0: RAM setting okay
+ *          -1: Current RAM setting in error
+ */
+int
+aeon_ram_setting_okay(sc)
+	struct aeon_softc *sc;
+{
+	/* Opcode 0x3 = RAM write of 8 bytes, 0x2 = RAM read of 8 bytes. */
+	aeon_base_command_t write_command = {(0x3 << 13), 0, 8, 0};
+	aeon_base_command_t read_command = {(0x2 << 13), 0, 0, 8};
+	u_int8_t data[8] = {'1', '2', '3', '4', '5', '6', '7', '8'};
+	u_int8_t *source_buf, *dest_buf;
+	struct aeon_dma *dma = sc->sc_dma;
+
+	const u_int32_t masks = AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST |
+	    AEON_DESCRIPT_MASK_DONE_IRQ;
+
+#if (AEON_DESCRIPT_RING_SIZE < 3)
+#error "descriptor ring size too small DRAM/SRAM check"
+#endif
+
+	/*
+	 * We steal the 8 bytes needed for both the source and dest buffers
+	 * from the 3rd slot that the DRAM/SRAM test won't use.
+	 */
+	source_buf = sc->sc_dma->command_bufs[2];
+	dest_buf = sc->sc_dma->result_bufs[2];
+
+	/*
+	 * Build write command in slot 0
+	 */
+	*(aeon_base_command_t *) sc->sc_dma->command_bufs[0] = write_command;
+	bcopy(data, source_buf, sizeof(data));
+
+	dma->source_ring[0].pointer = vtophys(source_buf);
+	dma->dest_ring[0].pointer = vtophys(dest_buf);
+
+	dma->command_ring[0].length = 16 | masks;
+	dma->source_ring[0].length = 8 | masks;
+	dma->dest_ring[0].length = 8 | masks;
+	dma->result_ring[0].length = AEON_MAX_RESULT_LENGTH | masks;
+
+	/*
+	 * Let the write command execute.  NOTE(review): completion is
+	 * assumed after a fixed 1 ms delay rather than polled -- confirm
+	 * this is long enough on slow boards.
+	 */
+	DELAY(1000);
+
+	/* The chip clears VALID on the result descriptor when done. */
+	if (dma->result_ring[0].length & AEON_DESCRIPT_VALID)
+		printf("%s: SRAM/DRAM detection error -- result[0] valid still set\n",
+		    sc->sc_dv.dv_xname);
+
+	/*
+	 * Build read command in slot 1
+	 */
+	*(aeon_base_command_t *) sc->sc_dma->command_bufs[1] = read_command;
+
+	dma->source_ring[1].pointer = vtophys(source_buf);
+	dma->dest_ring[1].pointer = vtophys(dest_buf);
+
+	dma->command_ring[1].length = 16 | masks;
+	dma->source_ring[1].length = 8 | masks;
+	dma->dest_ring[1].length = 8 | masks;
+	dma->result_ring[1].length = AEON_MAX_RESULT_LENGTH | masks;
+
+	/*
+	 * Let read command execute
+	 */
+	DELAY(1000);
+
+	if (dma->result_ring[1].length & AEON_DESCRIPT_VALID)
+		printf("%s: SRAM/DRAM detection error -- result[1] valid still set\n",
+		    sc->sc_dv.dv_xname);
+
+	/* Data survives the round trip only if the RAM setting is right. */
+	return (memcmp(dest_buf, data, sizeof(data)) == 0) ? 0 : -1;
+}
+
+/*
+ * Set up the static portions of the four descriptor rings: the
+ * physical addresses of the per-slot command/result buffers, plus the
+ * wrap-around descriptor at the end of each ring, which points back
+ * at that ring's start.
+ */
+void
+aeon_init_dma(sc)
+	struct aeon_softc *sc;
+{
+	struct aeon_dma *dma = sc->sc_dma;
+	int slot;
+
+	/* Command and result buffers never move; fill pointers once. */
+	for (slot = 0; slot < AEON_DESCRIPT_RING_SIZE; slot++) {
+		dma->command_ring[slot].pointer =
+		    vtophys(dma->command_bufs[slot]);
+		dma->result_ring[slot].pointer =
+		    vtophys(dma->result_bufs[slot]);
+	}
+
+	/* The extra descriptor past each ring's end jumps to its head. */
+	dma->command_ring[AEON_DESCRIPT_RING_SIZE].pointer =
+	    vtophys(dma->command_ring);
+	dma->source_ring[AEON_DESCRIPT_RING_SIZE].pointer =
+	    vtophys(dma->source_ring);
+	dma->dest_ring[AEON_DESCRIPT_RING_SIZE].pointer =
+	    vtophys(dma->dest_ring);
+	dma->result_ring[AEON_DESCRIPT_RING_SIZE].pointer =
+	    vtophys(dma->result_ring);
+}
+
+/*
+ * Purpose: Writes out the raw command buffer space.  Returns the
+ *          command buffer size.
+ *
+ * Layout produced: base command, optional MAC command, optional
+ * crypt command, then any new MAC key, crypt key and IV.  cmd_data
+ * must have been prepared by aeon_build_command() -- the mac_cmd and
+ * crypt_cmd mask words are examined even when those structures are
+ * not in use, and rely on that routine's bzero of the whole struct.
+ */
+u_int32_t
+aeon_write_command(
+	const struct aeon_command_buf_data * cmd_data,
+	u_int8_t * command_buf
+)
+{
+	u_int8_t *command_buf_pos = command_buf;
+	const aeon_base_command_t *base_cmd = &cmd_data->base_cmd;
+	const aeon_mac_command_t *mac_cmd = &cmd_data->mac_cmd;
+	const aeon_crypt_command_t *crypt_cmd = &cmd_data->crypt_cmd;
+
+	int using_mac = base_cmd->masks & AEON_BASE_CMD_MAC;
+	int using_crypt = base_cmd->masks & AEON_BASE_CMD_CRYPT;
+
+	/*
+	 * Write base command structure
+	 */
+	*((aeon_base_command_t *) command_buf_pos) = *base_cmd;
+	command_buf_pos += sizeof(aeon_base_command_t);
+
+	/*
+	 * Write MAC command structure
+	 */
+	if (using_mac) {
+		*((aeon_mac_command_t *) command_buf_pos) = *mac_cmd;
+		command_buf_pos += sizeof(aeon_mac_command_t);
+	}
+	/*
+	 * Write encryption command structure
+	 */
+	if (using_crypt) {
+		*((aeon_crypt_command_t *) command_buf_pos) = *crypt_cmd;
+		command_buf_pos += sizeof(aeon_crypt_command_t);
+	}
+	/*
+	 * Write MAC key
+	 */
+	if (mac_cmd->masks & AEON_MAC_CMD_NEW_KEY) {
+		bcopy(cmd_data->mac_key, command_buf_pos, AEON_MAC_KEY_LENGTH);
+		command_buf_pos += AEON_MAC_KEY_LENGTH;
+	}
+	/*
+	 * Write crypto key -- key length depends on the algorithm chosen
+	 */
+	if (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_KEY) {
+		u_int32_t alg = crypt_cmd->masks & AEON_CRYPT_CMD_ALG_MASK;
+		u_int32_t key_len = (alg == AEON_CRYPT_CMD_ALG_DES) ?
+		AEON_DES_KEY_LENGTH : AEON_3DES_KEY_LENGTH;
+		bcopy(cmd_data->crypt_key, command_buf_pos, key_len);
+		command_buf_pos += key_len;
+	}
+	/*
+	 * Write crypto iv
+	 */
+	if (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_IV) {
+		bcopy(cmd_data->initial_vector, command_buf_pos, AEON_IV_LENGTH);
+		command_buf_pos += AEON_IV_LENGTH;
+	}
+	/*
+	 * Write 8 bytes of zero's if we're not sending crypt or MAC
+	 * structures
+	 */
+	if (!(base_cmd->masks & AEON_BASE_CMD_MAC) &&
+	    !(base_cmd->masks & AEON_BASE_CMD_CRYPT)) {
+		*((u_int32_t *) command_buf_pos) = 0;
+		command_buf_pos += 4;
+		*((u_int32_t *) command_buf_pos) = 0;
+		command_buf_pos += 4;
+	}
+#if 0
+	if ((command_buf_pos - command_buf) > AEON_MAX_COMMAND_LENGTH)
+		printf("aeon: Internal Error -- Command buffer overflow.\n");
+#endif
+
+	return command_buf_pos - command_buf;
+
+}
+
+/*
+ * Purpose: Check command input and build up the structure used to
+ *          write the command buffer later (by aeon_write_command()).
+ *          Returns 0 on success and -1 if bad command input was given.
+ */
+int
+aeon_build_command(
+	const struct aeon_command * cmd,
+	struct aeon_command_buf_data * cmd_buf_data
+)
+{
+#define AEON_COMMAND_CHECKING
+
+	u_int32_t flags = cmd->flags;
+	aeon_base_command_t *base_cmd = &cmd_buf_data->base_cmd;
+	aeon_mac_command_t *mac_cmd = &cmd_buf_data->mac_cmd;
+	aeon_crypt_command_t *crypt_cmd = &cmd_buf_data->crypt_cmd;
+	u_int mac_length;
+
+#ifdef AEON_COMMAND_CHECKING
+	int dest_diff;
+#endif
+
+	/* Start from a clean slate; aeon_write_command() relies on the
+	 * unused mac/crypt mask words being zero. */
+	bzero(cmd_buf_data, sizeof(struct aeon_command_buf_data));
+
+
+#ifdef AEON_COMMAND_CHECKING
+	/* Exactly one of encode/decode must be requested. */
+	if (!(!!(flags & AEON_DECODE) ^ !!(flags & AEON_ENCODE))) {
+		printf("aeon: encode/decode setting error\n");
+		return -1;
+	}
+	if ((flags & AEON_CRYPT_DES) && (flags & AEON_CRYPT_3DES)) {
+		printf("aeon: Too many crypto algorithms set in command\n");
+		return -1;
+	}
+	if ((flags & AEON_MAC_SHA1) && (flags & AEON_MAC_MD5)) {
+		printf("aeon: Too many MAC algorithms set in command\n");
+		return -1;
+	}
+#endif
+
+
+	/*
+	 * Compute the mac value length -- leave at zero if not MAC'ing
+	 */
+	mac_length = 0;
+	if (AEON_USING_MAC(flags)) {
+		mac_length = (flags & AEON_MAC_TRUNC) ? AEON_MAC_TRUNC_LENGTH :
+		    ((flags & AEON_MAC_MD5) ? AEON_MD5_LENGTH : AEON_SHA1_LENGTH);
+	}
+#ifdef AEON_COMMAND_CHECKING
+	/*
+	 * Check for valid src/dest buf sizes
+	 */
+
+	/*
+	 * XXX XXX  We need to include header counts into all these
+	 * checks!!!!
+	 */
+
+	if (cmd->source_length <= mac_length) {
+		printf("aeon: command source buffer has no data\n");
+		return -1;
+	}
+	/* Encodes append the MAC to the dest; decodes strip it. */
+	dest_diff = (flags & AEON_ENCODE) ? mac_length : -mac_length;
+	if (cmd->dest_length < cmd->source_length + dest_diff) {
+		printf("aeon: command dest length %u too short -- needed %u\n",
+		    cmd->dest_length, cmd->source_length + dest_diff);
+		return -1;
+	}
+#endif
+
+
+	/**
+	 **  Building up base command
+	 **
+	 **/
+
+	/*
+	 * Set MAC bit
+	 */
+	if (AEON_USING_MAC(flags))
+		base_cmd->masks |= AEON_BASE_CMD_MAC;
+
+	/* Set Encrypt bit */
+	if (AEON_USING_CRYPT(flags))
+		base_cmd->masks |= AEON_BASE_CMD_CRYPT;
+
+	/*
+	 * Set Decode bit
+	 */
+	if (flags & AEON_DECODE)
+		base_cmd->masks |= AEON_BASE_CMD_DECODE;
+
+	/*
+	 * Set total source and dest counts.  These values are the same as the
+	 * values set in the length field of the source and dest descriptor rings.
+	 */
+	base_cmd->total_source_count = cmd->source_length;
+	base_cmd->total_dest_count = cmd->dest_length;
+
+	/*
+	 * XXX -- We need session number range checking...
+	 */
+	base_cmd->session_num = cmd->session_num;
+
+	/**
+	 **  Building up mac command
+	 **
+	 **/
+	if (AEON_USING_MAC(flags)) {
+
+		/*
+		 * Set the MAC algorithm and trunc setting
+		 */
+		mac_cmd->masks |= (flags & AEON_MAC_MD5) ?
+		    AEON_MAC_CMD_ALG_MD5 : AEON_MAC_CMD_ALG_SHA1;
+		if (flags & AEON_MAC_TRUNC)
+			mac_cmd->masks |= AEON_MAC_CMD_TRUNC;
+
+		/*
+		 * We always use HMAC mode, assume MAC values are appended to the
+		 * source buffer on decodes and we append them to the dest buffer
+		 * on encodes, and order auth/encryption engines as needed by
+		 * IPSEC
+		 */
+		mac_cmd->masks |= AEON_MAC_CMD_MODE_HMAC | AEON_MAC_CMD_APPEND |
+		    AEON_MAC_CMD_POS_IPSEC;
+
+		/*
+		 * Setup to send new MAC key if needed.
+		 */
+		if (flags & AEON_MAC_CMD_NEW_KEY) {
+			mac_cmd->masks |= AEON_MAC_CMD_NEW_KEY;
+			cmd_buf_data->mac_key = cmd->mac_key;
+		}
+		/*
+		 * Set the mac header skip and source count.
+		 */
+		mac_cmd->source_count = cmd->source_length - cmd->mac_header_skip;
+		if (flags & AEON_DECODE)
+			mac_cmd->source_count -= mac_length;
+	}
+	/**
+	 **  Building up crypto command
+	 **
+	 **/
+	if (AEON_USING_CRYPT(flags)) {
+
+		/*
+		 * Set the encryption algorithm bits.
+		 */
+		crypt_cmd->masks |= (flags & AEON_CRYPT_DES) ?
+		    AEON_CRYPT_CMD_ALG_DES : AEON_CRYPT_CMD_ALG_3DES;
+
+		/* We always use CBC mode and send a new IV (as needed by
+		 * IPSec). */
+		crypt_cmd->masks |= AEON_CRYPT_CMD_MODE_CBC | AEON_CRYPT_CMD_NEW_IV;
+
+		/*
+		 * Setup to send new encrypt key if needed.
+		 */
+		if (flags & AEON_CRYPT_CMD_NEW_KEY) {
+			crypt_cmd->masks |= AEON_CRYPT_CMD_NEW_KEY;
+			cmd_buf_data->crypt_key = cmd->crypt_key;
+		}
+		/*
+		 * Set the encrypt header skip and source count.
+		 */
+		crypt_cmd->header_skip = cmd->crypt_header_skip;
+		crypt_cmd->source_count = cmd->source_length - cmd->crypt_header_skip;
+		if (flags & AEON_DECODE)
+			crypt_cmd->source_count -= mac_length;
+
+
+#ifdef AEON_COMMAND_CHECKING
+		/* DES/3DES operate on 8-byte blocks. */
+		if (crypt_cmd->source_count % 8 != 0) {
+			printf("aeon: Error -- encryption source %u not a multiple of 8!\n",
+			    crypt_cmd->source_count);
+			return -1;
+		}
+#endif
+	}
+	cmd_buf_data->initial_vector = cmd->initial_vector;
+
+
+#if 0
+	printf("aeon: command parameters"
+	    " -- session num %u"
+	    " -- base t.s.c: %u"
+	    " -- base t.d.c: %u"
+	    " -- mac h.s. %u s.c. %u"
+	    " -- crypt h.s. %u s.c. %u\n",
+	    base_cmd->session_num,
+	    base_cmd->total_source_count,
+	    base_cmd->total_dest_count,
+	    mac_cmd->header_skip,
+	    mac_cmd->source_count,
+	    crypt_cmd->header_skip,
+	    crypt_cmd->source_count
+	    );
+#endif
+
+	return 0;		/* success */
+}
+
+
+/*
+ * Function: aeon_crypto
+ *
+ * Queue one crypto command: build the command buffer, pick a board
+ * (round robin), wait for -- or, with AEON_DMA_FULL_NOBLOCK, fail on
+ * -- full rings, fill in all four descriptor rings, and when no
+ * completion callback is given sleep until the result comes back or
+ * a 3 second timeout expires.
+ *
+ * Returns 0 on success, AEON_CRYPTO_BAD_INPUT for malformed commands,
+ * or AEON_CRYPTO_RINGS_FULL in the non-blocking full-ring case.
+ */
+int
+aeon_crypto(struct aeon_command * cmd)
+{
+	u_int32_t command_length;
+
+	u_int32_t local_ring_pos;
+	int err;
+	int oldint;
+	static u_int32_t current_device = 0;
+	struct aeon_softc *sc;
+	struct aeon_dma *dma;
+	const u_int32_t masks = AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST |
+	    AEON_DESCRIPT_MASK_DONE_IRQ;
+
+	struct aeon_command_buf_data cmd_buf_data;
+
+	if (aeon_build_command(cmd, &cmd_buf_data) != 0)
+		return AEON_CRYPTO_BAD_INPUT;
+
+	/*
+	 * Block interrupts while we manipulate the rings.
+	 */
+	oldint = splimp();
+
+	/* Pick the aeon board to send the data to.  Right now we use a round
+	 * robin approach. */
+	sc = aeon_devices[current_device++];
+	if (current_device == aeon_num_devices)
+		current_device = 0;
+	dma = sc->sc_dma;
+
+	if (dma->slots_in_use == AEON_DESCRIPT_RING_SIZE) {
+
+		if (cmd->flags & AEON_DMA_FULL_NOBLOCK) {
+			/*
+			 * Bug fix: restore the interrupt priority level
+			 * before bailing out.  The previous code returned
+			 * while still at splimp(), leaving network
+			 * interrupts blocked forever.
+			 */
+			splx(oldint);
+			return AEON_CRYPTO_RINGS_FULL;
+		}
+
+		do {
+#ifdef AEON_DEBUG
+			printf("%s: Waiting for unused ring.\n",
+			    sc->sc_dv.dv_xname);
+#endif
+			/* sleep for minimum timeout */
+			tsleep((caddr_t) dma, PZERO, "QFULL", 1);
+
+		} while (dma->slots_in_use == AEON_DESCRIPT_RING_SIZE);
+	}
+	dma->slots_in_use++;
+
+	/* Claim the next ring slot, wrapping at the end. */
+	if (dma->ring_pos == AEON_DESCRIPT_RING_SIZE) {
+		local_ring_pos = 0;
+		dma->ring_pos = 1;
+	} else {
+		local_ring_pos = dma->ring_pos++;
+	}
+
+
+	command_length =
+	    aeon_write_command(&cmd_buf_data, dma->command_bufs[local_ring_pos]);
+
+	dma->aeon_commands[local_ring_pos] = cmd;
+
+	/*
+	 * If we wrapped to the beginning of the ring, validate the jump
+	 * descriptor.  (Not needed on the very first time command -- but it
+	 * doesn't hurt.)
+	 */
+	if (local_ring_pos == 0) {
+		const u_int32_t jmp_masks = masks | AEON_DESCRIPT_JUMP;
+
+		dma->command_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
+		dma->source_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
+		dma->dest_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
+		dma->result_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
+	}
+	/*
+	 * "pointer" values for command and result descriptors are already set
+	 */
+	dma->command_ring[local_ring_pos].length = command_length | masks;
+
+	dma->source_ring[local_ring_pos].pointer = vtophys(cmd->source_buf);
+	dma->source_ring[local_ring_pos].length = cmd->source_length | masks;
+
+	dma->dest_ring[local_ring_pos].pointer = vtophys(cmd->dest_buf);
+	dma->dest_ring[local_ring_pos].length = cmd->dest_length | masks;
+
+
+	/*
+	 * Unlike other descriptors, we don't mask done interrupt from
+	 * result descriptor.
+	 */
+	dma->result_ring[local_ring_pos].length =
+	    AEON_MAX_RESULT_LENGTH | AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST;
+
+	/*
+	 * We don't worry about missing an interrupt (which a waiting
+	 * on command interrupt salvages us from), unless there is more
+	 * than one command in the queue.
+	 */
+	if (dma->slots_in_use > 1) {
+		WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE,
+		    AEON_INTR_ON_RESULT_DONE | AEON_INTR_ON_COMMAND_WAITING);
+	}
+	/*
+	 * If not given a callback routine, we block until the dest data is
+	 * ready.  (Setting interrupt timeout at 3 seconds.)
+	 */
+	if (cmd->dest_ready_callback == NULL) {
+		err = tsleep((caddr_t) & dma->result_ring[local_ring_pos], PZERO, "CRYPT",
+		    hz * 3);
+		if (err != 0)
+			printf("%s: timed out waiting for interrupt"
+			    " -- tsleep() exited with %d\n",
+			    sc->sc_dv.dv_xname, err);
+	}
+
+	/*
+	 * Restore the previous interrupt priority level.
+	 */
+	splx(oldint);
+
+	return 0;		/* success */
+}
+
+/*
+ * Part of interrupt handler -- cleans out done jobs from the rings.
+ * Walks forward from wakeup_ring_pos until it reaches a result
+ * descriptor the chip still owns (VALID bit set), notifying the
+ * producer of each finished slot via wakeup() or callback.
+ */
+void
+aeon_intr_process_ring(sc, dma)
+	struct aeon_softc *sc;
+	struct aeon_dma *dma;
+{
+	/* Sanity check: slots_in_use may never exceed the ring size. */
+	if (dma->slots_in_use > AEON_DESCRIPT_RING_SIZE)
+		printf("%s: Internal Error -- ring overflow\n",
+		    sc->sc_dv.dv_xname);
+
+	while (dma->slots_in_use > 0) {
+		u_int32_t wake_pos = dma->wakeup_ring_pos;
+		struct aeon_command *cmd = dma->aeon_commands[wake_pos];
+
+		/*
+		 * If still valid, stop processing: the chip has not
+		 * finished this slot (or any later one) yet.
+		 */
+		if (dma->result_ring[wake_pos].length & AEON_DESCRIPT_VALID)
+			break;
+
+		/*
+		 * On an authenticated decode, bit 1 of result byte 8
+		 * signals MAC verification failure.
+		 * NOTE(review): the printf below is unconditional (not
+		 * under AEON_DEBUG) -- looks like leftover debug output;
+		 * confirm before relying on quiet operation.
+		 */
+		if (AEON_USING_MAC(cmd->flags) && (cmd->flags & AEON_DECODE)) {
+			u_int8_t *result_buf = dma->result_bufs[wake_pos];
+			cmd->result_status = (result_buf[8] & 0x2) ? AEON_MAC_BAD : 0;
+			printf("%s: byte index 8 of result 0x%02x\n",
+			    sc->sc_dv.dv_xname, (u_int32_t) result_buf[8]);
+		}
+		/*
+		 * Position is done, notify producer with wakeup or callback
+		 */
+		if (cmd->dest_ready_callback == NULL) {
+			wakeup((caddr_t) &dma->result_ring[wake_pos]);
+		} else {
+			cmd->dest_ready_callback(cmd);
+		}
+
+		/* Advance (with wrap) and release the slot. */
+		if (++dma->wakeup_ring_pos == AEON_DESCRIPT_RING_SIZE)
+			dma->wakeup_ring_pos = 0;
+		dma->slots_in_use--;
+	}
+
+}
+
+/*
+ * Purpose: Interrupt handler.  The argument passed is the device
+ *          structure for the board that generated the interrupt.
+ *          XXX: Remove hardcoded status checking/setting values
+ *          (bit 2 = "waiting on command ring", bit 20 = "result done").
+ */
+int
+aeon_intr(arg)
+	void *arg;
+{
+	struct aeon_softc *sc = arg;
+	struct aeon_dma *dma = sc->sc_dma;
+	int r;
+
+	if (dma->slots_in_use == 0 && (READ_REG_1(sc, AEON_STATUS) & (1 << 2))) {
+		/*
+		 * If no slots to process and we received a "waiting on
+		 * result" interrupt, we disable the "waiting on result" (by
+		 * clearing it).
+		 */
+		WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE,
+		    AEON_INTR_ON_RESULT_DONE);
+		r = 1;
+	} else {
+		/* Retire whatever jobs the chip has completed. */
+		aeon_intr_process_ring(sc, dma);
+		r = 1;
+	}
+
+	/*
+	 * Clear "result done" and "waiting on command ring" flags in status
+	 * register.  If we still have slots to process and we received a
+	 * waiting interrupt, this will interrupt us again.
+	 */
+	WRITE_REG_1(sc, AEON_STATUS, (1 << 20) | (1 << 2));
+	return (r);
+}
+
diff --git a/sys/dev/pci/aeonreg.h b/sys/dev/pci/aeonreg.h
new file mode 100644
index 00000000000..ffc4723ee9b
--- /dev/null
+++ b/sys/dev/pci/aeonreg.h
@@ -0,0 +1,313 @@
+/* $OpenBSD: aeonreg.h,v 1.1 1999/02/19 02:52:20 deraadt Exp $ */
+
+/*
+ * Invertex AEON driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ *
+ * Please send any comments, feedback, bug-fixes, or feature requests to
+ * software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __AEON_H__
+#define __AEON_H__
+
+#include <machine/endian.h>
+
+/*
+ * Some PCI configuration space offset defines. The names were made
+ * identical to the names used by the Linux kernel.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits */
+
+/*
+ * Some configurable values for the driver
+ */
+#define AEON_DESCRIPT_RING_SIZE 24
+#define AEON_MAX_DEVICES 4
+
+/*
+ * The values below should be multiples of 4 -- and be large enough to handle
+ * any command the driver implements.
+ */
+#define AEON_MAX_COMMAND_LENGTH 120
+#define AEON_MAX_RESULT_LENGTH 16
+
+/*
+ * aeon_descriptor_t
+ *
+ * Holds an individual descriptor for any of the rings.  The board
+ * reads and writes these in place, hence the volatile qualifiers.
+ */
+typedef struct aeon_descriptor {
+	volatile u_int32_t length;	/* length and status bits */
+	volatile u_int32_t pointer;	/* physical address of the data buffer */
+} aeon_descriptor_t;
+
+/*
+ * Masks for the "length" field of struct aeon_descriptor.
+ */
+#define AEON_DESCRIPT_MASK_DONE_IRQ (0x1 << 25)
+#define AEON_DESCRIPT_LAST (0x1 << 29)
+#define AEON_DESCRIPT_JUMP (0x1 << 30)
+#define AEON_DESCRIPT_VALID (0x1 << 31)
+
+/*
+ * aeon_callback_t
+ *
+ * Type for callback function invoked when dest data is ready.
+ * (aeon_command_t is declared in aeonvar.h, which must be included first.)
+ */
+typedef void (*aeon_callback_t)(aeon_command_t *);
+
+/*
+ * Data structure to hold all 4 rings and any other ring related data.
+ */
+struct aeon_dma {
+	/*
+	 * Descriptor rings.  We add +1 to the size to accommodate the
+	 * jump descriptor.
+	 */
+	struct aeon_descriptor command_ring[AEON_DESCRIPT_RING_SIZE + 1];
+	struct aeon_descriptor source_ring[AEON_DESCRIPT_RING_SIZE + 1];
+	struct aeon_descriptor dest_ring[AEON_DESCRIPT_RING_SIZE + 1];
+	struct aeon_descriptor result_ring[AEON_DESCRIPT_RING_SIZE + 1];
+
+	aeon_command_t *aeon_commands[AEON_DESCRIPT_RING_SIZE ];
+
+	u_char command_bufs[AEON_DESCRIPT_RING_SIZE][AEON_MAX_COMMAND_LENGTH];
+	u_char result_bufs[AEON_DESCRIPT_RING_SIZE][AEON_MAX_RESULT_LENGTH];
+
+	/*
+	 * Our current positions for insertion and removal from the descriptor
+	 * rings.
+	 */
+	u_int32_t ring_pos;		/* insertion (producer) index */
+	u_int32_t wakeup_ring_pos;	/* removal (completion) index */
+	volatile u_int32_t slots_in_use; /* outstanding commands; updated by the interrupt handler */
+};
+
+/*
+ * Holds data specific to a single AEON board.
+ */
+struct aeon_softc {
+	struct device sc_dv;		/* generic device */
+	void * sc_ih;			/* interrupt handler cookie */
+	u_int32_t is_dram_model;	/* 1=dram, 0=sram */
+
+	/* Register set 0 (general/config registers, BAR 0) */
+	bus_space_handle_t sc_sh0;
+	bus_space_tag_t sc_st0;
+
+	/* Register set 1 (DMA/ring registers, BAR 1) */
+	bus_space_handle_t sc_sh1;
+	bus_space_tag_t sc_st1;
+
+	struct aeon_dma *sc_dma;	/* rings and buffers; physically contiguous */
+};
+
+/*
+ * Register offsets in register set 0
+ */
+#define AEON_INIT_1 0x04
+#define AEON_RAM_CONFIG 0x0c
+#define AEON_EXPAND 0x08
+#define AEON_ENCRYPTION_LEVEL 0x14
+#define AEON_INIT_3 0x10
+#define AEON_INIT_2 0x1c
+
+#define WRITE_REG_0(sc,reg,val) \
+ bus_space_write_4((sc)->sc_st0, (sc)->sc_sh0, reg, val)
+#define READ_REG_0(sc,reg) \
+ bus_space_read_4((sc)->sc_st0, (sc)->sc_sh0, reg)
+
+/*
+ * Register offsets in register set 1
+ */
+#define AEON_COMMAND_RING_ADDR 0x0c
+#define AEON_SOURCE_RING_ADDR 0x1c
+#define AEON_RESULT_RING_ADDR 0x2c
+#define AEON_DEST_RING_ADDR 0x3c
+#define AEON_STATUS 0x40
+#define AEON_INTERRUPT_ENABLE 0x44
+
+#define AEON_DMA_CFG 0x48
+#define AEON_DMA_CFG_NOBOARDRESET 0x00000001
+#define AEON_DMA_CFG_NODMARESET 0x00000002
+#define AEON_DMA_CFG_NEED 0x00000004
+#define AEON_DMA_CFG_HOSTLAST 0x00000010
+
+#define WRITE_REG_1(sc,reg,val) \
+ bus_space_write_4((sc)->sc_st1, (sc)->sc_sh1, reg, val)
+#define READ_REG_1(sc,reg) \
+ bus_space_read_4((sc)->sc_st1, (sc)->sc_sh1, reg)
+
+/*
+ * Initial register values
+ */
+
+/*
+ * Status Register
+ *
+ * The value below enables polling on all 4 descriptor rings and
+ * writes a "1" to every status bit in the register. (Writing "1"
+ * clears the bit.)
+ */
+#define AEON_INIT_STATUS_REG ((1<<31)|(1<<23)|(1<<15)|(1<<7))
+
+/*
+ * Interrupt Enable Register
+ *
+ * Initial value sets all interrupts to off except the "mask done"
+ * interrupt of the result descriptor ring.
+ */
+#define AEON_INIT_INTERRUPT_ENABLE_REG (AEON_INTR_ON_RESULT_DONE)
+
+/*
+ * DMA Configuration Register
+ *
+ * Initial value sets the polling scalar and frequency, and puts
+ * the host (not the AEON board) in charge of "last" bits in the
+ * dest data and result descriptor rings.
+ */
+#define AEON_INIT_DMA_CONFIG_REG \
+ (AEON_DMA_CFG_NOBOARDRESET | AEON_DMA_CFG_NODMARESET | \
+ AEON_DMA_CFG_NEED | \
+ AEON_DMA_CFG_HOSTLAST | /* host controls last bit in all rings */ \
+ (AEON_POLL_SCALAR << 8) | /* setting poll scalar value */ \
+ (AEON_POLL_FREQUENCY << 16)) /* setting poll frequency value */
+
+/*
+ * RAM Configuration Register
+ *
+ * Initial value sets the encryption context size to 128 bytes (if using
+ * RC4 bump it to 512, but you'll decrease the number of available
+ * sessions).  We don't configure multiple compression histories -- since
+ * IPSec doesn't use them.
+ *
+ * NOTE:  Use the AEON_RAM_CONFIG_INIT() macro instead of the
+ * variable, since DRAM/SRAM detection is not determined statically.
+ */
+#define AEON_INIT_RAM_CONFIG_REG \
+ ((0x0 << 1) | /* RAM Encrypt: 0 for 128 bytes, 1 for 512 bytes */ \
+ (0x1 << 2) | /* RAM Comp cfg: 1 for single compression history */ \
+ 0x4B40) /* Setting fixed bits required by the register */
+
+/*
+ * Expand Register
+ *
+ * The only bit in this register is the expand bit at position 9. It's
+ * cleared by writing a 1 to it.
+ */
+#define AEON_INIT_EXPAND_REG (0x1 << 9)
+
+/*********************************************************************
+ * Structs for board commands
+ *
+ *********************************************************************/
+
+/*
+ * Structure to help build up the command data structure.  This is the
+ * base header common to every command buffer.
+ */
+typedef struct aeon_base_command {
+	u_int16_t masks;		/* AEON_BASE_CMD_* flags */
+	u_int16_t session_num;
+	u_int16_t total_source_count;
+	u_int16_t total_dest_count;
+} aeon_base_command_t;
+
+#define AEON_BASE_CMD_MAC (0x1 << 10)
+#define AEON_BASE_CMD_CRYPT (0x1 << 11)
+#define AEON_BASE_CMD_DECODE (0x1 << 13)
+
+/*
+ * Structure to help build up the command data structure.  Describes the
+ * cryptography portion of a command.
+ */
+typedef struct aeon_crypt_command {
+	u_int16_t masks;		/* AEON_CRYPT_CMD_* flags */
+	u_int16_t header_skip;		/* bytes skipped before encryption begins */
+	u_int32_t source_count;
+} aeon_crypt_command_t;
+
+#define AEON_CRYPT_CMD_ALG_MASK (0x3 << 0)
+#define AEON_CRYPT_CMD_ALG_DES (0x0 << 0)
+#define AEON_CRYPT_CMD_ALG_3DES (0x1 << 0)
+#define AEON_CRYPT_CMD_MODE_CBC (0x1 << 3)
+#define AEON_CRYPT_CMD_NEW_KEY (0x1 << 11)
+#define AEON_CRYPT_CMD_NEW_IV (0x1 << 12)
+
+/*
+ * Structure to help build up the command data structure.  Describes the
+ * authentication (MAC) portion of a command.
+ */
+typedef struct aeon_mac_command {
+	u_int16_t masks;		/* AEON_MAC_CMD_* flags */
+	u_int16_t header_skip;		/* bytes skipped before authentication begins */
+	u_int32_t source_count;
+} aeon_mac_command_t;
+
+#define AEON_MAC_CMD_ALG_MD5 (0x1 << 0)
+#define AEON_MAC_CMD_ALG_SHA1 (0x0 << 0)
+#define AEON_MAC_CMD_MODE_HMAC (0x0 << 2)
+#define AEON_MAC_CMD_TRUNC (0x1 << 4)
+#define AEON_MAC_CMD_APPEND (0x1 << 6)
+/*
+ * MAC POS IPSec initiates authentication after encryption on encodes
+ * and before decryption on decodes.
+ */
+#define AEON_MAC_CMD_POS_IPSEC (0x2 << 8)
+#define AEON_MAC_CMD_NEW_KEY (0x1 << 11)
+
+/*
+ * Structure with all fields necessary to write the command buffer.
+ * We build it up while interrupts are on, then use it to write out
+ * the command buffer quickly while interrupts are off.
+ */
+typedef struct aeon_command_buf_data {
+	aeon_base_command_t base_cmd;
+	aeon_mac_command_t mac_cmd;
+	aeon_crypt_command_t crypt_cmd;
+	const u_int8_t *mac_key;	/* NOTE(review): presumably NULL when no new key -- verify in aeon_build_command */
+	const u_int8_t *crypt_key;
+	const u_int8_t *initial_vector;
+} aeon_command_buf_data_t;
+
+/*
+ * Values for the interrupt enable register
+ */
+#define AEON_INTR_ON_RESULT_DONE (1 << 20)
+#define AEON_INTR_ON_COMMAND_WAITING (1 << 2)
+
+/*
+ * The poll frequency and poll scalar defines are unshifted values used
+ * to set fields in the DMA Configuration Register.
+ */
+#ifndef AEON_POLL_FREQUENCY
+#define AEON_POLL_FREQUENCY 0x1
+#endif
+
+#ifndef AEON_POLL_SCALAR
+#define AEON_POLL_SCALAR 0x0
+#endif
+
+#endif /* __AEON_H__ */
diff --git a/sys/dev/pci/aeonvar.h b/sys/dev/pci/aeonvar.h
new file mode 100644
index 00000000000..68c7452c16d
--- /dev/null
+++ b/sys/dev/pci/aeonvar.h
@@ -0,0 +1,287 @@
+/* $OpenBSD: aeonvar.h,v 1.1 1999/02/19 02:52:20 deraadt Exp $ */
+
+/*
+ * Invertex AEON driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ *
+ * Please send any comments, feedback, bug-fixes, or feature requests to
+ * software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __AEON_EXPORT_H__
+#define __AEON_EXPORT_H__
+
+/*
+ * Length values for cryptography
+ */
+#define AEON_DES_KEY_LENGTH 8
+#define AEON_3DES_KEY_LENGTH 24
+#define AEON_MAX_CRYPT_KEY_LENGTH AEON_3DES_KEY_LENGTH
+#define AEON_IV_LENGTH 8
+
+/*
+ * Length values for authentication
+ */
+#define AEON_MAC_KEY_LENGTH 64
+#define AEON_MD5_LENGTH 16
+#define AEON_SHA1_LENGTH 20
+#define AEON_MAC_TRUNC_LENGTH 12
+
+/*
+ * aeon_command_t
+ *
+ * This is the control structure used to pass commands to aeon_encrypt().
+ *
+ * flags
+ * -----
+ * Flags is the bitwise "or" values for command configuration. A single
+ * encrypt direction needs to be set:
+ *
+ * AEON_ENCODE or AEON_DECODE
+ *
+ * To use cryptography, a single crypto algorithm must be included:
+ *
+ * AEON_CRYPT_3DES or AEON_CRYPT_DES
+ *
+ * To use authentication, a single MAC algorithm must be included:
+ *
+ * AEON_MAC_MD5 or AEON_MAC_SHA1
+ *
+ * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
+ * If the value below is set, hash values are truncated or assumed
+ * truncated to 12 bytes:
+ *
+ * AEON_MAC_TRUNC
+ *
+ * Keys for encryption and authentication can be sent as part of a command,
+ * or the last key value used with a particular session can be retrieved
+ * and used again if either of these flags are not specified.
+ *
+ * AEON_CRYPT_NEW_KEY, AEON_MAC_NEW_KEY
+ *
+ * Whether we block or not waiting for the dest data to be ready is
+ * determined by whether a callback function is given. The other
+ * place we could block is when all the DMA rings are full. If
+ * it is not okay to block while waiting for an open slot in the
+ * rings, include in the following value:
+ *
+ * AEON_DMA_FULL_NOBLOCK
+ *
+ * result_flags
+ * ------------
+ * result_flags is a bitwise "or" of result values. The result_flags
+ * values should not be considered valid until:
+ *
+ * callback routine NULL: aeon_crypto() returns
+ * callback routine set: callback routine called
+ *
+ * Right now there is only one result flag: AEON_MAC_BAD
+ * Its bit is set on decode operations using authentication when a
+ * hash result does not match the input hash value.
+ * The AEON_MAC_OK(r) macro can be used to help inspect this flag.
+ *
+ * session_num
+ * -----------
+ * A number between 0 and 2048 (for DRAM models) or a number between
+ * 0 and 768 (for SRAM models). Those who don't want to use session
+ * numbers should leave value at zero and send a new crypt key and/or
+ * new MAC key on every command. If you use session numbers and
+ * don't send a key with a command, the last key sent for that same
+ * session number will be used.
+ *
+ * Warning: Using session numbers and multiboard at the same time
+ * is currently broken.
+ *
+ * source_buf
+ * ----------
+ * The source buffer is used for DMA -- it must be a 4-byte aligned
+ * address to physically contiguous memory where encode / decode
+ * input is read from. In a decode operation using authentication,
+ * the final bytes of the buffer should contain the appropriate hash
+ * data.
+ *
+ * dest_buf
+ * --------
+ * The dest buffer is used for DMA -- it must be a 4-byte aligned
+ * address to physically contiguous memory where encoded / decoded
+ * output is written to. If desired, this buffer can be the same
+ * as the source buffer with no performance penalty. If
+ * authentication is used, the final bytes will always consist of
+ * the hashed value (even on decode operations).
+ *
+ * mac_header_skip
+ * ---------------
+ * The number of bytes of the source_buf that are skipped over before
+ * authentication begins. This must be a number between 0 and 2^16-1
+ * and can be used by IPSec implementers to skip over IP headers.
+ * *** Value ignored if authentication not used ***
+ *
+ * crypt_header_skip
+ * -----------------
+ * The number of bytes of the source_buf that are skipped over before
+ * the cryptographic operation begins. This must be a number between 0
+ * and 2^16-1. For IPSec, this number will always be 8 bytes larger
+ * than the auth_header_skip (to skip over the ESP header).
+ * *** Value ignored if cryptography not used ***
+ *
+ * source_length
+ * -------------
+ * Length of input data including all skipped headers. On decode
+ * operations using authentication, the length must also include
+ * the appended MAC hash (12, 16, or 20 bytes depending on algorithm
+ * and truncation settings).
+ *
+ * If encryption is used, the encryption payload must be a non-zero
+ * multiple of 8. On encode operations, the encryption payload size
+ * is (source_length - crypt_header_skip - (MAC hash size)). On
+ * decode operations, the encryption payload is
+ * (source_length - crypt_header_skip).
+ *
+ * dest_length
+ * -----------
+ * Length of the dest buffer. It must be at least as large as the
+ * source buffer when authentication is not used. When authentication
+ * is used on an encode operation, it must be at least as long as the
+ * source length plus an extra 12, 16, or 20 bytes to hold the MAC
+ * value (length of mac value varies with algorithm used). When
+ * authentication is used on decode operations, it must be at least
+ * as long as the source buffer minus 12, 16, or 20 bytes for the MAC
+ * value which is not included in the dest data. Unlike source_length,
+ * the dest_length does not have to be exact, values larger than required
+ * are fine.
+ *
+ * dest_ready_callback
+ * -------------------
+ * Callback routine called from AEON's interrupt handler. The routine
+ * must be quick and non-blocking. The callback routine is passed a
+ * pointer to the same aeon_command_t structure used to initiate the
+ * command.
+ *
+ * If this value is null, the aeon_crypto() routine will block until the
+ * dest data is ready.
+ *
+ * private_data
+ * ------------
+ * An unsigned long quantity (i.e. large enough to hold a pointer), that
+ * can be used by the callback routine if desired.
+ */
+typedef struct aeon_command {
+	u_int flags;			/* AEON_ENCODE/AEON_DECODE plus algorithm and key flags */
+	volatile u_int result_status;	/* AEON_MAC_BAD etc.; written by the interrupt handler */
+
+	u_short session_num;		/* see session_num discussion above */
+
+	/*
+	 * You should be able to convert any of these arrays into pointers
+	 * (if desired) without modifying code in aeon.c.
+	 */
+	u_char initial_vector[AEON_IV_LENGTH];
+	u_char crypt_key[AEON_MAX_CRYPT_KEY_LENGTH];
+	u_char mac_key[AEON_MAC_KEY_LENGTH];
+
+	void *source_buf;		/* 4-byte aligned, physically contiguous (DMA) */
+	void *dest_buf;			/* 4-byte aligned, physically contiguous (DMA) */
+
+	u_short mac_header_skip;	/* bytes skipped before authentication */
+	u_short crypt_header_skip;	/* bytes skipped before encryption */
+	u_short source_length;		/* exact input length incl. skipped headers */
+	u_short dest_length;		/* dest buffer size; may be larger than required */
+
+	void (*dest_ready_callback)(struct aeon_command *);	/* NULL = block until done */
+	u_long private_data;		/* opaque value for the callback's use */
+} aeon_command_t;
+
+/*
+ * Return values for aeon_crypto()
+ */
+#define AEON_CRYPTO_SUCCESS 0
+#define AEON_CRYPTO_BAD_INPUT -1
+#define AEON_CRYPTO_RINGS_FULL -2
+
+
+/*
+ * Defines for the "config" parameter of aeon_command_t
+ */
+#define AEON_ENCODE 1
+#define AEON_DECODE 2
+#define AEON_CRYPT_3DES 4
+#define AEON_CRYPT_DES 8
+#define AEON_MAC_MD5 16
+#define AEON_MAC_SHA1 32
+#define AEON_MAC_TRUNC 64
+#define AEON_CRYPT_NEW_KEY 128
+#define AEON_MAC_NEW_KEY 256
+#define AEON_DMA_FULL_NOBLOCK 512
+
+#define AEON_USING_CRYPT(f) ((f) & (AEON_CRYPT_3DES|AEON_CRYPT_DES))
+#define AEON_USING_MAC(f) ((f) & (AEON_MAC_MD5|AEON_MAC_SHA1))
+
+/*
+ * Defines for the "result_status" parameter of aeon_command_t.
+ */
+#define AEON_MAC_BAD 1
+#define AEON_MAC_OK(r) !((r) & AEON_MAC_BAD)
+
+#ifdef _KERNEL
+
+/**************************************************************************
+ *
+ * Function: aeon_crypto
+ *
+ * Purpose: Called by external drivers to begin an encryption on the
+ * AEON board.
+ *
+ * Blocking/Non-blocking Issues
+ * ============================
+ * If the dest_ready_callback field of the aeon_command structure
+ * is NULL, aeon_encrypt will block until the dest_data is ready --
+ * otherwise aeon_encrypt() will return immediately and the
+ * dest_ready_callback routine will be called when the dest data is
+ * ready.
+ *
+ * The routine can also block when waiting for an open slot when all
+ * DMA rings are full. You can avoid this behaviour by sending the
+ * AEON_DMA_FULL_NOBLOCK as part of the command flags. This will
+ * make aeon_crypt() return immediately when the rings are full.
+ *
+ * Return Values
+ * =============
+ * 0 for success, negative values on error
+ *
+ * Defines for negative error codes are:
+ *
+ * AEON_CRYPTO_BAD_INPUT : The passed in command had invalid settings.
+ * AEON_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking
+ * behaviour was requested.
+ *
+ *************************************************************************/
+int aeon_crypto(aeon_command_t *command);
+
+#endif /* _KERNEL */
+
+#endif /* __AEON_EXPORT_H__ */
diff --git a/sys/dev/pci/hifn7751.c b/sys/dev/pci/hifn7751.c
new file mode 100644
index 00000000000..f33c2b1cff2
--- /dev/null
+++ b/sys/dev/pci/hifn7751.c
@@ -0,0 +1,1010 @@
+/* $OpenBSD: hifn7751.c,v 1.1 1999/02/19 02:52:19 deraadt Exp $ */
+
+/*
+ * Invertex AEON driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ * Copyright (c) 1999 Theo de Raadt
+ *
+ * This driver is based on a previous driver by Invertex, for which they
+ * requested: Please send any comments, feedback, bug-fixes, or feature
+ * requests to software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/errno.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <machine/pmap.h>
+#include <sys/device.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+#include <dev/pci/aeonvar.h>
+#include <dev/pci/aeonreg.h>
+
+#define AEON_DEBUG
+
+/*
+ * Prototypes and count for the pci_device structure
+ */
+int aeon_probe __P((struct device *, void *, void *));
+void aeon_attach __P((struct device *, struct device *, void *));
+
+void aeon_reset_board __P((struct aeon_softc *));
+int aeon_enable_crypto __P((struct aeon_softc *));
+void aeon_init_dma __P((struct aeon_softc *));
+void aeon_init_pci_registers __P((struct aeon_softc *));
+int aeon_ram_setting_okay __P((struct aeon_softc *));
+int aeon_intr __P((void *));
+u_int32_t aeon_write_command __P((const struct aeon_command_buf_data *,
+ u_int8_t *));
+int aeon_build_command __P((const struct aeon_command * cmd,
+ struct aeon_command_buf_data *));
+void aeon_intr_process_ring __P((struct aeon_softc *, struct aeon_dma *));
+
+struct cfattach aeon_ca = {
+ sizeof(struct aeon_softc), aeon_probe, aeon_attach,
+};
+
+struct cfdriver aeon_cd = {
+ 0, "aeon", DV_DULL
+};
+
+/*
+ * Used for round robin crypto requests
+ */
+int aeon_num_devices = 0;
+struct aeon_softc *aeon_devices[AEON_MAX_DEVICES];
+
+
+/*
+ * Purpose: Probe routine.  Matches the Invertex AEON board by its PCI
+ *	vendor and product IDs.  Returns 1 on a match, 0 otherwise.
+ */
+int
+aeon_probe(parent, match, aux)
+	struct device *parent;
+	void *match;
+	void *aux;
+{
+	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
+
+	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INVERTEX &&
+	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INVERTEX_AEON)
+		return (1);
+	return (0);
+}
+
+/*
+ * Purpose: One time initialization for the device performed at bootup.
+ */
+void
+aeon_attach(parent, self, aux)
+	struct device *parent, *self;
+	void *aux;
+{
+	struct aeon_softc *sc = (struct aeon_softc *)self;
+	struct pci_attach_args *pa = aux;
+	pci_chipset_tag_t pc = pa->pa_pc;
+	pci_intr_handle_t ih;
+	const char *intrstr = NULL;
+	bus_addr_t iobase;
+	bus_size_t iosize;
+	u_int32_t cmd;
+
+	/*
+	 * Enable I/O, memory and bus-master access, then read the command
+	 * register back to verify memory access actually took effect.
+	 */
+	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
+	cmd |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
+	    PCI_COMMAND_MASTER_ENABLE;
+	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
+	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
+
+	if (!(cmd & PCI_COMMAND_MEM_ENABLE)) {
+		printf(": failed to enable memory mapping\n");
+		return;
+	}
+
+	/* Map register set 0 (BAR 0). */
+	if (pci_mem_find(pc, pa->pa_tag, PCI_BASE_ADDRESS_0, &iobase, &iosize,
+	    NULL)){
+		printf(": can't find mem space\n");
+		return;
+	}
+	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sc_sh0)) {
+		printf(": can't map mem space\n");
+		return;
+	}
+	sc->sc_st0 = pa->pa_memt;
+
+	/* Map register set 1 (BAR 1). */
+	if (pci_mem_find(pc, pa->pa_tag, PCI_BASE_ADDRESS_1, &iobase, &iosize,
+	    NULL)){
+		printf(": can't find mem space\n");
+		return;
+	}
+	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sc_sh1)) {
+		printf(": can't map mem space\n");
+		return;
+	}
+	sc->sc_st1 = pa->pa_memt;
+	printf(" mem %x %x", sc->sc_sh0, sc->sc_sh1);
+
+	/*
+	 * NOTE(review): the allocation result is not checked for failure
+	 * before the bzero() below -- confirm vm_page_alloc_contig cannot
+	 * fail here, or add a NULL check.
+	 */
+	sc->sc_dma = (struct aeon_dma *)vm_page_alloc_contig(sizeof(*sc->sc_dma),
+	    0x100000, 0xffffffff, PAGE_SIZE);
+	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
+
+	aeon_reset_board(sc);
+
+	if (aeon_enable_crypto(sc) != 0) {
+		printf("%s: crypto enabling failed\n",
+		    sc->sc_dv.dv_xname);
+		return;
+	}
+
+	aeon_init_dma(sc);
+	aeon_init_pci_registers(sc);
+
+	/* If the RAM test fails with the current setting, flag the DRAM model. */
+	if (aeon_ram_setting_okay(sc) != 0)
+		sc->is_dram_model = 1;
+
+	/*
+	 * Reinitialize again, since the DRAM/SRAM detection shifted our ring
+	 * pointers and may have changed the value we send to the RAM Config
+	 * Register.
+	 */
+	aeon_reset_board(sc);
+	aeon_init_dma(sc);
+	aeon_init_pci_registers(sc);
+
+	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
+	    pa->pa_intrline, &ih)) {
+		printf(": couldn't map interrupt\n");
+		return;
+	}
+	intrstr = pci_intr_string(pc, ih);
+	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, aeon_intr, sc,
+	    self->dv_xname);
+	if (sc->sc_ih == NULL) {
+		printf(": couldn't establish interrupt\n");
+		if (intrstr != NULL)
+			printf(" at %s", intrstr);
+		printf("\n");
+		return;
+	}
+
+	/* NOTE(review): no bounds check against AEON_MAX_DEVICES here. */
+	aeon_devices[aeon_num_devices] = sc;
+	aeon_num_devices++;
+
+	printf(": %s\n", intrstr);
+}
+
+/*
+ * Purpose: Resets the board.  Values in the registers are left as is
+ *	from the reset (i.e. initial values are assigned elsewhere).
+ */
+void
+aeon_reset_board(sc)
+	struct aeon_softc *sc;
+{
+	/*
+	 * Set polling in the DMA configuration register to zero.  0x7 avoids
+	 * resetting the board and zeros out the other fields.
+	 */
+	WRITE_REG_1(sc, AEON_DMA_CFG, AEON_DMA_CFG_NOBOARDRESET |
+	    AEON_DMA_CFG_NODMARESET | AEON_DMA_CFG_NEED);
+
+	/*
+	 * Now that polling has been disabled, we have to wait 1 ms
+	 * before resetting the board.
+	 */
+	DELAY(1000);
+
+	/* Reset the board.  We do this by writing zeros to the DMA reset
+	 * field, the BRD reset field, and the mandatory 1 at position 2.
+	 * Every other field is set to zero.
+	 */
+	WRITE_REG_1(sc, AEON_DMA_CFG, AEON_DMA_CFG_NEED);
+
+	/*
+	 * Wait another millisecond for the board to reset.
+	 */
+	DELAY(1000);
+
+	/*
+	 * Turn off the reset!  (No joke.)
+	 */
+	WRITE_REG_1(sc, AEON_DMA_CFG, AEON_DMA_CFG_NOBOARDRESET |
+	    AEON_DMA_CFG_NODMARESET | AEON_DMA_CFG_NEED);
+}
+
+/*
+ * Purpose: Checks to see if crypto is already enabled.  If crypto
+ *	isn't enabled, "aeon_enable_crypto" is called to enable it.
+ *	The check is important, as enabling crypto twice will lock
+ *	the board.
+ *
+ * Returns: 0 value on success, -1 if we were not able to unlock the
+ *	cryptographic engine.
+ *
+ * NOTE(review): the unlock procedure itself has been elided below, so
+ *	as written this routine always returns 0.
+ */
+int
+aeon_enable_crypto(sc)
+	struct aeon_softc *sc;
+{
+	u_int32_t encryption_level;
+
+	/*
+	 * The RAM config register's encrypt level bit needs to be set before
+	 * every read performed on the encryption level register.
+	 */
+	WRITE_REG_0(sc, AEON_RAM_CONFIG,
+	    READ_REG_0(sc, AEON_RAM_CONFIG) | (0x1 << 5));
+
+	encryption_level = READ_REG_0(sc, AEON_ENCRYPTION_LEVEL);
+
+	/*
+	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
+	 * next reboot.
+	 */
+	if (encryption_level == 0x1020 || encryption_level == 0x1120) {
+#ifdef AEON_DEBUG
+		printf("%s: Strong Crypto already enabled!\n",
+		    sc->sc_dv.dv_xname);
+#endif
+		return 0;	/* success */
+	}
+
+	/**
+	 **
+	 ** Rest of unlock procedure removed.
+	 **
+	 **
+	 **/
+
+	/* Report the detected encryption level as part of the attach line. */
+	switch(encryption_level) {
+	case 0x3020:
+		printf(" no encr/auth");
+		break;
+	case 0x1020:
+		printf(" DES");
+		break;
+	case 0x1120:
+		printf(" FULL");
+		break;
+	default:
+		printf(" disabled");
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Purpose: Give initial values to the registers listed in the
+ *	"Register Space" section of the AEON Software Development
+ *	reference manual.
+ */
+void
+aeon_init_pci_registers(sc)
+	struct aeon_softc *sc;
+{
+	u_int32_t ram_config;
+
+	/*
+	 * Write fixed values needed by the Initialization registers
+	 */
+	WRITE_REG_0(sc, AEON_INIT_1, 0x2);
+	WRITE_REG_0(sc, AEON_INIT_2, 0x400);
+	WRITE_REG_0(sc, AEON_INIT_3, 0x200);
+
+	/*
+	 * Write all 4 ring address registers (physical addresses)
+	 */
+	WRITE_REG_1(sc, AEON_COMMAND_RING_ADDR,
+	    vtophys(sc->sc_dma->command_ring));
+	WRITE_REG_1(sc, AEON_SOURCE_RING_ADDR,
+	    vtophys(sc->sc_dma->source_ring));
+	WRITE_REG_1(sc, AEON_DEST_RING_ADDR,
+	    vtophys(sc->sc_dma->dest_ring));
+	WRITE_REG_1(sc, AEON_RESULT_RING_ADDR,
+	    vtophys(sc->sc_dma->result_ring));
+
+	/*
+	 * Write status register
+	 */
+	WRITE_REG_1(sc, AEON_STATUS, AEON_INIT_STATUS_REG);
+
+	/*
+	 * Write registers which had their initial values defined
+	 * elsewhere.  The "Encryption level register" is the only
+	 * documented register not initialized by this routine (it's read
+	 * only).
+	 */
+	WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE, AEON_INIT_INTERRUPT_ENABLE_REG);
+
+	/* DRAM/SRAM selection (bit 4) depends on the detected model. */
+	ram_config = AEON_INIT_RAM_CONFIG_REG
+#if BYTE_ORDER == BIG_ENDIAN
+	    | (0x1 << 7)
+#endif
+	    | (sc->is_dram_model << 4);
+	WRITE_REG_0(sc, AEON_RAM_CONFIG, ram_config);
+	WRITE_REG_0(sc, AEON_EXPAND, AEON_INIT_EXPAND_REG);
+	WRITE_REG_1(sc, AEON_DMA_CFG, AEON_INIT_DMA_CONFIG_REG);
+}
+
+/*
+ * Purpose: There are both DRAM and SRAM models of the aeon board.
+ *	A bit in the "ram configuration register" needs to be
+ *	set according to the model.  The driver will guess one
+ *	way or the other -- and then call this routine to verify.
+ * Returns:
+ *	0: RAM setting okay
+ *	-1: Current RAM setting in error
+ */
+int
+aeon_ram_setting_okay(sc)
+	struct aeon_softc *sc;
+{
+	aeon_base_command_t write_command = {(0x3 << 13), 0, 8, 0};
+	aeon_base_command_t read_command = {(0x2 << 13), 0, 0, 8};
+	u_int8_t data[8] = {'1', '2', '3', '4', '5', '6', '7', '8'};
+	u_int8_t *source_buf, *dest_buf;
+	struct aeon_dma *dma = sc->sc_dma;
+
+	const u_int32_t masks = AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST |
+	AEON_DESCRIPT_MASK_DONE_IRQ;
+
+#if (AEON_DESCRIPT_RING_SIZE < 3)
+#error "descriptor ring size too small DRAM/SRAM check"
+#endif
+
+	/*
+	 * We steal the 8 bytes needed for both the source and dest buffers
+	 * from the 3rd slot that the DRAM/SRAM test won't use.
+	 */
+	source_buf = sc->sc_dma->command_bufs[2];
+	dest_buf = sc->sc_dma->result_bufs[2];
+
+	/*
+	 * Build write command (ring slot 0)
+	 */
+	*(aeon_base_command_t *) sc->sc_dma->command_bufs[0] = write_command;
+	bcopy(data, source_buf, sizeof(data));
+
+	dma->source_ring[0].pointer = vtophys(source_buf);
+	dma->dest_ring[0].pointer = vtophys(dest_buf);
+
+	dma->command_ring[0].length = 16 | masks;
+	dma->source_ring[0].length = 8 | masks;
+	dma->dest_ring[0].length = 8 | masks;
+	dma->result_ring[0].length = AEON_MAX_RESULT_LENGTH | masks;
+
+	/*
+	 * Let write command execute.  NOTE(review): a fixed 1 ms delay is
+	 * used rather than polling the valid bit; the check below only
+	 * reports -- it does not retry -- if the command has not finished.
+	 */
+	DELAY(1000);
+
+	if (dma->result_ring[0].length & AEON_DESCRIPT_VALID)
+		printf("%s: SRAM/DRAM detection error -- result[0] valid still set\n",
+		    sc->sc_dv.dv_xname);
+
+	/*
+	 * Build read command (ring slot 1)
+	 */
+	*(aeon_base_command_t *) sc->sc_dma->command_bufs[1] = read_command;
+
+	dma->source_ring[1].pointer = vtophys(source_buf);
+	dma->dest_ring[1].pointer = vtophys(dest_buf);
+
+	dma->command_ring[1].length = 16 | masks;
+	dma->source_ring[1].length = 8 | masks;
+	dma->dest_ring[1].length = 8 | masks;
+	dma->result_ring[1].length = AEON_MAX_RESULT_LENGTH | masks;
+
+	/*
+	 * Let read command execute
+	 */
+	DELAY(1000);
+
+	if (dma->result_ring[1].length & AEON_DESCRIPT_VALID)
+		printf("%s: SRAM/DRAM detection error -- result[1] valid still set\n",
+		    sc->sc_dv.dv_xname);
+
+	/* The RAM setting is correct iff the data survived the round trip. */
+	return (memcmp(dest_buf, data, sizeof(data)) == 0) ? 0 : -1;
+}
+
+/*
+ * Set up the static parts of the four descriptor rings.
+ */
+void
+aeon_init_dma(sc)
+	struct aeon_softc *sc;
+{
+	struct aeon_dma *dma = sc->sc_dma;
+	int slot;
+
+	/*
+	 * The command and result descriptors always refer to the same
+	 * per-slot buffers, so their pointers can be filled in once here.
+	 */
+	for (slot = 0; slot < AEON_DESCRIPT_RING_SIZE; slot++) {
+		dma->command_ring[slot].pointer =
+		    vtophys(dma->command_bufs[slot]);
+		dma->result_ring[slot].pointer =
+		    vtophys(dma->result_bufs[slot]);
+	}
+
+	/*
+	 * The extra descriptor at the end of each ring is the jump
+	 * descriptor; aim it back at the start of its own ring.
+	 */
+	dma->command_ring[AEON_DESCRIPT_RING_SIZE].pointer =
+	    vtophys(dma->command_ring);
+	dma->source_ring[AEON_DESCRIPT_RING_SIZE].pointer =
+	    vtophys(dma->source_ring);
+	dma->dest_ring[AEON_DESCRIPT_RING_SIZE].pointer =
+	    vtophys(dma->dest_ring);
+	dma->result_ring[AEON_DESCRIPT_RING_SIZE].pointer =
+	    vtophys(dma->result_ring);
+}
+
+/*
+ * Purpose: Writes out the raw command buffer space. Returns the
+ *	command buffer size (in bytes).
+ *
+ * Layout: base command, then the optional MAC and crypt command
+ * structures, then any new MAC key, crypt key and IV.  When neither
+ * MAC nor crypt is in use, 8 bytes of zeros pad the command out to
+ * the minimum size.
+ */
+u_int32_t
+aeon_write_command(
+    const struct aeon_command_buf_data * cmd_data,
+    u_int8_t * command_buf
+)
+{
+	u_int8_t *command_buf_pos = command_buf;
+	const aeon_base_command_t *base_cmd = &cmd_data->base_cmd;
+	const aeon_mac_command_t *mac_cmd = &cmd_data->mac_cmd;
+	const aeon_crypt_command_t *crypt_cmd = &cmd_data->crypt_cmd;
+
+	int using_mac = base_cmd->masks & AEON_BASE_CMD_MAC;
+	int using_crypt = base_cmd->masks & AEON_BASE_CMD_CRYPT;
+
+	/*
+	 * Write base command structure
+	 */
+	*((aeon_base_command_t *) command_buf_pos) = *base_cmd;
+	command_buf_pos += sizeof(aeon_base_command_t);
+
+	/*
+	 * Write MAC command structure
+	 */
+	if (using_mac) {
+		*((aeon_mac_command_t *) command_buf_pos) = *mac_cmd;
+		command_buf_pos += sizeof(aeon_mac_command_t);
+	}
+	/*
+	 * Write encryption command structure
+	 */
+	if (using_crypt) {
+		*((aeon_crypt_command_t *) command_buf_pos) = *crypt_cmd;
+		command_buf_pos += sizeof(aeon_crypt_command_t);
+	}
+	/*
+	 * Write MAC key.  Only consult the MAC masks when MAC'ing is
+	 * actually enabled, so we never depend on the caller having
+	 * zeroed the unused command structures.
+	 */
+	if (using_mac && (mac_cmd->masks & AEON_MAC_CMD_NEW_KEY)) {
+		bcopy(cmd_data->mac_key, command_buf_pos, AEON_MAC_KEY_LENGTH);
+		command_buf_pos += AEON_MAC_KEY_LENGTH;
+	}
+	/*
+	 * Write crypto key -- DES and 3DES keys differ in length.
+	 */
+	if (using_crypt && (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_KEY)) {
+		u_int32_t alg = crypt_cmd->masks & AEON_CRYPT_CMD_ALG_MASK;
+		u_int32_t key_len = (alg == AEON_CRYPT_CMD_ALG_DES) ?
+		    AEON_DES_KEY_LENGTH : AEON_3DES_KEY_LENGTH;
+		bcopy(cmd_data->crypt_key, command_buf_pos, key_len);
+		command_buf_pos += key_len;
+	}
+	/*
+	 * Write crypto iv
+	 */
+	if (using_crypt && (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_IV)) {
+		bcopy(cmd_data->initial_vector, command_buf_pos, AEON_IV_LENGTH);
+		command_buf_pos += AEON_IV_LENGTH;
+	}
+	/*
+	 * Write 8 bytes of zero's if we're not sending crypt or MAC
+	 * structures
+	 */
+	if (!using_mac && !using_crypt) {
+		*((u_int32_t *) command_buf_pos) = 0;
+		command_buf_pos += 4;
+		*((u_int32_t *) command_buf_pos) = 0;
+		command_buf_pos += 4;
+	}
+#if 0
+	if ((command_buf_pos - command_buf) > AEON_MAX_COMMAND_LENGTH)
+		printf("aeon: Internal Error -- Command buffer overflow.\n");
+#endif
+
+	return command_buf_pos - command_buf;
+}
+
+/*
+ * Purpose: Check command input and build up structure to write
+ *	the command buffer later (see aeon_write_command()).
+ *	Returns 0 on success and -1 if bad command input was given.
+ */
+int
+aeon_build_command(
+    const struct aeon_command * cmd,
+    struct aeon_command_buf_data * cmd_buf_data
+)
+{
+#define AEON_COMMAND_CHECKING
+
+	u_int32_t flags = cmd->flags;
+	aeon_base_command_t *base_cmd = &cmd_buf_data->base_cmd;
+	aeon_mac_command_t *mac_cmd = &cmd_buf_data->mac_cmd;
+	aeon_crypt_command_t *crypt_cmd = &cmd_buf_data->crypt_cmd;
+	u_int mac_length;
+
+#ifdef AEON_COMMAND_CHECKING
+	int dest_diff;
+#endif
+
+	bzero(cmd_buf_data, sizeof(struct aeon_command_buf_data));
+
+#ifdef AEON_COMMAND_CHECKING
+	/* Exactly one of encode/decode must be requested. */
+	if (!(!!(flags & AEON_DECODE) ^ !!(flags & AEON_ENCODE))) {
+		printf("aeon: encode/decode setting error\n");
+		return -1;
+	}
+	if ((flags & AEON_CRYPT_DES) && (flags & AEON_CRYPT_3DES)) {
+		printf("aeon: Too many crypto algorithms set in command\n");
+		return -1;
+	}
+	if ((flags & AEON_MAC_SHA1) && (flags & AEON_MAC_MD5)) {
+		printf("aeon: Too many MAC algorithms set in command\n");
+		return -1;
+	}
+#endif
+
+	/*
+	 * Compute the mac value length -- leave at zero if not MAC'ing
+	 */
+	mac_length = 0;
+	if (AEON_USING_MAC(flags)) {
+		mac_length = (flags & AEON_MAC_TRUNC) ? AEON_MAC_TRUNC_LENGTH :
+		    ((flags & AEON_MAC_MD5) ? AEON_MD5_LENGTH : AEON_SHA1_LENGTH);
+	}
+#ifdef AEON_COMMAND_CHECKING
+	/*
+	 * Check for valid src/dest buf sizes.
+	 *
+	 * XXX XXX We need to include header counts into all these
+	 * checks!!!!
+	 */
+	if (cmd->source_length <= mac_length) {
+		printf("aeon: command source buffer has no data\n");
+		return -1;
+	}
+	/* Encodes append the MAC to the dest; decodes strip it. */
+	dest_diff = (flags & AEON_ENCODE) ? mac_length : -mac_length;
+	if (cmd->dest_length < cmd->source_length + dest_diff) {
+		printf("aeon: command dest length %u too short -- needed %u\n",
+		    cmd->dest_length, cmd->source_length + dest_diff);
+		return -1;
+	}
+#endif
+
+	/**
+	 ** Building up base command
+	 **
+	 **/
+
+	/*
+	 * Set MAC bit
+	 */
+	if (AEON_USING_MAC(flags))
+		base_cmd->masks |= AEON_BASE_CMD_MAC;
+
+	/* Set Encrypt bit */
+	if (AEON_USING_CRYPT(flags))
+		base_cmd->masks |= AEON_BASE_CMD_CRYPT;
+
+	/*
+	 * Set Decode bit
+	 */
+	if (flags & AEON_DECODE)
+		base_cmd->masks |= AEON_BASE_CMD_DECODE;
+
+	/*
+	 * Set total source and dest counts. These values are the same as the
+	 * values set in the length field of the source and dest descriptor rings.
+	 */
+	base_cmd->total_source_count = cmd->source_length;
+	base_cmd->total_dest_count = cmd->dest_length;
+
+	/*
+	 * XXX -- We need session number range checking...
+	 */
+	base_cmd->session_num = cmd->session_num;
+
+	/**
+	 ** Building up mac command
+	 **
+	 **/
+	if (AEON_USING_MAC(flags)) {
+
+		/*
+		 * Set the MAC algorithm and trunc setting
+		 */
+		mac_cmd->masks |= (flags & AEON_MAC_MD5) ?
+		    AEON_MAC_CMD_ALG_MD5 : AEON_MAC_CMD_ALG_SHA1;
+		if (flags & AEON_MAC_TRUNC)
+			mac_cmd->masks |= AEON_MAC_CMD_TRUNC;
+
+		/*
+		 * We always use HMAC mode, assume MAC values are appended to the
+		 * source buffer on decodes and we append them to the dest buffer
+		 * on encodes, and order auth/encryption engines as needed by
+		 * IPSEC
+		 */
+		mac_cmd->masks |= AEON_MAC_CMD_MODE_HMAC | AEON_MAC_CMD_APPEND |
+		    AEON_MAC_CMD_POS_IPSEC;
+
+		/*
+		 * Setup to send new MAC key if needed.
+		 */
+		if (flags & AEON_MAC_CMD_NEW_KEY) {
+			mac_cmd->masks |= AEON_MAC_CMD_NEW_KEY;
+			cmd_buf_data->mac_key = cmd->mac_key;
+		}
+		/*
+		 * Set the mac header skip and source count.  (header_skip
+		 * was previously never assigned, so the board always saw a
+		 * MAC header skip of 0 even when mac_header_skip was set.)
+		 */
+		mac_cmd->header_skip = cmd->mac_header_skip;
+		mac_cmd->source_count = cmd->source_length - cmd->mac_header_skip;
+		if (flags & AEON_DECODE)
+			mac_cmd->source_count -= mac_length;
+	}
+	/**
+	 ** Building up crypto command
+	 **
+	 **/
+	if (AEON_USING_CRYPT(flags)) {
+
+		/*
+		 * Set the encryption algorithm bits.
+		 */
+		crypt_cmd->masks |= (flags & AEON_CRYPT_DES) ?
+		    AEON_CRYPT_CMD_ALG_DES : AEON_CRYPT_CMD_ALG_3DES;
+
+		/* We always use CBC mode and send a new IV (as needed by
+		 * IPSec). */
+		crypt_cmd->masks |= AEON_CRYPT_CMD_MODE_CBC | AEON_CRYPT_CMD_NEW_IV;
+
+		/*
+		 * Setup to send new encrypt key if needed.
+		 */
+		if (flags & AEON_CRYPT_CMD_NEW_KEY) {
+			crypt_cmd->masks |= AEON_CRYPT_CMD_NEW_KEY;
+			cmd_buf_data->crypt_key = cmd->crypt_key;
+		}
+		/*
+		 * Set the encrypt header skip and source count.
+		 */
+		crypt_cmd->header_skip = cmd->crypt_header_skip;
+		crypt_cmd->source_count = cmd->source_length - cmd->crypt_header_skip;
+		if (flags & AEON_DECODE)
+			crypt_cmd->source_count -= mac_length;
+
+#ifdef AEON_COMMAND_CHECKING
+		/* The cipher engine works on 8-byte (DES block) units. */
+		if (crypt_cmd->source_count % 8 != 0) {
+			printf("aeon: Error -- encryption source %u not a multiple of 8!\n",
+			    crypt_cmd->source_count);
+			return -1;
+		}
+#endif
+	}
+	cmd_buf_data->initial_vector = cmd->initial_vector;
+
+	return 0;		/* success */
+}
+
+
+/*
+ * Purpose: Dispatch a command to an AEON board (round-robin across
+ *	all attached boards).  The IPL is raised while the rings are
+ *	manipulated.  If the command has no dest_ready_callback, this
+ *	blocks (up to 3 seconds) until the dest data is ready.
+ *
+ * Returns:
+ *	0 on success
+ *	AEON_CRYPTO_BAD_INPUT for malformed commands
+ *	AEON_CRYPTO_RINGS_FULL when AEON_DMA_FULL_NOBLOCK is set and
+ *	no ring slot is free
+ */
+int
+aeon_crypto(struct aeon_command * cmd)
+{
+	u_int32_t command_length;
+
+	u_int32_t local_ring_pos;
+	int err;
+	int oldint;
+	static u_int32_t current_device = 0;
+	struct aeon_softc *sc;
+	struct aeon_dma *dma;
+	const u_int32_t masks = AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST |
+	    AEON_DESCRIPT_MASK_DONE_IRQ;
+
+	struct aeon_command_buf_data cmd_buf_data;
+
+	if (aeon_build_command(cmd, &cmd_buf_data) != 0)
+		return AEON_CRYPTO_BAD_INPUT;
+
+	/*
+	 * Turning off interrupts
+	 */
+	oldint = splimp();
+
+	/* Pick the aeon board to send the data to. Right now we use a round
+	 * robin approach. */
+	sc = aeon_devices[current_device++];
+	if (current_device == aeon_num_devices)
+		current_device = 0;
+	dma = sc->sc_dma;
+
+	if (dma->slots_in_use == AEON_DESCRIPT_RING_SIZE) {
+
+		if (cmd->flags & AEON_DMA_FULL_NOBLOCK) {
+			/*
+			 * Restore the IPL before bailing out; this path
+			 * previously returned with interrupts still
+			 * blocked at splimp().
+			 */
+			splx(oldint);
+			return AEON_CRYPTO_RINGS_FULL;
+		}
+
+		do {
+#ifdef AEON_DEBUG
+			printf("%s: Waiting for unused ring.\n",
+			    sc->sc_dv.dv_xname);
+#endif
+			/* sleep for minimum timeout */
+			tsleep((caddr_t) dma, PZERO, "QFULL", 1);
+
+		} while (dma->slots_in_use == AEON_DESCRIPT_RING_SIZE);
+	}
+	dma->slots_in_use++;
+
+	if (dma->ring_pos == AEON_DESCRIPT_RING_SIZE) {
+		local_ring_pos = 0;
+		dma->ring_pos = 1;
+	} else {
+		local_ring_pos = dma->ring_pos++;
+	}
+
+	command_length =
+	    aeon_write_command(&cmd_buf_data, dma->command_bufs[local_ring_pos]);
+
+	dma->aeon_commands[local_ring_pos] = cmd;
+
+	/*
+	 * If we wrapped to the beginning of the ring, validate the jump
+	 * descriptor. (Not needed on the very first time command -- but it
+	 * doesn't hurt.)
+	 */
+	if (local_ring_pos == 0) {
+		const u_int32_t jmp_masks = masks | AEON_DESCRIPT_JUMP;
+
+		dma->command_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
+		dma->source_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
+		dma->dest_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
+		dma->result_ring[AEON_DESCRIPT_RING_SIZE].length = jmp_masks;
+	}
+	/*
+	 * "pointer" values for command and result descriptors are already set
+	 */
+	dma->command_ring[local_ring_pos].length = command_length | masks;
+
+	dma->source_ring[local_ring_pos].pointer = vtophys(cmd->source_buf);
+	dma->source_ring[local_ring_pos].length = cmd->source_length | masks;
+
+	dma->dest_ring[local_ring_pos].pointer = vtophys(cmd->dest_buf);
+	dma->dest_ring[local_ring_pos].length = cmd->dest_length | masks;
+
+	/*
+	 * Unlike other descriptors, we don't mask done interrupt from
+	 * result descriptor.
+	 */
+	dma->result_ring[local_ring_pos].length =
+	    AEON_MAX_RESULT_LENGTH | AEON_DESCRIPT_VALID | AEON_DESCRIPT_LAST;
+
+	/*
+	 * We don't worry about missing an interrupt (which a waiting
+	 * on command interrupt salvages us from), unless there is more
+	 * than one command in the queue.
+	 */
+	if (dma->slots_in_use > 1) {
+		WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE,
+		    AEON_INTR_ON_RESULT_DONE | AEON_INTR_ON_COMMAND_WAITING);
+	}
+	/*
+	 * If not given a callback routine, we block until the dest data is
+	 * ready. (Setting interrupt timeout at 3 seconds.)
+	 */
+	if (cmd->dest_ready_callback == NULL) {
+		err = tsleep((caddr_t) &dma->result_ring[local_ring_pos],
+		    PZERO, "CRYPT", hz * 3);
+		if (err != 0)
+			printf("%s: timed out waiting for interrupt"
+			    " -- tsleep() exited with %d\n",
+			    sc->sc_dv.dv_xname, err);
+	}
+	/*
+	 * Turning interrupts back on
+	 */
+	splx(oldint);
+
+	return 0;		/* success */
+}
+
+/*
+ * Part of interrupt handler--cleans out done jobs from rings
+ */
+void
+aeon_intr_process_ring(sc, dma)
+ struct aeon_softc *sc;
+ struct aeon_dma *dma;
+{
+ if (dma->slots_in_use > AEON_DESCRIPT_RING_SIZE)
+ printf("%s: Internal Error -- ring overflow\n",
+ sc->sc_dv.dv_xname);
+
+ while (dma->slots_in_use > 0) {
+ u_int32_t wake_pos = dma->wakeup_ring_pos;
+ struct aeon_command *cmd = dma->aeon_commands[wake_pos];
+
+ /*
+ * If still valid, stop processing
+ */
+ if (dma->result_ring[wake_pos].length & AEON_DESCRIPT_VALID)
+ break;
+
+ if (AEON_USING_MAC(cmd->flags) && (cmd->flags & AEON_DECODE)) {
+ u_int8_t *result_buf = dma->result_bufs[wake_pos];
+ cmd->result_status = (result_buf[8] & 0x2) ? AEON_MAC_BAD : 0;
+ printf("%s: byte index 8 of result 0x%02x\n",
+ sc->sc_dv.dv_xname, (u_int32_t) result_buf[8]);
+ }
+ /*
+ * Position is done, notify producer with wakup or callback
+ */
+ if (cmd->dest_ready_callback == NULL) {
+ wakeup((caddr_t) &dma->result_ring[wake_pos]);
+ } else {
+ cmd->dest_ready_callback(cmd);
+ }
+
+ if (++dma->wakeup_ring_pos == AEON_DESCRIPT_RING_SIZE)
+ dma->wakeup_ring_pos = 0;
+ dma->slots_in_use--;
+ }
+
+}
+
+/*
+ * Purpose: Interrupt handler. The argument passed is the device
+ *	structure for the board that generated the interrupt.
+ *
+ * The status register uses the same bit positions as the interrupt
+ * enable register for "result done" (bit 20) and "command waiting"
+ * (bit 2), so the AEON_INTR_* names replace the previously hardcoded
+ * (1 << 20) / (1 << 2) values.
+ */
+int
+aeon_intr(arg)
+	void *arg;
+{
+	struct aeon_softc *sc = arg;
+	struct aeon_dma *dma = sc->sc_dma;
+	int r;
+
+	if (dma->slots_in_use == 0 &&
+	    (READ_REG_1(sc, AEON_STATUS) & AEON_INTR_ON_COMMAND_WAITING)) {
+		/*
+		 * If no slots to process and we received a "waiting on
+		 * command" interrupt, disable further "waiting on
+		 * command" interrupts (leave only "result done" enabled).
+		 */
+		WRITE_REG_1(sc, AEON_INTERRUPT_ENABLE,
+		    AEON_INTR_ON_RESULT_DONE);
+		r = 1;
+	} else {
+		aeon_intr_process_ring(sc, dma);
+		r = 1;
+	}
+
+	/*
+	 * Clear "result done" and "waiting on command ring" flags in status
+	 * register. If we still have slots to process and we received a
+	 * waiting interrupt, this will interrupt us again.
+	 */
+	WRITE_REG_1(sc, AEON_STATUS,
+	    AEON_INTR_ON_RESULT_DONE | AEON_INTR_ON_COMMAND_WAITING);
+	return (r);
+}
+
diff --git a/sys/dev/pci/hifn7751reg.h b/sys/dev/pci/hifn7751reg.h
new file mode 100644
index 00000000000..36a5f2837df
--- /dev/null
+++ b/sys/dev/pci/hifn7751reg.h
@@ -0,0 +1,313 @@
+/* $OpenBSD: hifn7751reg.h,v 1.1 1999/02/19 02:52:20 deraadt Exp $ */
+
+/*
+ * Invertex AEON driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ *
+ * Please send any comments, feedback, bug-fixes, or feature requests to
+ * software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __AEON_H__
+#define __AEON_H__
+
+#include <machine/endian.h>
+
+/*
+ * Some PCI configuration space offset defines. The names were made
+ * identical to the names used by the Linux kernel.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits */
+
+/*
+ * Some configurable values for the driver
+ */
+#define AEON_DESCRIPT_RING_SIZE 24
+#define AEON_MAX_DEVICES 4
+
+/*
+ * The values below should be a multiple of 4 -- and be large enough to handle
+ * any command the driver implements.
+ */
+#define AEON_MAX_COMMAND_LENGTH 120
+#define AEON_MAX_RESULT_LENGTH 16
+
+/*
+ * aeon_descriptor_t
+ *
+ * Holds an individual descriptor for any of the rings.  The board
+ * reads and writes these in place (it clears the VALID bit when a
+ * descriptor is retired), hence the volatile qualifiers.
+ */
+typedef struct aeon_descriptor {
+	volatile u_int32_t length;	/* length and status bits */
+	volatile u_int32_t pointer;	/* physical address of the data buffer */
+} aeon_descriptor_t;
+
+/*
+ * Masks for the "length" field of struct aeon_descriptor.
+ */
+#define AEON_DESCRIPT_MASK_DONE_IRQ (0x1 << 25)
+#define AEON_DESCRIPT_LAST (0x1 << 29)
+#define AEON_DESCRIPT_JUMP (0x1 << 30)
+#define AEON_DESCRIPT_VALID (0x1 << 31)
+
+/*
+ * aeon_callback_t
+ *
+ * Type for callback function when dest data is ready.
+ */
+typedef void (*aeon_callback_t)(aeon_command_t *);
+
+/*
+ * Data structure to hold all 4 rings and any other ring related data.
+ */
+struct aeon_dma {
+	/*
+	 * Descriptor rings.  We add +1 to the size to accommodate the
+	 * jump descriptor that wraps each ring back to its start.
+	 */
+	struct aeon_descriptor command_ring[AEON_DESCRIPT_RING_SIZE + 1];
+	struct aeon_descriptor source_ring[AEON_DESCRIPT_RING_SIZE + 1];
+	struct aeon_descriptor dest_ring[AEON_DESCRIPT_RING_SIZE + 1];
+	struct aeon_descriptor result_ring[AEON_DESCRIPT_RING_SIZE + 1];
+
+	/* Commands in flight, indexed by ring slot. */
+	aeon_command_t *aeon_commands[AEON_DESCRIPT_RING_SIZE ];
+
+	/* Per-slot raw command/result buffers the rings point at. */
+	u_char command_bufs[AEON_DESCRIPT_RING_SIZE][AEON_MAX_COMMAND_LENGTH];
+	u_char result_bufs[AEON_DESCRIPT_RING_SIZE][AEON_MAX_RESULT_LENGTH];
+
+	/*
+	 * Our current positions for insertion and removal from the
+	 * descriptor rings.
+	 */
+	u_int32_t ring_pos;		/* next slot to fill */
+	u_int32_t wakeup_ring_pos;	/* next completed slot to reap */
+	volatile u_int32_t slots_in_use;	/* decremented from interrupt context */
+};
+
+/*
+ * Holds data specific to a single AEON board.
+ */
+struct aeon_softc {
+	struct device sc_dv;		/* generic device */
+	void * sc_ih;			/* interrupt handler cookie */
+	u_int32_t is_dram_model;	/* 1=dram, 0=sram */
+
+	/* Register set 0 -- accessed via READ_REG_0()/WRITE_REG_0() */
+	bus_space_handle_t sc_sh0;
+	bus_space_tag_t sc_st0;
+
+	/* Register set 1 -- accessed via READ_REG_1()/WRITE_REG_1() */
+	bus_space_handle_t sc_sh1;
+	bus_space_tag_t sc_st1;
+
+	struct aeon_dma *sc_dma;	/* descriptor rings and slot buffers */
+};
+
+/*
+ * Register offsets in register set 0
+ */
+#define AEON_INIT_1 0x04
+#define AEON_RAM_CONFIG 0x0c
+#define AEON_EXPAND 0x08
+#define AEON_ENCRYPTION_LEVEL 0x14
+#define AEON_INIT_3 0x10
+#define AEON_INIT_2 0x1c
+
+#define WRITE_REG_0(sc,reg,val) \
+ bus_space_write_4((sc)->sc_st0, (sc)->sc_sh0, reg, val)
+#define READ_REG_0(sc,reg) \
+ bus_space_read_4((sc)->sc_st0, (sc)->sc_sh0, reg)
+
+/*
+ * Register offsets in register set 1
+ */
+#define AEON_COMMAND_RING_ADDR 0x0c
+#define AEON_SOURCE_RING_ADDR 0x1c
+#define AEON_RESULT_RING_ADDR 0x2c
+#define AEON_DEST_RING_ADDR 0x3c
+#define AEON_STATUS 0x40
+#define AEON_INTERRUPT_ENABLE 0x44
+
+#define AEON_DMA_CFG 0x48
+#define AEON_DMA_CFG_NOBOARDRESET 0x00000001
+#define AEON_DMA_CFG_NODMARESET 0x00000002
+#define AEON_DMA_CFG_NEED 0x00000004
+#define AEON_DMA_CFG_HOSTLAST 0x00000010
+
+#define WRITE_REG_1(sc,reg,val) \
+ bus_space_write_4((sc)->sc_st1, (sc)->sc_sh1, reg, val)
+#define READ_REG_1(sc,reg) \
+ bus_space_read_4((sc)->sc_st1, (sc)->sc_sh1, reg)
+
+/*
+ * Initial register values
+ */
+
+/*
+ * Status Register
+ *
+ * The value below enables polling on all 4 descriptor rings and
+ * writes a "1" to every status bit in the register. (Writing "1"
+ * clears the bit.)
+ */
+#define AEON_INIT_STATUS_REG ((1<<31)|(1<<23)|(1<<15)|(1<<7))
+
+/*
+ * Interrupt Enable Register
+ *
+ * Initial value sets all interrupts to off except the "mask done"
+ * interrupt of the result descriptor ring.
+ */
+#define AEON_INIT_INTERRUPT_ENABLE_REG (AEON_INTR_ON_RESULT_DONE)
+
+/*
+ * DMA Configuration Register
+ *
+ * Initial value sets the polling scalar and frequency, and puts
+ * the host (not the AEON board) in charge of "last" bits in the
+ * dest data and result descriptor rings.
+ */
+#define AEON_INIT_DMA_CONFIG_REG \
+ (AEON_DMA_CFG_NOBOARDRESET | AEON_DMA_CFG_NODMARESET | \
+ AEON_DMA_CFG_NEED | \
+ AEON_DMA_CFG_HOSTLAST | /* host controls last bit in all rings */ \
+ (AEON_POLL_SCALAR << 8) | /* setting poll scalar value */ \
+ (AEON_POLL_FREQUENCY << 16)) /* setting poll frequency value */
+
+/*
+ * RAM Configuration Register
+ *
+ * Initial value sets the encryption context size to 128 bytes (if using
+ * RC4 bump it to 512, but you'll decrease the number of available
+ * sessions). We don't configure multiple compression histories -- since
+ * IPSec doesn't use them.
+ *
+ * NOTE: Use the AEON_RAM_CONFIG_INIT() macro instead of the
+ * variable, since DRAM/SRAM detection is not determined statically.
+ */
+#define AEON_INIT_RAM_CONFIG_REG \
+ ((0x0 << 1) | /* RAM Encrypt: 0 for 128 bytes, 1 for 512 bytes */ \
+ (0x1 << 2) | /* RAM Comp cfg: 1 for single compression history */ \
+ 0x4B40) /* Setting fixed bits required by the register */
+
+/*
+ * Expand Register
+ *
+ * The only bit in this register is the expand bit at position 9. It's
+ * cleared by writing a 1 to it.
+ */
+#define AEON_INIT_EXPAND_REG (0x1 << 9)
+
+/*********************************************************************
+ * Structs for board commands
+ *
+ *********************************************************************/
+
+/*
+ * Structure to help build up the command data structure.  The base
+ * command is always the first thing written into a command buffer.
+ */
+typedef struct aeon_base_command {
+	u_int16_t masks;		/* AEON_BASE_CMD_* bits */
+	u_int16_t session_num;
+	u_int16_t total_source_count;	/* same value as source descriptor length */
+	u_int16_t total_dest_count;	/* same value as dest descriptor length */
+} aeon_base_command_t;
+
+#define AEON_BASE_CMD_MAC (0x1 << 10)
+#define AEON_BASE_CMD_CRYPT (0x1 << 11)
+#define AEON_BASE_CMD_DECODE (0x1 << 13)
+
+/*
+ * Structure to help build up the command data structure.  Written
+ * after the base command when AEON_BASE_CMD_CRYPT is set.
+ */
+typedef struct aeon_crypt_command {
+	u_int16_t masks;		/* AEON_CRYPT_CMD_* bits */
+	u_int16_t header_skip;		/* bytes passed through before the cipher */
+	u_int32_t source_count;		/* bytes to encrypt/decrypt (multiple of 8) */
+} aeon_crypt_command_t;
+
+#define AEON_CRYPT_CMD_ALG_MASK (0x3 << 0)
+#define AEON_CRYPT_CMD_ALG_DES (0x0 << 0)
+#define AEON_CRYPT_CMD_ALG_3DES (0x1 << 0)
+#define AEON_CRYPT_CMD_MODE_CBC (0x1 << 3)
+#define AEON_CRYPT_CMD_NEW_KEY (0x1 << 11)
+#define AEON_CRYPT_CMD_NEW_IV (0x1 << 12)
+
+/*
+ * Structure to help build up the command data structure.  Written
+ * after the base command when AEON_BASE_CMD_MAC is set.
+ */
+typedef struct aeon_mac_command {
+	u_int16_t masks;		/* AEON_MAC_CMD_* bits */
+	u_int16_t header_skip;		/* bytes passed through before MAC'ing */
+	u_int32_t source_count;		/* bytes to authenticate */
+} aeon_mac_command_t;
+
+#define AEON_MAC_CMD_ALG_MD5 (0x1 << 0)
+#define AEON_MAC_CMD_ALG_SHA1 (0x0 << 0)
+#define AEON_MAC_CMD_MODE_HMAC (0x0 << 2)
+#define AEON_MAC_CMD_TRUNC (0x1 << 4)
+#define AEON_MAC_CMD_APPEND (0x1 << 6)
+/*
+ * MAC POS IPSec initiates authentication after encryption on encodes
+ * and before decryption on decodes.
+ */
+#define AEON_MAC_CMD_POS_IPSEC (0x2 << 8)
+#define AEON_MAC_CMD_NEW_KEY (0x1 << 11)
+
+/*
+ * Structure with all fields necessary to write the command buffer.
+ * We build it up while interrupts are on, then use it to write out
+ * the command buffer quickly while interrupts are off.
+ */
+typedef struct aeon_command_buf_data {
+	aeon_base_command_t base_cmd;
+	aeon_mac_command_t mac_cmd;
+	aeon_crypt_command_t crypt_cmd;
+	const u_int8_t *mac_key;	/* new MAC key, when MAC NEW_KEY is set */
+	const u_int8_t *crypt_key;	/* new crypt key, when crypt NEW_KEY is set */
+	const u_int8_t *initial_vector;	/* IV, written when crypt NEW_IV is set */
+} aeon_command_buf_data_t;
+
+/*
+ * Values for the interrupt enable register
+ */
+#define AEON_INTR_ON_RESULT_DONE (1 << 20)
+#define AEON_INTR_ON_COMMAND_WAITING (1 << 2)
+
+/*
+ * The poll frequency and poll scalar defines are unshifted values used
+ * to set fields in the DMA Configuration Register.
+ */
+#ifndef AEON_POLL_FREQUENCY
+#define AEON_POLL_FREQUENCY 0x1
+#endif
+
+#ifndef AEON_POLL_SCALAR
+#define AEON_POLL_SCALAR 0x0
+#endif
+
+#endif /* __AEON_H__ */
diff --git a/sys/dev/pci/hifn7751var.h b/sys/dev/pci/hifn7751var.h
new file mode 100644
index 00000000000..0ee8dbf8c2d
--- /dev/null
+++ b/sys/dev/pci/hifn7751var.h
@@ -0,0 +1,287 @@
+/* $OpenBSD: hifn7751var.h,v 1.1 1999/02/19 02:52:20 deraadt Exp $ */
+
+/*
+ * Invertex AEON driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ *
+ * Please send any comments, feedback, bug-fixes, or feature requests to
+ * software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* NOTE(review): identifiers beginning with a double underscore are reserved
+ * for the implementation (C standard 7.1.3); consider AEON_EXPORT_H. */
+#ifndef __AEON_EXPORT_H__
+#define __AEON_EXPORT_H__
+
+/*
+ * Length values for cryptography
+ */
+#define AEON_DES_KEY_LENGTH 8 /* single-DES key, bytes */
+#define AEON_3DES_KEY_LENGTH 24 /* triple-DES key, bytes */
+#define AEON_MAX_CRYPT_KEY_LENGTH AEON_3DES_KEY_LENGTH
+#define AEON_IV_LENGTH 8 /* CBC initial vector, bytes */
+
+/*
+ * Length values for authentication
+ */
+#define AEON_MAC_KEY_LENGTH 64 /* MAC key, bytes */
+#define AEON_MD5_LENGTH 16 /* MD5 digest, bytes */
+#define AEON_SHA1_LENGTH 20 /* SHA-1 digest, bytes */
+#define AEON_MAC_TRUNC_LENGTH 12 /* truncated digest, bytes */
+
+/*
+ * aeon_command_t
+ *
+ * This is the control structure used to pass commands to aeon_crypto().
+ *
+ * flags
+ * -----
+ * Flags is the bitwise "or" values for command configuration. A single
+ * encrypt direction needs to be set:
+ *
+ * AEON_ENCODE or AEON_DECODE
+ *
+ * To use cryptography, a single crypto algorithm must be included:
+ *
+ * AEON_CRYPT_3DES or AEON_CRYPT_DES
+ *
+ * To use authentication, a single MAC algorithm must be included:
+ *
+ * AEON_MAC_MD5 or AEON_MAC_SHA1
+ *
+ * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
+ * If the value below is set, hash values are truncated or assumed
+ * truncated to 12 bytes:
+ *
+ * AEON_MAC_TRUNC
+ *
+ * Keys for encryption and authentication can be sent as part of a command,
+ * or the last key value used with a particular session can be retrieved
+ * and used again if either of these flags are not specified.
+ *
+ * AEON_CRYPT_NEW_KEY, AEON_MAC_NEW_KEY
+ *
+ * Whether we block or not waiting for the dest data to be ready is
+ * determined by whether a callback function is given. The other
+ * place we could block is when all the DMA rings are full. If
+ * it is not okay to block while waiting for an open slot in the
+ * rings, include in the following value:
+ *
+ * AEON_DMA_FULL_NOBLOCK
+ *
+ * result_status
+ * -------------
+ * result_status is a bitwise "or" of result values. The result_status
+ * values should not be considered valid until:
+ *
+ * callback routine NULL: aeon_crypto() returns
+ * callback routine set: callback routine called
+ *
+ * Right now there is only one result flag: AEON_MAC_BAD
+ * Its bit is set on decode operations using authentication when a
+ * hash result does not match the input hash value.
+ * The AEON_MAC_OK(r) macro can be used to help inspect this flag.
+ *
+ * session_num
+ * -----------
+ * A number between 0 and 2048 (for DRAM models) or a number between
+ * 0 and 768 (for SRAM models). Those who don't want to use session
+ * numbers should leave value at zero and send a new crypt key and/or
+ * new MAC key on every command. If you use session numbers and
+ * don't send a key with a command, the last key sent for that same
+ * session number will be used.
+ *
+ * Warning: Using session numbers and multiboard at the same time
+ * is currently broken.
+ *
+ * source_buf
+ * ----------
+ * The source buffer is used for DMA -- it must be a 4-byte aligned
+ * address to physically contiguous memory where encode / decode
+ * input is read from. In a decode operation using authentication,
+ * the final bytes of the buffer should contain the appropriate hash
+ * data.
+ *
+ * dest_buf
+ * --------
+ * The dest buffer is used for DMA -- it must be a 4-byte aligned
+ * address to physically contiguous memory where encoded / decoded
+ * output is written to. If desired, this buffer can be the same
+ * as the source buffer with no performance penalty. If
+ * authentication is used, the final bytes will always consist of
+ * the hashed value (even on decode operations).
+ *
+ * mac_header_skip
+ * ---------------
+ * The number of bytes of the source_buf that are skipped over before
+ * authentication begins. This must be a number between 0 and 2^16-1
+ * and can be used by IPSec implementers to skip over IP headers.
+ * *** Value ignored if authentication not used ***
+ *
+ * crypt_header_skip
+ * -----------------
+ * The number of bytes of the source_buf that are skipped over before
+ * the cryptographic operation begins. This must be a number between 0
+ * and 2^16-1. For IPSec, this number will always be 8 bytes larger
+ * than the auth_header_skip (to skip over the ESP header).
+ * *** Value ignored if cryptography not used ***
+ *
+ * source_length
+ * -------------
+ * Length of input data including all skipped headers. On decode
+ * operations using authentication, the length must also include
+ * the appended MAC hash (12, 16, or 20 bytes depending on algorithm
+ * and truncation settings).
+ *
+ * If encryption is used, the encryption payload must be a non-zero
+ * multiple of 8. On encode operations, the encryption payload size
+ * is (source_length - crypt_header_skip - (MAC hash size)). On
+ * decode operations, the encryption payload is
+ * (source_length - crypt_header_skip).
+ *
+ * dest_length
+ * -----------
+ * Length of the dest buffer. It must be at least as large as the
+ * source buffer when authentication is not used. When authentication
+ * is used on an encode operation, it must be at least as long as the
+ * source length plus an extra 12, 16, or 20 bytes to hold the MAC
+ * value (length of mac value varies with algorithm used). When
+ * authentication is used on decode operations, it must be at least
+ * as long as the source buffer minus 12, 16, or 20 bytes for the MAC
+ * value which is not included in the dest data. Unlike source_length,
+ * the dest_length does not have to be exact, values larger than required
+ * are fine.
+ *
+ * dest_ready_callback
+ * -------------------
+ * Callback routine called from AEON's interrupt handler. The routine
+ * must be quick and non-blocking. The callback routine is passed a
+ * pointer to the same aeon_command_t structure used to initiate the
+ * command.
+ *
+ * If this value is null, the aeon_crypto() routine will block until the
+ * dest data is ready.
+ *
+ * private_data
+ * ------------
+ * An unsigned long quantity (i.e. large enough to hold a pointer), that
+ * can be used by the callback routine if desired.
+ */
+/* Control structure passed to aeon_crypto(); see the field-by-field
+ * description in the comment block above. */
+typedef struct aeon_command {
+ u_int flags; /* bitwise OR of AEON_* command flags */
+ volatile u_int result_status; /* set by driver; AEON_MAC_BAD on auth failure */
+
+ u_short session_num; /* hw session slot; 0 when sessions unused */
+
+ /*
+ * You should be able to convert any of these arrays into pointers
+ * (if desired) without modifying code in aeon.c.
+ */
+ u_char initial_vector[AEON_IV_LENGTH]; /* IV for CBC */
+ u_char crypt_key[AEON_MAX_CRYPT_KEY_LENGTH]; /* DES/3DES key */
+ u_char mac_key[AEON_MAC_KEY_LENGTH]; /* MAC key */
+
+ void *source_buf; /* 4-byte aligned, physically contiguous input */
+ void *dest_buf; /* 4-byte aligned, physically contiguous output */
+
+ u_short mac_header_skip; /* bytes skipped before authentication */
+ u_short crypt_header_skip; /* bytes skipped before encryption */
+ u_short source_length; /* input length, including skipped headers */
+ u_short dest_length; /* capacity of dest_buf */
+
+ void (*dest_ready_callback)(struct aeon_command *); /* NULL = block */
+ u_long private_data; /* opaque value for the callback's use */
+} aeon_command_t;
+
+/*
+ * Return values for aeon_crypto()
+ */
+#define AEON_CRYPTO_SUCCESS 0
+#define AEON_CRYPTO_BAD_INPUT (-1) /* parenthesized: safe in any expression */
+#define AEON_CRYPTO_RINGS_FULL (-2)
+
+
+/*
+ * Defines for the "flags" parameter of aeon_command_t
+ * (previously mis-documented as "config"; the field is named flags).
+ */
+#define AEON_ENCODE 1
+#define AEON_DECODE 2
+#define AEON_CRYPT_3DES 4
+#define AEON_CRYPT_DES 8
+#define AEON_MAC_MD5 16
+#define AEON_MAC_SHA1 32
+#define AEON_MAC_TRUNC 64
+#define AEON_CRYPT_NEW_KEY 128
+#define AEON_MAC_NEW_KEY 256
+#define AEON_DMA_FULL_NOBLOCK 512
+
+#define AEON_USING_CRYPT(f) ((f) & (AEON_CRYPT_3DES|AEON_CRYPT_DES))
+#define AEON_USING_MAC(f) ((f) & (AEON_MAC_MD5|AEON_MAC_SHA1))
+
+/*
+ * Defines for the "result_status" parameter of aeon_command_t.
+ */
+#define AEON_MAC_BAD 1
+#define AEON_MAC_OK(r) (!((r) & AEON_MAC_BAD)) /* fully parenthesized expansion */
+
+#ifdef _KERNEL
+
+/**************************************************************************
+ *
+ * Function: aeon_crypto
+ *
+ * Purpose: Called by external drivers to begin an encryption on the
+ * AEON board.
+ *
+ * Blocking/Non-blocking Issues
+ * ============================
+ * If the dest_ready_callback field of the aeon_command structure
+ * is NULL, aeon_crypto() will block until the dest_data is ready --
+ * otherwise aeon_crypto() will return immediately and the
+ * dest_ready_callback routine will be called when the dest data is
+ * ready.
+ *
+ * The routine can also block when waiting for an open slot when all
+ * DMA rings are full. You can avoid this behaviour by sending the
+ * AEON_DMA_FULL_NOBLOCK as part of the command flags. This will
+ * make aeon_crypto() return immediately when the rings are full.
+ *
+ * Return Values
+ * =============
+ * 0 for success, negative values on error
+ *
+ * Defines for negative error codes are:
+ *
+ * AEON_CRYPTO_BAD_INPUT : The passed in command had invalid settings.
+ * AEON_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking
+ * behaviour was requested.
+ *
+ *************************************************************************/
+int aeon_crypto(aeon_command_t *command);
+
+#endif /* _KERNEL */
+
+#endif /* __AEON_EXPORT_H__ */