From: deraadt
Date: Thu, 16 Mar 2000 20:33:47 +0000 (+0000)
Subject: move aeon to hifn7751
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=c6d54241daff654cabe1d717a90cfd13dad2d85f;p=openbsd

move aeon to hifn7751
---

diff --git a/sys/dev/pci/aeon.c b/sys/dev/pci/aeon.c
deleted file mode 100644
index 015353a94cc..00000000000
--- a/sys/dev/pci/aeon.c
+++ /dev/null
@@ -1,1096 +0,0 @@
-/* $OpenBSD: aeon.c,v 1.9 2000/03/15 14:55:51 jason Exp $ */
-
-/*
- * Invertex AEON driver
- * Copyright (c) 1999 Invertex Inc. All rights reserved.
- * Copyright (c) 1999 Theo de Raadt
- *
- * This driver is based on a previous driver by Invertex, for which they
- * requested: Please send any comments, feedback, bug-fixes, or feature
- * requests to software@invertex.com.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include - -#undef AEON_DEBUG - -/* - * Prototypes and count for the pci_device structure - */ -int aeon_probe __P((struct device *, void *, void *)); -void aeon_attach __P((struct device *, struct device *, void *)); - -struct cfattach aeon_ca = { - sizeof(struct aeon_softc), aeon_probe, aeon_attach, -}; - -struct cfdriver aeon_cd = { - 0, "aeon", DV_DULL -}; - -void aeon_reset_board __P((struct aeon_softc *)); -int aeon_enable_crypto __P((struct aeon_softc *, pcireg_t)); -void aeon_init_dma __P((struct aeon_softc *)); -void aeon_init_pci_registers __P((struct aeon_softc *)); -int aeon_checkram __P((struct aeon_softc *)); -int aeon_intr __P((void *)); -u_int aeon_write_command __P((const struct aeon_command_buf_data *, - u_int8_t *)); -int aeon_build_command __P((const struct aeon_command * cmd, - struct aeon_command_buf_data *)); -int aeon_mbuf __P((struct mbuf *, int *np, long *pp, int *lp, int maxp, - int *nicealign)); -u_int32_t aeon_next_signature __P((u_int a, u_int cnt)); - -/* - * Used for round robin crypto requests - */ -int aeon_num_devices = 0; -struct aeon_softc *aeon_devices[AEON_MAX_DEVICES]; - -int -aeon_probe(parent, match, aux) - struct device *parent; - void *match; - void *aux; -{ - struct pci_attach_args *pa = (struct pci_attach_args *) aux; - - if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INVERTEX && - PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INVERTEX_AEON) - return (1); - if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN && - PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7751) - return (1); - return (0); -} - -void -aeon_attach(parent, self, aux) - struct device *parent, *self; - void *aux; -{ - struct aeon_softc *sc = (struct aeon_softc *)self; - struct pci_attach_args *pa = aux; - pci_chipset_tag_t pc = pa->pa_pc; - pci_intr_handle_t ih; - const char *intrstr = NULL; - bus_addr_t iobase; - bus_size_t iosize; - u_int32_t cmd; - bus_dma_segment_t seg; - bus_dmamap_t dmamap; - int rseg; - caddr_t kva; - - cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); - cmd |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE | - PCI_COMMAND_MASTER_ENABLE; - pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd); - cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); - - if (!(cmd & PCI_COMMAND_MEM_ENABLE)) { - printf(": failed to enable memory mapping\n"); - return; - } - - if (pci_mem_find(pc, pa->pa_tag, AEON_BAR0, &iobase, &iosize, NULL)) { - printf(": can't find mem space\n"); - return; - } - if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sc_sh0)) { - printf(": can't map mem space\n"); - return; - } - sc->sc_st0 = pa->pa_memt; - - if (pci_mem_find(pc, pa->pa_tag, AEON_BAR1, &iobase, &iosize, NULL)) { - printf(": can't find mem space\n"); - return; - } - if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sc_sh1)) { - printf(": can't map mem space\n"); - return; - } - sc->sc_st1 = pa->pa_memt; -#ifdef AEON_DEBUG - printf(" mem %x %x", sc->sc_sh0, sc->sc_sh1); -#endif - - sc->sc_dmat = pa->pa_dmat; - if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0, - &seg, 1, &rseg, BUS_DMA_NOWAIT)) { - printf(": can't alloc dma buffer\n", sc->sc_dv.dv_xname); - return; - } - if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva, - BUS_DMA_NOWAIT)) { - printf(": can't map dma buffers (%d bytes)\n", - sc->sc_dv.dv_xname, sizeof(*sc->sc_dma)); - bus_dmamem_free(sc->sc_dmat, 
&seg, rseg); - return; - } - if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1, - sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) { - printf(": can't create dma map\n", sc->sc_dv.dv_xname); - bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); - bus_dmamem_free(sc->sc_dmat, &seg, rseg); - return; - } - if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma), - NULL, BUS_DMA_NOWAIT)) { - printf(": can't load dma map\n", sc->sc_dv.dv_xname); - bus_dmamap_destroy(sc->sc_dmat, dmamap); - bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); - bus_dmamem_free(sc->sc_dmat, &seg, rseg); - return; - } - sc->sc_dma = (struct aeon_dma *)kva; - bzero(sc->sc_dma, sizeof(*sc->sc_dma)); - - aeon_reset_board(sc); - - if (aeon_enable_crypto(sc, pa->pa_id) != 0) { - printf("%s: crypto enabling failed\n", sc->sc_dv.dv_xname); - return; - } - - aeon_init_dma(sc); - aeon_init_pci_registers(sc); - - if (aeon_checkram(sc) != 0) - sc->sc_drammodel = 1; - - /* - * Reinitialize again, since the DRAM/SRAM detection shifted our ring - * pointers and may have changed the value we send to the RAM Config - * Register. - */ - aeon_reset_board(sc); - aeon_init_dma(sc); - aeon_init_pci_registers(sc); - - if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin, - pa->pa_intrline, &ih)) { - printf(": couldn't map interrupt\n"); - return; - } - intrstr = pci_intr_string(pc, ih); - sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, aeon_intr, sc, - self->dv_xname); - if (sc->sc_ih == NULL) { - printf(": couldn't establish interrupt\n"); - if (intrstr != NULL) - printf(" at %s", intrstr); - printf("\n"); - return; - } - - aeon_devices[aeon_num_devices] = sc; - aeon_num_devices++; - - printf(", %s\n", intrstr); -} - -/* - * Resets the board. Values in the regesters are left as is - * from the reset (i.e. initial values are assigned elsewhere). - */ -void -aeon_reset_board(sc) - struct aeon_softc *sc; -{ - /* - * Set polling in the DMA configuration register to zero. 0x7 avoids - * resetting the board and zeros out the other fields. - */ - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_MSTRESET | - AEON_DMACNFG_DMARESET | AEON_DMACNFG_MODE); - - /* - * Now that polling has been disabled, we have to wait 1 ms - * before resetting the board. - */ - DELAY(1000); - - /* Reset the board. We do this by writing zeros to the DMA reset - * field, the BRD reset field, and the manditory 1 at position 2. - * Every other field is set to zero. - */ - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_MODE); - - /* - * Wait another millisecond for the board to reset. - */ - DELAY(1000); - - /* - * Turn off the reset! (No joke.) - */ - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_MSTRESET | - AEON_DMACNFG_DMARESET | AEON_DMACNFG_MODE); -} - -u_int32_t -aeon_next_signature(a, cnt) - u_int a, cnt; -{ - int i, v; - - for (i = 0; i < cnt; i++) { - - /* get the parity */ - v = a & 0x80080125; - v ^= v >> 16; - v ^= v >> 8; - v ^= v >> 4; - v ^= v >> 2; - v ^= v >> 1; - - a = (v & 1) ^ (a << 1); - } - - return a; -} - -struct pci2id { - u_short pci_vendor; - u_short pci_prod; - char card_id[13]; -} pci2id[] = { - { - PCI_VENDOR_INVERTEX, - PCI_PRODUCT_INVERTEX_AEON, - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00 } - }, { - PCI_VENDOR_HIFN, - PCI_PRODUCT_HIFN_7751, - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00 } - }, -}; - -/* - * Checks to see if crypto is already enabled. If crypto isn't enable, - * "aeon_enable_crypto" is called to enable it. 
The check is important, - * as enabling crypto twice will lock the board. - */ -int -aeon_enable_crypto(sc, pciid) - struct aeon_softc *sc; - pcireg_t pciid; -{ - u_int32_t dmacfg, ramcfg, encl, addr, i; - char *offtbl = NULL; - - for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { - if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) && - pci2id[i].pci_prod == PCI_PRODUCT(pciid)) { - offtbl = pci2id[i].card_id; - break; - } - } - - if (offtbl == NULL) { -#ifdef AEON_DEBUG - printf("%s: Unknown card!\n", sc->sc_dv.dv_xname); -#endif - return (1); - } - - ramcfg = READ_REG_0(sc, AEON_0_PUCNFG); - dmacfg = READ_REG_1(sc, AEON_1_DMA_CNFG); - - /* - * The RAM config register's encrypt level bit needs to be set before - * every read performed on the encryption level register. - */ - WRITE_REG_0(sc, AEON_0_PUCNFG, ramcfg | AEON_PUCNFG_CHIPID); - - encl = READ_REG_0(sc, AEON_0_PUSTAT); - - /* - * Make sure we don't re-unlock. Two unlocks kills chip until the - * next reboot. - */ - if (encl == 0x1020 || encl == 0x1120) { -#ifdef AEON_DEBUG - printf("%s: Strong Crypto already enabled!\n", - sc->sc_dv.dv_xname); -#endif - WRITE_REG_0(sc, AEON_0_PUCNFG, ramcfg); - WRITE_REG_1(sc, AEON_1_DMA_CNFG, dmacfg); - return 0; /* success */ - } - - if (encl != 0 && encl != 0x3020) { -#ifdef AEON_DEBUG - printf("%: Unknown encryption level\n", sc->sc_dv.dv_xname); -#endif - return 1; - } - - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_UNLOCK | - AEON_DMACNFG_MSTRESET | AEON_DMACNFG_DMARESET | AEON_DMACNFG_MODE); - addr = READ_REG_1(sc, AEON_UNLOCK_SECRET1); - WRITE_REG_1(sc, AEON_UNLOCK_SECRET2, 0); - - for (i = 0; i <= 12; i++) { - addr = aeon_next_signature(addr, offtbl[i] + 0x101); - WRITE_REG_1(sc, AEON_UNLOCK_SECRET2, addr); - - DELAY(1000); - } - - WRITE_REG_0(sc, AEON_0_PUCNFG, ramcfg | AEON_PUCNFG_CHIPID); - encl = READ_REG_0(sc, AEON_0_PUSTAT); - -#ifdef AEON_DEBUG - if (encl != 0x1020 && encl != 0x1120) - printf("Encryption engine is permanently locked until next system reset."); - else - printf("Encryption engine enabled successfully!"); -#endif - - WRITE_REG_0(sc, AEON_0_PUCNFG, ramcfg); - WRITE_REG_1(sc, AEON_1_DMA_CNFG, dmacfg); - - switch(encl) { - case 0x3020: - printf(": no encr/auth"); - break; - case 0x1020: - printf(": DES enabled"); - break; - case 0x1120: - printf(": fully enabled"); - break; - default: - printf(": disabled"); - break; - } - - return 0; -} - -/* - * Give initial values to the registers listed in the "Register Space" - * section of the AEON Software Development reference manual. 
- */ -void -aeon_init_pci_registers(sc) - struct aeon_softc *sc; -{ - /* write fixed values needed by the Initialization registers */ - WRITE_REG_0(sc, AEON_0_PUCTRL, AEON_PUCTRL_DMAENA); - WRITE_REG_0(sc, AEON_0_FIFOCNFG, AEON_FIFOCNFG_THRESHOLD); - WRITE_REG_0(sc, AEON_0_PUIER, AEON_PUIER_DSTOVER); - - /* write all 4 ring address registers */ - WRITE_REG_1(sc, AEON_1_DMA_CRAR, vtophys(sc->sc_dma->cmdr)); - WRITE_REG_1(sc, AEON_1_DMA_SRAR, vtophys(sc->sc_dma->srcr)); - WRITE_REG_1(sc, AEON_1_DMA_DRAR, vtophys(sc->sc_dma->dstr)); - WRITE_REG_1(sc, AEON_1_DMA_RRAR, vtophys(sc->sc_dma->resr)); - - /* write status register */ - WRITE_REG_1(sc, AEON_1_DMA_CSR, AEON_DMACSR_D_CTRL_ENA | - AEON_DMACSR_R_CTRL_ENA | AEON_DMACSR_S_CTRL_ENA | - AEON_DMACSR_C_CTRL_ENA); - WRITE_REG_1(sc, AEON_1_DMA_IER, AEON_DMAIER_R_DONE); - -#if 0 -#if BYTE_ORDER == BIG_ENDIAN - (0x1 << 7) | -#endif -#endif - WRITE_REG_0(sc, AEON_0_PUCNFG, AEON_PUCNFG_COMPSING | - AEON_PUCNFG_DRFR_128 | AEON_PUCNFG_TCALLPHASES | - AEON_PUCNFG_TCDRVTOTEM | AEON_PUCNFG_BUS32 | - (sc->sc_drammodel ? AEON_PUCNFG_DRAM : AEON_PUCNFG_SRAM)); - - WRITE_REG_0(sc, AEON_0_PUISR, AEON_PUISR_DSTOVER); - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_MSTRESET | - AEON_DMACNFG_DMARESET | AEON_DMACNFG_MODE | - AEON_DMACNFG_LAST | - ((AEON_POLL_FREQUENCY << 16 ) & AEON_DMACNFG_POLLFREQ) | - ((AEON_POLL_SCALAR << 8) & AEON_DMACNFG_POLLINVAL)); -} - -/* - * There are both DRAM and SRAM models of the aeon board. - * A bit in the "ram configuration register" needs to be - * set according to the model. The driver will guess one - * way or the other -- and then call this routine to verify. - * - * 0: RAM setting okay, -1: Current RAM setting in error - */ -int -aeon_checkram(sc) - struct aeon_softc *sc; -{ - aeon_base_command_t write_command = {(0x3 << 13), 0, 8, 0}; - aeon_base_command_t read_command = {(0x2 << 13), 0, 0, 8}; - u_int8_t data[8] = {'1', '2', '3', '4', '5', '6', '7', '8'}; - u_int8_t *source_buf, *dest_buf; - struct aeon_dma *dma = sc->sc_dma; - const u_int32_t masks = AEON_D_VALID | AEON_D_LAST | - AEON_D_MASKDONEIRQ; - -#if (AEON_D_RSIZE < 3) -#error "descriptor ring size too small DRAM/SRAM check" -#endif - - /* - * We steal the 8 bytes needed for both the source and dest buffers - * from the 3rd slot that the DRAM/SRAM test won't use. - */ - source_buf = sc->sc_dma->command_bufs[2]; - dest_buf = sc->sc_dma->result_bufs[2]; - - /* build write command */ - *(aeon_base_command_t *) sc->sc_dma->command_bufs[0] = write_command; - bcopy(data, source_buf, sizeof(data)); - - dma->srcr[0].p = vtophys(source_buf); - dma->dstr[0].p = vtophys(dest_buf); - - dma->cmdr[0].l = 16 | masks; - dma->srcr[0].l = 8 | masks; - dma->dstr[0].l = 8 | masks; - dma->resr[0].l = AEON_MAX_RESULT | masks; - - DELAY(1000); /* let write command execute */ - if (dma->resr[0].l & AEON_D_VALID) - printf("%s: SRAM/DRAM detection error -- result[0] valid still set\n", - sc->sc_dv.dv_xname); - - /* Build read command */ - *(aeon_base_command_t *) sc->sc_dma->command_bufs[1] = read_command; - - dma->srcr[1].p = vtophys(source_buf); - dma->dstr[1].p = vtophys(dest_buf); - dma->cmdr[1].l = 16 | masks; - dma->srcr[1].l = 8 | masks; - dma->dstr[1].l = 8 | masks; - dma->resr[1].l = AEON_MAX_RESULT | masks; - - DELAY(1000); /* let read command execute */ - if (dma->resr[1].l & AEON_D_VALID) - printf("%s: SRAM/DRAM detection error -- result[1] valid still set\n", - sc->sc_dv.dv_xname); - return (memcmp(dest_buf, data, sizeof(data)) == 0) ? 
0 : -1; -} - -/* - * Initialize the descriptor rings. - */ -void -aeon_init_dma(sc) - struct aeon_softc *sc; -{ - struct aeon_dma *dma = sc->sc_dma; - int i; - - /* initialize static pointer values */ - for (i = 0; i < AEON_D_CMD_RSIZE; i++) - dma->cmdr[i].p = vtophys(dma->command_bufs[i]); - for (i = 0; i < AEON_D_RES_RSIZE; i++) - dma->resr[i].p = vtophys(dma->result_bufs[i]); - - dma->cmdr[AEON_D_CMD_RSIZE].p = vtophys(dma->cmdr); - dma->srcr[AEON_D_SRC_RSIZE].p = vtophys(dma->srcr); - dma->dstr[AEON_D_DST_RSIZE].p = vtophys(dma->dstr); - dma->resr[AEON_D_RES_RSIZE].p = vtophys(dma->resr); -} - -/* - * Writes out the raw command buffer space. Returns the - * command buffer size. - */ -u_int -aeon_write_command(const struct aeon_command_buf_data *cmd_data, - u_int8_t *command_buf) -{ - u_int8_t *command_buf_pos = command_buf; - const aeon_base_command_t *base_cmd = &cmd_data->base_cmd; - const aeon_mac_command_t *mac_cmd = &cmd_data->mac_cmd; - const aeon_crypt_command_t *crypt_cmd = &cmd_data->crypt_cmd; - int using_mac = base_cmd->masks & AEON_BASE_CMD_MAC; - int using_crypt = base_cmd->masks & AEON_BASE_CMD_CRYPT; - - /* write base command structure */ - *((aeon_base_command_t *) command_buf_pos) = *base_cmd; - command_buf_pos += sizeof(aeon_base_command_t); - - /* Write MAC command structure */ - if (using_mac) { - *((aeon_mac_command_t *) command_buf_pos) = *mac_cmd; - command_buf_pos += sizeof(aeon_mac_command_t); - } - - /* Write encryption command structure */ - if (using_crypt) { - *((aeon_crypt_command_t *) command_buf_pos) = *crypt_cmd; - command_buf_pos += sizeof(aeon_crypt_command_t); - } - - /* write MAC key */ - if (mac_cmd->masks & AEON_MAC_NEW_KEY) { - bcopy(cmd_data->mac, command_buf_pos, AEON_MAC_KEY_LENGTH); - command_buf_pos += AEON_MAC_KEY_LENGTH; - } - - /* Write crypto key */ - if (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_KEY) { - u_int32_t alg = crypt_cmd->masks & AEON_CRYPT_CMD_ALG_MASK; - u_int32_t key_len = (alg == AEON_CRYPT_CMD_ALG_DES) ? - AEON_DES_KEY_LENGTH : AEON_3DES_KEY_LENGTH; - bcopy(cmd_data->ck, command_buf_pos, key_len); - command_buf_pos += key_len; - } - - /* Write crypto iv */ - if (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_IV) { - bcopy(cmd_data->iv, command_buf_pos, AEON_IV_LENGTH); - command_buf_pos += AEON_IV_LENGTH; - } - - /* Write 8 zero bytes we're not sending crypt or MAC structures */ - if (!(base_cmd->masks & AEON_BASE_CMD_MAC) && - !(base_cmd->masks & AEON_BASE_CMD_CRYPT)) { - *((u_int32_t *) command_buf_pos) = 0; - command_buf_pos += 4; - *((u_int32_t *) command_buf_pos) = 0; - command_buf_pos += 4; - } - - if ((command_buf_pos - command_buf) > AEON_MAX_COMMAND) - printf("aeon: Internal Error -- Command buffer overflow.\n"); - return command_buf_pos - command_buf; -} - -/* - * Check command input and build up structure to write - * the command buffer later. Returns 0 on success and - * -1 if given bad command input was given. 
- */ -int -aeon_build_command(const struct aeon_command *cmd, - struct aeon_command_buf_data * cmd_buf_data) -{ -#define AEON_COMMAND_CHECKING - - u_int32_t flags = cmd->flags; - aeon_base_command_t *base_cmd = &cmd_buf_data->base_cmd; - aeon_mac_command_t *mac_cmd = &cmd_buf_data->mac_cmd; - aeon_crypt_command_t *crypt_cmd = &cmd_buf_data->crypt_cmd; - u_int mac_length; -#ifdef AEON_COMMAND_CHECKING - int dest_diff; -#endif - - bzero(cmd_buf_data, sizeof(struct aeon_command_buf_data)); - -#ifdef AEON_COMMAND_CHECKING - if (!(!!(flags & AEON_DECODE) ^ !!(flags & AEON_ENCODE))) { - printf("aeon: encode/decode setting error\n"); - return -1; - } - if ((flags & AEON_CRYPT_DES) && (flags & AEON_CRYPT_3DES)) { - printf("aeon: Too many crypto algorithms set in command\n"); - return -1; - } - if ((flags & AEON_MAC_SHA1) && (flags & AEON_MAC_MD5)) { - printf("aeon: Too many MAC algorithms set in command\n"); - return -1; - } -#endif - - - /* - * Compute the mac value length -- leave at zero if not MAC'ing - */ - mac_length = 0; - if (AEON_USING_MAC(flags)) { - mac_length = (flags & AEON_MAC_TRUNC) ? AEON_MAC_TRUNC_LENGTH : - ((flags & AEON_MAC_MD5) ? AEON_MD5_LENGTH : AEON_SHA1_LENGTH); - } -#ifdef AEON_COMMAND_CHECKING - /* - * Check for valid src/dest buf sizes - */ - - /* - * XXX XXX We need to include header counts into all these - * checks!!!! - */ - - if (cmd->src_npa <= mac_length) { - printf("aeon: command source buffer has no data\n"); - return -1; - } - dest_diff = (flags & AEON_ENCODE) ? mac_length : -mac_length; - if (cmd->dst_npa < cmd->dst_npa + dest_diff) { - printf("aeon: command dest length %u too short -- needed %u\n", - cmd->dst_npa, cmd->dst_npa + dest_diff); - return -1; - } -#endif - - /* - * Set MAC bit - */ - if (AEON_USING_MAC(flags)) - base_cmd->masks |= AEON_BASE_CMD_MAC; - - /* Set Encrypt bit */ - if (AEON_USING_CRYPT(flags)) - base_cmd->masks |= AEON_BASE_CMD_CRYPT; - - /* - * Set Decode bit - */ - if (flags & AEON_DECODE) - base_cmd->masks |= AEON_BASE_CMD_DECODE; - - /* - * Set total source and dest counts. These values are the same as the - * values set in the length field of the source and dest descriptor rings. - */ - base_cmd->total_source_count = cmd->src_l; - base_cmd->total_dest_count = cmd->dst_l; - - /* - * XXX -- We need session number range checking... - */ - base_cmd->session_num = cmd->session_num; - - /** - ** Building up mac command - ** - **/ - if (AEON_USING_MAC(flags)) { - - /* - * Set the MAC algorithm and trunc setting - */ - mac_cmd->masks |= (flags & AEON_MAC_MD5) ? - AEON_MAC_CMD_ALG_MD5 : AEON_MAC_CMD_ALG_SHA1; - if (flags & AEON_MAC_TRUNC) - mac_cmd->masks |= AEON_MAC_CMD_TRUNC; - - /* - * We always use HMAC mode, assume MAC values are appended to the - * source buffer on decodes and we append them to the dest buffer - * on encodes, and order auth/encryption engines as needed by - * IPSEC - */ - mac_cmd->masks |= AEON_MAC_CMD_MODE_HMAC | AEON_MAC_CMD_APPEND | - AEON_MAC_CMD_POS_IPSEC; - - /* - * Setup to send new MAC key if needed. - */ - if (flags & AEON_MAC_NEW_KEY) { - mac_cmd->masks |= AEON_MAC_CMD_NEW_KEY; - cmd_buf_data->mac = cmd->mac; - } - /* - * Set the mac header skip and source count. - */ - mac_cmd->header_skip = cmd->mac_header_skip; - mac_cmd->source_count = cmd->src_npa - cmd->mac_header_skip; - if (flags & AEON_DECODE) - mac_cmd->source_count -= mac_length; - } - - if (AEON_USING_CRYPT(flags)) { - /* - * Set the encryption algorithm bits. - */ - crypt_cmd->masks |= (flags & AEON_CRYPT_DES) ? 
- AEON_CRYPT_CMD_ALG_DES : AEON_CRYPT_CMD_ALG_3DES; - - /* We always use CBC mode and send a new IV (as needed by - * IPSec). */ - crypt_cmd->masks |= AEON_CRYPT_CMD_MODE_CBC | AEON_CRYPT_CMD_NEW_IV; - - /* - * Setup to send new encrypt key if needed. - */ - if (flags & AEON_CRYPT_CMD_NEW_KEY) { - crypt_cmd->masks |= AEON_CRYPT_CMD_NEW_KEY; - cmd_buf_data->ck = cmd->ck; - } - /* - * Set the encrypt header skip and source count. - */ - crypt_cmd->header_skip = cmd->crypt_header_skip; - crypt_cmd->source_count = cmd->src_npa - cmd->crypt_header_skip; - if (flags & AEON_DECODE) - crypt_cmd->source_count -= mac_length; - - -#ifdef AEON_COMMAND_CHECKING - if (crypt_cmd->source_count % 8 != 0) { - printf("aeon: Error -- encryption source %u not a multiple of 8!\n", - crypt_cmd->source_count); - return -1; - } -#endif - } - cmd_buf_data->iv = cmd->iv; - - -#if 1 - printf("aeon: command parameters" - " -- session num %u" - " -- base t.s.c: %u" - " -- base t.d.c: %u" - " -- mac h.s. %u s.c. %u" - " -- crypt h.s. %u s.c. %u\n", - base_cmd->session_num, base_cmd->total_source_count, - base_cmd->total_dest_count, mac_cmd->header_skip, - mac_cmd->source_count, crypt_cmd->header_skip, - crypt_cmd->source_count); -#endif - - return 0; /* success */ -} - -int -aeon_mbuf(m, np, pp, lp, maxp, nicep) - struct mbuf *m; - int *np; - long *pp; - int *lp; - int maxp; - int *nicep; -{ - struct mbuf *m0; - int npa = *np; - int tlen = 0; - - /* generate a [pa,len] array from an mbuf */ - npa = 0; - for (m0 = m; m; m = m->m_next) { - void *va; - long pg, npg; - int len, off; - - va = m->m_data; - len = m->m_len; - tlen += len; - - lp[npa] = len; - pp[npa] = vtophys(va); - pg = pp[npa] & ~PAGE_MASK; - off = (long)va & PAGE_MASK; - - while (len + off > PAGE_SIZE) { - va = va + PAGE_SIZE - off; - npg = vtophys(va); - if (npg != pg) { - /* FUCKED UP condition */ - npa++; - continue; - } - lp[npa] = PAGE_SIZE - off; - off = 0; - ++npa; - if (npa > maxp) - return (0); - lp[npa] = len - (PAGE_SIZE - off); - len -= lp[npa]; - pp[npa] = vtophys(va); - } - } - - if (nicep) { - int nice = 1; - int i; - - /* see if each [pa,len] entry is long-word aligned */ - for (i = 0; i < npa; i++) - if ((lp[i] & 3) || (pp[i] & 3)) - nice = 0; - *nicep = nice; - } - - *np = npa; - return (tlen); -} - -int -aeon_crypto(struct aeon_command *cmd) -{ - u_int32_t cmdlen; - static u_int32_t current_device = 0; - struct aeon_softc *sc; - struct aeon_dma *dma; - struct aeon_command_buf_data cmd_buf_data; - int cmdi, srci, dsti, resi, nicealign = 0; - int error, s, i; - - /* Pick the aeon board to send the data to. Right now we use a round - * robin approach. 
*/ - sc = aeon_devices[current_device++]; - if (current_device == aeon_num_devices) - current_device = 0; - dma = sc->sc_dma; - - if (cmd->src_npa == 0 && cmd->src_m) - cmd->src_l = aeon_mbuf(cmd->src_m, &cmd->src_npa, - cmd->src_packp, cmd->src_packl, MAX_SCATTER, &nicealign); - if (cmd->src_l == 0) - return (-1); - - if (nicealign == 0) { - cmd->dst_l = cmd->src_l; - MGETHDR(cmd->dst_m, M_DONTWAIT, MT_DATA); - if (cmd->dst_m == NULL) - return (-1); - if (cmd->src_l > MHLEN) { - MCLGET(cmd->dst_m, M_DONTWAIT); - if ((cmd->dst_m->m_flags & M_EXT) == 0) { - m_freem(cmd->dst_m); - return (-1); - } - } - } else - cmd->dst_m = cmd->src_m; - - cmd->dst_l = aeon_mbuf(cmd->dst_m, &cmd->dst_npa, - cmd->dst_packp, cmd->dst_packl, MAX_SCATTER, NULL); - if (cmd->dst_l == 0) - return (-1); - - if (aeon_build_command(cmd, &cmd_buf_data) != 0) - return AEON_CRYPTO_BAD_INPUT; - - printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", - sc->sc_dv.dv_xname, - READ_REG_1(sc, AEON_1_DMA_CSR), READ_REG_1(sc, AEON_1_DMA_IER), - dma->cmdu, dma->srcu, dma->dstu, dma->resu, cmd->src_npa, - cmd->dst_npa); - - s = splimp(); - - /* - * need 1 cmd, and 1 res - * need N src, and N dst - */ - while (dma->cmdu+1 > AEON_D_CMD_RSIZE || - dma->srcu+cmd->src_npa > AEON_D_SRC_RSIZE || - dma->dstu+cmd->dst_npa > AEON_D_DST_RSIZE || - dma->resu+1 > AEON_D_RES_RSIZE) { - if (cmd->flags & AEON_DMA_FULL_NOBLOCK) { - splx(s); - return (AEON_CRYPTO_RINGS_FULL); - } - tsleep((caddr_t) dma, PZERO, "aeonring", 1); - } - - if (dma->cmdi == AEON_D_CMD_RSIZE) { - cmdi = 0, dma->cmdi = 1; - dma->cmdr[AEON_D_CMD_RSIZE].l = AEON_D_VALID | AEON_D_LAST | - AEON_D_MASKDONEIRQ | AEON_D_JUMP; - } else - cmdi = dma->cmdi++; - - if (dma->resi == AEON_D_RES_RSIZE) { - resi = 0, dma->resi = 1; - dma->resr[AEON_D_RES_RSIZE].l = AEON_D_VALID | AEON_D_LAST | - AEON_D_MASKDONEIRQ | AEON_D_JUMP; - } else - resi = dma->resi++; - - cmdlen = aeon_write_command(&cmd_buf_data, dma->command_bufs[cmdi]); - dma->aeon_commands[cmdi] = cmd; - /* .p for command/result already set */ - dma->cmdr[cmdi].l = cmdlen | AEON_D_VALID | AEON_D_LAST | - AEON_D_MASKDONEIRQ; - dma->cmdu += 1; - - for (i = 0; i < cmd->src_npa; i++) { - int last = 0; - - if (i == cmd->src_npa-1) - last = AEON_D_LAST; - - if (dma->srci == AEON_D_SRC_RSIZE) { - srci = 0, dma->srci = 1; - dma->srcr[AEON_D_SRC_RSIZE].l = AEON_D_VALID | - AEON_D_MASKDONEIRQ | AEON_D_JUMP; - } else - srci = dma->srci++; - dma->srcr[srci].p = vtophys(cmd->src_packp[i]); - dma->srcr[srci].l = cmd->src_packl[i] | AEON_D_VALID | - AEON_D_MASKDONEIRQ | last; - } - dma->srcu += cmd->src_npa; - - for (i = 0; i < cmd->dst_npa; i++) { - int last = 0; - - if (dma->dsti == AEON_D_DST_RSIZE) { - dsti = 0, dma->dsti = 1; - dma->dstr[AEON_D_DST_RSIZE].l = AEON_D_VALID | - AEON_D_MASKDONEIRQ | AEON_D_JUMP; - } else - dsti = dma->dsti++; - dma->dstr[dsti].p = vtophys(cmd->dst_packp[i]); - dma->dstr[dsti].l = cmd->dst_packl[i] | AEON_D_VALID | - AEON_D_MASKDONEIRQ | last; - } - dma->dstu += cmd->dst_npa; - - /* - * Unlike other descriptors, we don't mask done interrupt from - * result descriptor. - */ - dma->resr[resi].l = AEON_MAX_RESULT | AEON_D_VALID | AEON_D_LAST; - dma->resu += 1; - - /* - * We don't worry about missing an interrupt (which a waiting - * on command interrupt salvages us from), unless there is more - * than one command in the queue. 
- */ - if (dma->slots_in_use > 1) { - WRITE_REG_1(sc, AEON_1_DMA_IER, - AEON_DMAIER_R_DONE | AEON_DMAIER_C_WAIT); - } - - /* - * If not given a callback routine, we block until the dest data is - * ready. (Setting interrupt timeout at 3 seconds.) - */ - if (cmd->dest_ready_callback == NULL) { - printf("%s: no callback -- we're sleeping\n", - sc->sc_dv.dv_xname); - error = tsleep((caddr_t) & dma->resr[resi], PZERO, "CRYPT", - hz * 3); - if (error != 0) - printf("%s: timed out waiting for interrupt" - " -- tsleep() exited with %d\n", - sc->sc_dv.dv_xname, error); - } - - printf("%s: command: stat %8x ier %8x\n", - sc->sc_dv.dv_xname, - READ_REG_1(sc, AEON_1_DMA_CSR), READ_REG_1(sc, AEON_1_DMA_IER)); - - splx(s); - return 0; /* success */ -} - -int -aeon_intr(arg) - void *arg; -{ - struct aeon_softc *sc = arg; - struct aeon_dma *dma = sc->sc_dma; - u_int32_t dmacsr; - - dmacsr = READ_REG_1(sc, AEON_1_DMA_CSR); - - printf("%s: irq: stat %8x ien %8x u %d/%d/%d/%d\n", - sc->sc_dv.dv_xname, - dmacsr, READ_REG_1(sc, AEON_1_DMA_IER), - dma->cmdu, dma->srcu, dma->dstu, dma->resu); - - if ((dmacsr & (AEON_DMACSR_C_WAIT|AEON_DMACSR_R_DONE)) == 0) - return (0); - - if ((dma->slots_in_use == 0) && (dmacsr & AEON_DMACSR_C_WAIT)) { - /* - * If no slots to process and we received a "waiting on - * result" interrupt, we disable the "waiting on result" - * (by clearing it). - */ - WRITE_REG_1(sc, AEON_1_DMA_IER, AEON_DMAIER_R_DONE); - } else { - if (dma->slots_in_use > AEON_D_RSIZE) - printf("%s: Internal Error -- ring overflow\n", - sc->sc_dv.dv_xname); - - while (dma->slots_in_use > 0) { - u_int32_t wake_pos = dma->wakeup_rpos; - struct aeon_command *cmd = dma->aeon_commands[wake_pos]; - - /* if still valid, stop processing */ - if (dma->resr[wake_pos].l & AEON_D_VALID) - break; - - if (AEON_USING_MAC(cmd->flags) && (cmd->flags & AEON_DECODE)) { - u_int8_t *result_buf = dma->result_bufs[wake_pos]; - - cmd->result_status = (result_buf[8] & 0x2) ? - AEON_MAC_BAD : 0; - printf("%s: byte index 8 of result 0x%02x\n", - sc->sc_dv.dv_xname, (u_int32_t) result_buf[8]); - } - - /* position is done, notify producer with wakup or callback */ - if (cmd->dest_ready_callback == NULL) - wakeup((caddr_t) &dma->resr[wake_pos]); - else - cmd->dest_ready_callback(cmd); - - if (++dma->wakeup_rpos == AEON_D_RSIZE) - dma->wakeup_rpos = 0; - dma->slots_in_use--; - } - } - - /* - * Clear "result done" and "waiting on command ring" flags in status - * register. If we still have slots to process and we received a - * waiting interrupt, this will interupt us again. - */ - WRITE_REG_1(sc, AEON_1_DMA_CSR, AEON_DMACSR_R_DONE|AEON_DMACSR_C_WAIT); - return (1); -} diff --git a/sys/dev/pci/aeonreg.h b/sys/dev/pci/aeonreg.h deleted file mode 100644 index d5a44e3180f..00000000000 --- a/sys/dev/pci/aeonreg.h +++ /dev/null @@ -1,409 +0,0 @@ -/* $OpenBSD: aeonreg.h,v 1.6 2000/03/15 14:55:52 jason Exp $ */ - -/* - * Invertex AEON driver - * Copyright (c) 1999 Invertex Inc. All rights reserved. - * - * Please send any comments, feedback, bug-fixes, or feature requests to - * software@invertex.com. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __AEON_H__ -#define __AEON_H__ - -#include - -/* - * Some PCI configuration space offset defines. The names were made - * identical to the names used by the Linux kernel. - */ -#define AEON_BAR0 (PCI_MAPREG_START + 0) /* PUC register map */ -#define AEON_BAR1 (PCI_MAPREG_START + 4) /* DMA register map */ - -/* - * Some configurable values for the driver - */ -#define AEON_D_RSIZE 24 -#define AEON_MAX_DEVICES 4 - -#define AEON_D_CMD_RSIZE 24 -#define AEON_D_SRC_RSIZE 80 -#define AEON_D_DST_RSIZE 80 -#define AEON_D_RES_RSIZE 24 - -/* - * The values below should multiple of 4 -- and be large enough to handle - * any command the driver implements. - */ -#define AEON_MAX_COMMAND 120 -#define AEON_MAX_RESULT 16 - -/* - * aeon_desc_t - * - * Holds an individual descriptor for any of the rings. - */ -typedef struct aeon_desc { - volatile u_int32_t l; /* length and status bits */ - volatile u_int32_t p; -} aeon_desc_t; - -/* - * Masks for the "length" field of struct aeon_desc. - */ -#define AEON_D_MASKDONEIRQ (0x1 << 25) -#define AEON_D_LAST (0x1 << 29) -#define AEON_D_JUMP (0x1 << 30) -#define AEON_D_VALID (0x1 << 31) - -/* - * aeon_callback_t - * - * Type for callback function when dest data is ready. - */ -typedef void (*aeon_callback_t)(aeon_command_t *); - -/* - * Data structure to hold all 4 rings and any other ring related data. - */ -struct aeon_dma { - /* - * Descriptor rings. We add +1 to the size to accomidate the - * jump descriptor. - */ - struct aeon_desc cmdr[AEON_D_RSIZE+1]; - struct aeon_desc srcr[AEON_D_RSIZE+1]; - struct aeon_desc dstr[AEON_D_RSIZE+1]; - struct aeon_desc resr[AEON_D_RSIZE+1]; - - struct aeon_command *aeon_commands[AEON_D_RSIZE]; - - u_char command_bufs[AEON_D_RSIZE][AEON_MAX_COMMAND]; - u_char result_bufs[AEON_D_RSIZE][AEON_MAX_RESULT]; - - /* - * Our current positions for insertion and removal from the desriptor - * rings. - */ - int cmdi, srci, dsti, resi; - volatile int cmdu, srcu, dstu, resu; - - u_int32_t wakeup_rpos; - volatile u_int32_t slots_in_use; -}; - -/* - * Holds data specific to a single AEON board. 
- */ -struct aeon_softc { - struct device sc_dv; /* generic device */ - void * sc_ih; /* interrupt handler cookie */ - u_int32_t sc_drammodel; /* 1=dram, 0=sram */ - - bus_space_handle_t sc_sh0, sc_sh1; - bus_space_tag_t sc_st0, sc_st1; - bus_dma_tag_t sc_dmat; - - struct aeon_dma *sc_dma; -}; - -/* - * Processing Unit Registers (offset from BASEREG0) - */ -#define AEON_0_PUDATA 0x00 /* Processing Unit Data */ -#define AEON_0_PUCTRL 0x04 /* Processing Unit Control */ -#define AEON_0_PUISR 0x08 /* Processing Unit Interrupt Status */ -#define AEON_0_PUCNFG 0x0c /* Processing Unit Configuration */ -#define AEON_0_PUIER 0x10 /* Processing Unit Interrupt Enable */ -#define AEON_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */ -#define AEON_0_FIFOSTAT 0x18 /* FIFO Status */ -#define AEON_0_FIFOCNFG 0x1c /* FIFO Configuration */ -#define AEON_0_SPACESIZE 0x20 /* Register space size */ - -/* Processing Unit Control Register (AEON_0_PUCTRL) */ -#define AEON_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */ -#define AEON_PUCTRL_STOP 0x0008 /* stop pu */ -#define AEON_PUCTRL_LOCKRAM 0x0004 /* lock ram */ -#define AEON_PUCTRL_DMAENA 0x0002 /* enable dma */ -#define AEON_PUCTRL_RESET 0x0001 /* Reset processing unit */ - -/* Processing Unit Interrupt Status Register (AEON_0_PUISR) */ -#define AEON_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */ -#define AEON_PUISR_DATAERR 0x4000 /* Data error interrupt */ -#define AEON_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ -#define AEON_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ -#define AEON_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */ -#define AEON_PUISR_SRCCMD 0x0080 /* Source command interrupt */ -#define AEON_PUISR_SRCCTX 0x0040 /* Source context interrupt */ -#define AEON_PUISR_SRCDATA 0x0020 /* Source data interrupt */ -#define AEON_PUISR_DSTDATA 0x0010 /* Destination data interrupt */ -#define AEON_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */ - -/* Processing Unit Configuration Register (AEON_0_PUCNFG) */ -#define AEON_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */ -#define AEON_PUCNFG_DSZ_256K 0x0000 /* 256k dram */ -#define AEON_PUCNFG_DSZ_512K 0x2000 /* 512k dram */ -#define AEON_PUCNFG_DSZ_1M 0x4000 /* 1m dram */ -#define AEON_PUCNFG_DSZ_2M 0x6000 /* 2m dram */ -#define AEON_PUCNFG_DSZ_4M 0x8000 /* 4m dram */ -#define AEON_PUCNFG_DSZ_8M 0xa000 /* 8m dram */ -#define AEON_PUNCFG_DSZ_16M 0xc000 /* 16m dram */ -#define AEON_PUCNFG_DSZ_32M 0xe000 /* 32m dram */ -#define AEON_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */ -#define AEON_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */ -#define AEON_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */ -#define AEON_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */ -#define AEON_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */ -#define AEON_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... 
*/ -#define AEON_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */ -#define AEON_PUCNFG_BUS32 0x0040 /* Bus width 32bits */ -#define AEON_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */ -#define AEON_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */ -#define AEON_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */ -#define AEON_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */ -#define AEON_PUCNFG_COMPSING 0x0004 /* Enable single compression context */ -#define AEON_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */ - -/* Processing Unit Interrupt Enable Register (AEON_0_PUIER) */ -#define AEON_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */ -#define AEON_PUIER_DATAERR 0x4000 /* Data error interrupt */ -#define AEON_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ -#define AEON_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ -#define AEON_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */ -#define AEON_PUIER_SRCCMD 0x0080 /* Source command interrupt */ -#define AEON_PUIER_SRCCTX 0x0040 /* Source context interrupt */ -#define AEON_PUIER_SRCDATA 0x0020 /* Source data interrupt */ -#define AEON_PUIER_DSTDATA 0x0010 /* Destination data interrupt */ -#define AEON_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */ - -/* Processing Unit Status Register/Chip ID (AEON_0_PUSTAT) */ -#define AEON_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */ -#define AEON_PUSTAT_DATAERR 0x4000 /* Data error interrupt */ -#define AEON_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ -#define AEON_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ -#define AEON_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */ -#define AEON_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */ -#define AEON_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */ -#define AEON_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */ -#define AEON_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */ -#define AEON_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */ -#define AEON_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */ -#define AEON_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */ -#define AEON_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */ -#define AEON_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */ -#define AEON_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */ -#define AEON_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */ -#define AEON_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */ - -/* FIFO Status Register (AEON_0_FIFOSTAT) */ -#define AEON_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */ -#define AEON_FIFOSTAT_DST 0x007f /* Destination FIFO available */ - -/* FIFO Configuration Register (AEON_0_FIFOCNFG) */ -#define AEON_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */ - -/* - * DMA Interface Registers (offset from BASEREG1) - */ -#define AEON_1_DMA_CRAR 0x0c /* DMA Command Ring Address */ -#define AEON_1_DMA_SRAR 0x1c /* DMA Source Ring Address */ -#define AEON_1_DMA_RRAR 0x2c /* DMA Resultt Ring Address */ -#define AEON_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */ -#define AEON_1_DMA_CSR 0x40 /* DMA Status and Control */ -#define AEON_1_DMA_IER 0x44 /* DMA Interrupt Enable */ -#define AEON_1_DMA_CNFG 0x48 /* DMA Configuration */ -#define AEON_1_REVID 0x98 /* Revision ID */ - -/* DMA Status and Control Register (AEON_1_DMA_CSR) */ -#define AEON_DMACSR_D_CTRLMASK 0xc0000000 /* Destinition Ring Control */ -#define AEON_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */ -#define AEON_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */ -#define AEON_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. 
Control: enable */ -#define AEON_DMACSR_D_ABORT 0x20000000 /* Destinition Ring PCIAbort */ -#define AEON_DMACSR_D_DONE 0x10000000 /* Destinition Ring Done */ -#define AEON_DMACSR_D_LAST 0x08000000 /* Destinition Ring Last */ -#define AEON_DMACSR_D_WAIT 0x04000000 /* Destinition Ring Waiting */ -#define AEON_DMACSR_D_OVER 0x02000000 /* Destinition Ring Overflow */ -#define AEON_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */ -#define AEON_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */ -#define AEON_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */ -#define AEON_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */ -#define AEON_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */ -#define AEON_DMACSR_R_DONE 0x00100000 /* Result Ring Done */ -#define AEON_DMACSR_R_LAST 0x00080000 /* Result Ring Last */ -#define AEON_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */ -#define AEON_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */ -#define AEON_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */ -#define AEON_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */ -#define AEON_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */ -#define AEON_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */ -#define AEON_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */ -#define AEON_DMACSR_S_DONE 0x00001000 /* Source Ring Done */ -#define AEON_DMACSR_S_LAST 0x00000800 /* Source Ring Last */ -#define AEON_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */ -#define AEON_DMACSR_S_OVER 0x00000200 /* Source Ring Overflow */ -#define AEON_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */ -#define AEON_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */ -#define AEON_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */ -#define AEON_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */ -#define AEON_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */ -#define AEON_DMACSR_C_DONE 0x00000010 /* Command Ring Done */ -#define AEON_DMACSR_C_LAST 0x00000008 /* Command Ring Last */ -#define AEON_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */ -#define AEON_DMACSR_C_EIRQ 0x00000001 /* Command Ring Engine IRQ */ - -/* DMA Interrupt Enable Register (AEON_1_DMA_IER) */ -#define AEON_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */ -#define AEON_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */ -#define AEON_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */ -#define AEON_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */ -#define AEON_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */ -#define AEON_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */ -#define AEON_DMAIER_R_DONE 0x00100000 /* Result Ring Done */ -#define AEON_DMAIER_R_LAST 0x00080000 /* Result Ring Last */ -#define AEON_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */ -#define AEON_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */ -#define AEON_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */ -#define AEON_DMAIER_S_DONE 0x00001000 /* Source Ring Done */ -#define AEON_DMAIER_S_LAST 0x00000800 /* Source Ring Last */ -#define AEON_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */ -#define AEON_DMAIER_S_OVER 0x00000200 /* Source Ring Overflow */ -#define AEON_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */ -#define AEON_DMAIER_C_DONE 0x00000010 /* Command Ring Done */ -#define AEON_DMAIER_C_LAST 0x00000008 /* Command Ring Last */ -#define AEON_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */ -#define AEON_DMAIER_ENGINE 0x00000001 /* Engine IRQ */ 
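/*
 * [Editor's sketch -- not part of aeonreg.h or this commit.]  A minimal
 * illustration of how the DMA_CSR and DMA_IER masks above are used by the
 * driver: aeon_intr() (see aeon.c earlier in this diff) only cares about
 * "result done" and "waiting on command", and acknowledges them by writing
 * the same two bits back to the status register.  The helper name below is
 * hypothetical.
 */
static int
aeon_dma_intr_pending(u_int32_t dmacsr)
{
	/* set when a result descriptor completed or the command ring is waiting */
	return ((dmacsr & (AEON_DMACSR_R_DONE | AEON_DMACSR_C_WAIT)) != 0);
}
/* acknowledge with:
 *	WRITE_REG_1(sc, AEON_1_DMA_CSR, AEON_DMACSR_R_DONE | AEON_DMACSR_C_WAIT);
 */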
- -/* DMA Configuration Register (AEON_1_DMA_CNFG) */ -#define AEON_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */ -#define AEON_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */ -#define AEON_DMACNFG_UNLOCK 0x00000800 -#define AEON_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */ -#define AEON_DMACNFG_LAST 0x00000010 /* Host control LAST bit */ -#define AEON_DMACNFG_MODE 0x00000004 /* DMA mode */ -#define AEON_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */ -#define AEON_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */ - -#define WRITE_REG_0(sc,reg,val) \ - bus_space_write_4((sc)->sc_st0, (sc)->sc_sh0, reg, val) -#define READ_REG_0(sc,reg) \ - bus_space_read_4((sc)->sc_st0, (sc)->sc_sh0, reg) - -/* - * Register offsets in register set 1 - */ - -#define AEON_UNLOCK_SECRET1 0xf4 -#define AEON_UNLOCK_SECRET2 0xfc - -#define WRITE_REG_1(sc,reg,val) \ - bus_space_write_4((sc)->sc_st1, (sc)->sc_sh1, reg, val) -#define READ_REG_1(sc,reg) \ - bus_space_read_4((sc)->sc_st1, (sc)->sc_sh1, reg) - -/********************************************************************* - * Structs for board commands - * - *********************************************************************/ - -/* - * Structure to help build up the command data structure. - */ -typedef struct aeon_base_command { - u_int16_t masks; - u_int16_t session_num; - u_int16_t total_source_count; - u_int16_t total_dest_count; -} aeon_base_command_t; - -#define AEON_BASE_CMD_MAC (0x1 << 10) -#define AEON_BASE_CMD_CRYPT (0x1 << 11) -#define AEON_BASE_CMD_DECODE (0x1 << 13) - -/* - * Structure to help build up the command data structure. - */ -typedef struct aeon_crypt_command { - u_int16_t masks; - u_int16_t header_skip; - u_int32_t source_count; -} aeon_crypt_command_t; - -#define AEON_CRYPT_CMD_ALG_MASK (0x3 << 0) -#define AEON_CRYPT_CMD_ALG_DES (0x0 << 0) -#define AEON_CRYPT_CMD_ALG_3DES (0x1 << 0) -#define AEON_CRYPT_CMD_MODE_CBC (0x1 << 3) -#define AEON_CRYPT_CMD_NEW_KEY (0x1 << 11) -#define AEON_CRYPT_CMD_NEW_IV (0x1 << 12) - -/* - * Structure to help build up the command data structure. - */ -typedef struct aeon_mac_command { - u_int16_t masks; - u_int16_t header_skip; - u_int32_t source_count; -} aeon_mac_command_t; - -#define AEON_MAC_CMD_ALG_MD5 (0x1 << 0) -#define AEON_MAC_CMD_ALG_SHA1 (0x0 << 0) -#define AEON_MAC_CMD_MODE_HMAC (0x0 << 2) -#define AEON_MAC_CMD_TRUNC (0x1 << 4) -#define AEON_MAC_CMD_APPEND (0x1 << 6) -/* - * MAC POS IPSec initiates authentication after encryption on encodes - * and before decryption on decodes. - */ -#define AEON_MAC_CMD_POS_IPSEC (0x2 << 8) -#define AEON_MAC_CMD_NEW_KEY (0x1 << 11) - -/* - * Structure with all fields necessary to write the command buffer. - * We build it up while interrupts are on, then use it to write out - * the command buffer quickly while interrupts are off. - */ -typedef struct aeon_command_buf_data { - aeon_base_command_t base_cmd; - aeon_mac_command_t mac_cmd; - aeon_crypt_command_t crypt_cmd; - const u_int8_t *mac; - const u_int8_t *ck; - const u_int8_t *iv; -} aeon_command_buf_data_t; - -/* - * The poll frequency and poll scalar defines are unshifted values used - * to set fields in the DMA Configuration Register. 
- */ -#ifndef AEON_POLL_FREQUENCY -#define AEON_POLL_FREQUENCY 0x1 -#endif - -#ifndef AEON_POLL_SCALAR -#define AEON_POLL_SCALAR 0x0 -#endif - -#endif /* __AEON_H__ */ diff --git a/sys/dev/pci/aeonvar.h b/sys/dev/pci/aeonvar.h deleted file mode 100644 index a933884346a..00000000000 --- a/sys/dev/pci/aeonvar.h +++ /dev/null @@ -1,277 +0,0 @@ -/* $OpenBSD: aeonvar.h,v 1.3 1999/02/24 06:09:45 deraadt Exp $ */ - -/* - * Invertex AEON driver - * Copyright (c) 1999 Invertex Inc. All rights reserved. - * - * Please send any comments, feedback, bug-fixes, or feature requests to - * software@invertex.com. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __AEON_EXPORT_H__ -#define __AEON_EXPORT_H__ - -/* - * Length values for cryptography - */ -#define AEON_DES_KEY_LENGTH 8 -#define AEON_3DES_KEY_LENGTH 24 -#define AEON_MAX_CRYPT_KEY_LENGTH AEON_3DES_KEY_LENGTH -#define AEON_IV_LENGTH 8 - -/* - * Length values for authentication - */ -#define AEON_MAC_KEY_LENGTH 64 -#define AEON_MD5_LENGTH 16 -#define AEON_SHA1_LENGTH 20 -#define AEON_MAC_TRUNC_LENGTH 12 - -#define MAX_SCATTER 10 - -/* - * aeon_command_t - * - * This is the control structure used to pass commands to aeon_encrypt(). - * - * flags - * ----- - * Flags is the bitwise "or" values for command configuration. A single - * encrypt direction needs to be set: - * - * AEON_ENCODE or AEON_DECODE - * - * To use cryptography, a single crypto algorithm must be included: - * - * AEON_CRYPT_3DES or AEON_CRYPT_DES - * - * To use authentication is used, a single MAC algorithm must be included: - * - * AEON_MAC_MD5 or AEON_MAC_SHA1 - * - * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash. - * If the value below is set, hash values are truncated or assumed - * truncated to 12 bytes: - * - * AEON_MAC_TRUNC - * - * Keys for encryption and authentication can be sent as part of a command, - * or the last key value used with a particular session can be retrieved - * and used again if either of these flags are not specified. 
- * - * AEON_CRYPT_NEW_KEY, AEON_MAC_NEW_KEY - * - * Whether we block or not waiting for the dest data to be ready is - * determined by whether a callback function is given. The other - * place we could block is when all the DMA rings are full. If - * it is not okay to block while waiting for an open slot in the - * rings, include in the following value: - * - * AEON_DMA_FULL_NOBLOCK - * - * result_flags - * ------------ - * result_flags is a bitwise "or" of result values. The result_flags - * values should not be considered valid until: - * - * callback routine NULL: aeon_crypto() returns - * callback routine set: callback routine called - * - * Right now there is only one result flag: AEON_MAC_BAD - * It's bit is set on decode operations using authentication when a - * hash result does not match the input hash value. - * The AEON_MAC_OK(r) macro can be used to help inspect this flag. - * - * session_num - * ----------- - * A number between 0 and 2048 (for DRAM models) or a number between - * 0 and 768 (for SRAM models). Those who don't want to use session - * numbers should leave value at zero and send a new crypt key and/or - * new MAC key on every command. If you use session numbers and - * don't send a key with a command, the last key sent for that same - * session number will be used. - * - * Warning: Using session numbers and multiboard at the same time - * is currently broken. - * - * mbuf: either fill in the mbuf pointer and npa=0 or - * fill packp[] and packl[] and set npa to > 0 - * - * mac_header_skip - * --------------- - * The number of bytes of the source_buf that are skipped over before - * authentication begins. This must be a number between 0 and 2^16-1 - * and can be used by IPSec implementers to skip over IP headers. - * *** Value ignored if authentication not used *** - * - * crypt_header_skip - * ----------------- - * The number of bytes of the source_buf that are skipped over before - * the cryptographic operation begins. This must be a number between 0 - * and 2^16-1. For IPSec, this number will always be 8 bytes larger - * than the auth_header_skip (to skip over the ESP header). - * *** Value ignored if cryptography not used *** - * - * source_length - * ------------- - * Length of input data including all skipped headers. On decode - * operations using authentication, the length must also include the - * the appended MAC hash (12, 16, or 20 bytes depending on algorithm - * and truncation settings). - * - * If encryption is used, the encryption payload must be a non-zero - * multiple of 8. On encode operations, the encryption payload size - * is (source_length - crypt_header_skip - (MAC hash size)). On - * decode operations, the encryption payload is - * (source_length - crypt_header_skip). - * - * dest_length - * ----------- - * Length of the dest buffer. It must be at least as large as the - * source buffer when authentication is not used. When authentication - * is used on an encode operation, it must be at least as long as the - * source length plus an extra 12, 16, or 20 bytes to hold the MAC - * value (length of mac value varies with algorithm used). When - * authentication is used on decode operations, it must be at least - * as long as the source buffer minus 12, 16, or 20 bytes for the MAC - * value which is not included in the dest data. Unlike source_length, - * the dest_length does not have to be exact, values larger than required - * are fine. - * - * dest_ready_callback - * ------------------- - * Callback routine called from AEON's interrupt handler. 
The routine - * must be quick and non-blocking. The callback routine is passed a - * pointer to the same aeon_command_t structure used to initiate the - * command. - * - * If this value is null, the aeon_crypto() routine will block until the - * dest data is ready. - * - * private_data - * ------------ - * An unsigned long quantity (i.e. large enough to hold a pointer), that - * can be used by the callback routine if desired. - */ -typedef struct aeon_command { - u_int flags; - volatile u_int result_status; - - u_short session_num; - - u_char *iv, *ck, *mac; - int iv_len, ck_len, mac_len; - - struct mbuf *src_m; - long src_packp[MAX_SCATTER]; - int src_packl[MAX_SCATTER]; - int src_npa; - int src_l; - - struct mbuf *dst_m; - long dst_packp[MAX_SCATTER]; - int dst_packl[MAX_SCATTER]; - int dst_npa; - int dst_l; - - u_short mac_header_skip; - u_short crypt_header_skip; - - void (*dest_ready_callback)(struct aeon_command *); - u_long private_data; -} aeon_command_t; - -/* - * Return values for aeon_crypto() - */ -#define AEON_CRYPTO_SUCCESS 0 -#define AEON_CRYPTO_BAD_INPUT -1 -#define AEON_CRYPTO_RINGS_FULL -2 - - -/* - * Defines for the "config" parameter of aeon_command_t - */ -#define AEON_ENCODE 1 -#define AEON_DECODE 2 -#define AEON_CRYPT_3DES 4 -#define AEON_CRYPT_DES 8 -#define AEON_MAC_MD5 16 -#define AEON_MAC_SHA1 32 -#define AEON_MAC_TRUNC 64 -#define AEON_CRYPT_NEW_KEY 128 -#define AEON_MAC_NEW_KEY 256 -#define AEON_DMA_FULL_NOBLOCK 512 - -#define AEON_USING_CRYPT(f) ((f) & (AEON_CRYPT_3DES|AEON_CRYPT_DES)) -#define AEON_USING_MAC(f) ((f) & (AEON_MAC_MD5|AEON_MAC_SHA1)) - -/* - * Defines for the "result_status" parameter of aeon_command_t. - */ -#define AEON_MAC_BAD 1 -#define AEON_MAC_OK(r) !((r) & AEON_MAC_BAD) - -#ifdef _KERNEL - -/************************************************************************** - * - * Function: aeon_crypto - * - * Purpose: Called by external drivers to begin an encryption on the - * AEON board. - * - * Blocking/Non-blocking Issues - * ============================ - * If the dest_ready_callback field of the aeon_command structure - * is NULL, aeon_encrypt will block until the dest_data is ready -- - * otherwise aeon_encrypt() will return immediately and the - * dest_ready_callback routine will be called when the dest data is - * ready. - * - * The routine can also block when waiting for an open slot when all - * DMA rings are full. You can avoid this behaviour by sending the - * AEON_DMA_FULL_NOBLOCK as part of the command flags. This will - * make aeon_crypt() return immediately when the rings are full. - * - * Return Values - * ============= - * 0 for success, negative values on error - * - * Defines for negative error codes are: - * - * AEON_CRYPTO_BAD_INPUT : The passed in command had invalid settings. - * AEON_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking - * behaviour was requested. - * - *************************************************************************/ -int aeon_crypto __P((aeon_command_t *command)); - -#endif /* _KERNEL */ - -#endif /* __AEON_EXPORT_H__ */ diff --git a/sys/dev/pci/files.pci b/sys/dev/pci/files.pci index 0f2731a08f5..eb9b9d60880 100644 --- a/sys/dev/pci/files.pci +++ b/sys/dev/pci/files.pci @@ -1,4 +1,4 @@ -# $OpenBSD: files.pci,v 1.64 2000/02/11 14:51:50 jason Exp $ +# $OpenBSD: files.pci,v 1.65 2000/03/16 20:33:48 deraadt Exp $ # $NetBSD: files.pci,v 1.20 1996/09/24 17:47:15 christos Exp $ # # Config file and device description for machine-independent PCI code. 
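# Editor's note (not part of the patch): the files.pci hunk below swaps the
# "aeon" glue for "hifn".  A kernel config would then enable the device with
# an entry along these lines; the exact GENERIC wording is an assumption and
# is not shown in this commit:
#
#	hifn*	at pci?	dev ? function ?	# Hi/fn 7751 crypto accelerator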
@@ -178,10 +178,10 @@ file dev/pci/if_ti.c ti attach ne at pci with ne_pci: rtl80x9 file dev/pci/if_ne_pci.c ne_pci -# Invertix AEON -device aeon: crypto -attach aeon at pci -file dev/pci/aeon.c aeon +# Hi/fn 7751 +device hifn: crypto +attach hifn at pci +file dev/pci/hifn7751.c hifn # Winbond W89C840F ethernet device wb: ether, ifnet, mii, ifmedia diff --git a/sys/dev/pci/hifn7751.c b/sys/dev/pci/hifn7751.c index 07f51f0ce56..41443dd2932 100644 --- a/sys/dev/pci/hifn7751.c +++ b/sys/dev/pci/hifn7751.c @@ -1,7 +1,7 @@ -/* $OpenBSD: hifn7751.c,v 1.9 2000/03/15 14:55:51 jason Exp $ */ +/* $OpenBSD: hifn7751.c,v 1.10 2000/03/16 20:33:47 deraadt Exp $ */ /* - * Invertex AEON driver + * Invertex AEON / Hi/fn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * @@ -50,47 +50,47 @@ #include #include -#include -#include +#include +#include -#undef AEON_DEBUG +#undef HIFN_DEBUG /* * Prototypes and count for the pci_device structure */ -int aeon_probe __P((struct device *, void *, void *)); -void aeon_attach __P((struct device *, struct device *, void *)); +int hifn_probe __P((struct device *, void *, void *)); +void hifn_attach __P((struct device *, struct device *, void *)); -struct cfattach aeon_ca = { - sizeof(struct aeon_softc), aeon_probe, aeon_attach, +struct cfattach hifn_ca = { + sizeof(struct hifn_softc), hifn_probe, hifn_attach, }; -struct cfdriver aeon_cd = { - 0, "aeon", DV_DULL +struct cfdriver hifn_cd = { + 0, "hifn", DV_DULL }; -void aeon_reset_board __P((struct aeon_softc *)); -int aeon_enable_crypto __P((struct aeon_softc *, pcireg_t)); -void aeon_init_dma __P((struct aeon_softc *)); -void aeon_init_pci_registers __P((struct aeon_softc *)); -int aeon_checkram __P((struct aeon_softc *)); -int aeon_intr __P((void *)); -u_int aeon_write_command __P((const struct aeon_command_buf_data *, +void hifn_reset_board __P((struct hifn_softc *)); +int hifn_enable_crypto __P((struct hifn_softc *, pcireg_t)); +void hifn_init_dma __P((struct hifn_softc *)); +void hifn_init_pci_registers __P((struct hifn_softc *)); +int hifn_checkram __P((struct hifn_softc *)); +int hifn_intr __P((void *)); +u_int hifn_write_command __P((const struct hifn_command_buf_data *, u_int8_t *)); -int aeon_build_command __P((const struct aeon_command * cmd, - struct aeon_command_buf_data *)); -int aeon_mbuf __P((struct mbuf *, int *np, long *pp, int *lp, int maxp, +int hifn_build_command __P((const struct hifn_command * cmd, + struct hifn_command_buf_data *)); +int hifn_mbuf __P((struct mbuf *, int *np, long *pp, int *lp, int maxp, int *nicealign)); -u_int32_t aeon_next_signature __P((u_int a, u_int cnt)); +u_int32_t hifn_next_signature __P((u_int a, u_int cnt)); /* * Used for round robin crypto requests */ -int aeon_num_devices = 0; -struct aeon_softc *aeon_devices[AEON_MAX_DEVICES]; +int hifn_num_devices = 0; +struct hifn_softc *hifn_devices[HIFN_MAX_DEVICES]; int -aeon_probe(parent, match, aux) +hifn_probe(parent, match, aux) struct device *parent; void *match; void *aux; @@ -107,11 +107,11 @@ aeon_probe(parent, match, aux) } void -aeon_attach(parent, self, aux) +hifn_attach(parent, self, aux) struct device *parent, *self; void *aux; { - struct aeon_softc *sc = (struct aeon_softc *)self; + struct hifn_softc *sc = (struct hifn_softc *)self; struct pci_attach_args *pa = aux; pci_chipset_tag_t pc = pa->pa_pc; pci_intr_handle_t ih; @@ -135,7 +135,7 @@ aeon_attach(parent, self, aux) return; } - if (pci_mem_find(pc, pa->pa_tag, AEON_BAR0, &iobase, &iosize, NULL)) { + if 
(pci_mem_find(pc, pa->pa_tag, HIFN_BAR0, &iobase, &iosize, NULL)) { printf(": can't find mem space\n"); return; } @@ -145,7 +145,7 @@ aeon_attach(parent, self, aux) } sc->sc_st0 = pa->pa_memt; - if (pci_mem_find(pc, pa->pa_tag, AEON_BAR1, &iobase, &iosize, NULL)) { + if (pci_mem_find(pc, pa->pa_tag, HIFN_BAR1, &iobase, &iosize, NULL)) { printf(": can't find mem space\n"); return; } @@ -154,7 +154,7 @@ aeon_attach(parent, self, aux) return; } sc->sc_st1 = pa->pa_memt; -#ifdef AEON_DEBUG +#ifdef HIFN_DEBUG printf(" mem %x %x", sc->sc_sh0, sc->sc_sh1); #endif @@ -186,20 +186,20 @@ aeon_attach(parent, self, aux) bus_dmamem_free(sc->sc_dmat, &seg, rseg); return; } - sc->sc_dma = (struct aeon_dma *)kva; + sc->sc_dma = (struct hifn_dma *)kva; bzero(sc->sc_dma, sizeof(*sc->sc_dma)); - aeon_reset_board(sc); + hifn_reset_board(sc); - if (aeon_enable_crypto(sc, pa->pa_id) != 0) { + if (hifn_enable_crypto(sc, pa->pa_id) != 0) { printf("%s: crypto enabling failed\n", sc->sc_dv.dv_xname); return; } - aeon_init_dma(sc); - aeon_init_pci_registers(sc); + hifn_init_dma(sc); + hifn_init_pci_registers(sc); - if (aeon_checkram(sc) != 0) + if (hifn_checkram(sc) != 0) sc->sc_drammodel = 1; /* @@ -207,9 +207,9 @@ aeon_attach(parent, self, aux) * pointers and may have changed the value we send to the RAM Config * Register. */ - aeon_reset_board(sc); - aeon_init_dma(sc); - aeon_init_pci_registers(sc); + hifn_reset_board(sc); + hifn_init_dma(sc); + hifn_init_pci_registers(sc); if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin, pa->pa_intrline, &ih)) { @@ -217,7 +217,7 @@ aeon_attach(parent, self, aux) return; } intrstr = pci_intr_string(pc, ih); - sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, aeon_intr, sc, + sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc, self->dv_xname); if (sc->sc_ih == NULL) { printf(": couldn't establish interrupt\n"); @@ -227,8 +227,8 @@ aeon_attach(parent, self, aux) return; } - aeon_devices[aeon_num_devices] = sc; - aeon_num_devices++; + hifn_devices[hifn_num_devices] = sc; + hifn_num_devices++; printf(", %s\n", intrstr); } @@ -238,15 +238,15 @@ aeon_attach(parent, self, aux) * from the reset (i.e. initial values are assigned elsewhere). */ void -aeon_reset_board(sc) - struct aeon_softc *sc; +hifn_reset_board(sc) + struct hifn_softc *sc; { /* * Set polling in the DMA configuration register to zero. 0x7 avoids * resetting the board and zeros out the other fields. */ - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_MSTRESET | - AEON_DMACNFG_DMARESET | AEON_DMACNFG_MODE); + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); /* * Now that polling has been disabled, we have to wait 1 ms @@ -258,7 +258,7 @@ aeon_reset_board(sc) * field, the BRD reset field, and the manditory 1 at position 2. * Every other field is set to zero. */ - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_MODE); + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); /* * Wait another millisecond for the board to reset. @@ -268,12 +268,12 @@ aeon_reset_board(sc) /* * Turn off the reset! (No joke.) */ - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_MSTRESET | - AEON_DMACNFG_DMARESET | AEON_DMACNFG_MODE); + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); } u_int32_t -aeon_next_signature(a, cnt) +hifn_next_signature(a, cnt) u_int a, cnt; { int i, v; @@ -314,12 +314,12 @@ struct pci2id { /* * Checks to see if crypto is already enabled. If crypto isn't enable, - * "aeon_enable_crypto" is called to enable it. 
The check is important, + * "hifn_enable_crypto" is called to enable it. The check is important, * as enabling crypto twice will lock the board. */ int -aeon_enable_crypto(sc, pciid) - struct aeon_softc *sc; +hifn_enable_crypto(sc, pciid) + struct hifn_softc *sc; pcireg_t pciid; { u_int32_t dmacfg, ramcfg, encl, addr, i; @@ -334,68 +334,68 @@ aeon_enable_crypto(sc, pciid) } if (offtbl == NULL) { -#ifdef AEON_DEBUG +#ifdef HIFN_DEBUG printf("%s: Unknown card!\n", sc->sc_dv.dv_xname); #endif return (1); } - ramcfg = READ_REG_0(sc, AEON_0_PUCNFG); - dmacfg = READ_REG_1(sc, AEON_1_DMA_CNFG); + ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); + dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); /* * The RAM config register's encrypt level bit needs to be set before * every read performed on the encryption level register. */ - WRITE_REG_0(sc, AEON_0_PUCNFG, ramcfg | AEON_PUCNFG_CHIPID); + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); - encl = READ_REG_0(sc, AEON_0_PUSTAT); + encl = READ_REG_0(sc, HIFN_0_PUSTAT); /* * Make sure we don't re-unlock. Two unlocks kills chip until the * next reboot. */ if (encl == 0x1020 || encl == 0x1120) { -#ifdef AEON_DEBUG +#ifdef HIFN_DEBUG printf("%s: Strong Crypto already enabled!\n", sc->sc_dv.dv_xname); #endif - WRITE_REG_0(sc, AEON_0_PUCNFG, ramcfg); - WRITE_REG_1(sc, AEON_1_DMA_CNFG, dmacfg); + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); return 0; /* success */ } if (encl != 0 && encl != 0x3020) { -#ifdef AEON_DEBUG +#ifdef HIFN_DEBUG printf("%: Unknown encryption level\n", sc->sc_dv.dv_xname); #endif return 1; } - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_UNLOCK | - AEON_DMACNFG_MSTRESET | AEON_DMACNFG_DMARESET | AEON_DMACNFG_MODE); - addr = READ_REG_1(sc, AEON_UNLOCK_SECRET1); - WRITE_REG_1(sc, AEON_UNLOCK_SECRET2, 0); + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | + HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); + addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1); + WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0); for (i = 0; i <= 12; i++) { - addr = aeon_next_signature(addr, offtbl[i] + 0x101); - WRITE_REG_1(sc, AEON_UNLOCK_SECRET2, addr); + addr = hifn_next_signature(addr, offtbl[i] + 0x101); + WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr); DELAY(1000); } - WRITE_REG_0(sc, AEON_0_PUCNFG, ramcfg | AEON_PUCNFG_CHIPID); - encl = READ_REG_0(sc, AEON_0_PUSTAT); + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); + encl = READ_REG_0(sc, HIFN_0_PUSTAT); -#ifdef AEON_DEBUG +#ifdef HIFN_DEBUG if (encl != 0x1020 && encl != 0x1120) printf("Encryption engine is permanently locked until next system reset."); else printf("Encryption engine enabled successfully!"); #endif - WRITE_REG_0(sc, AEON_0_PUCNFG, ramcfg); - WRITE_REG_1(sc, AEON_1_DMA_CNFG, dmacfg); + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); switch(encl) { case 0x3020: @@ -417,49 +417,49 @@ aeon_enable_crypto(sc, pciid) /* * Give initial values to the registers listed in the "Register Space" - * section of the AEON Software Development reference manual. + * section of the HIFN Software Development reference manual. 
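/*
 * For reference, the PUSTAT "encryption level" values that
 * hifn_enable_crypto() tests above, collected in one place.  This helper
 * is only a sketch of that decision table; the name is invented and the
 * values are the ones read back from HIFN_0_PUSTAT in the function.
 */
static const char *
example_encl_name(u_int32_t encl)
{
	switch (encl) {
	case 0x1020:
	case 0x1120:
		return ("strong crypto already enabled; do not unlock again");
	case 0x0000:
	case 0x3020:
		return ("locked; unlock sequence required");
	default:
		return ("unknown encryption level");
	}
}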
*/ void -aeon_init_pci_registers(sc) - struct aeon_softc *sc; +hifn_init_pci_registers(sc) + struct hifn_softc *sc; { /* write fixed values needed by the Initialization registers */ - WRITE_REG_0(sc, AEON_0_PUCTRL, AEON_PUCTRL_DMAENA); - WRITE_REG_0(sc, AEON_0_FIFOCNFG, AEON_FIFOCNFG_THRESHOLD); - WRITE_REG_0(sc, AEON_0_PUIER, AEON_PUIER_DSTOVER); + WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); + WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); + WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); /* write all 4 ring address registers */ - WRITE_REG_1(sc, AEON_1_DMA_CRAR, vtophys(sc->sc_dma->cmdr)); - WRITE_REG_1(sc, AEON_1_DMA_SRAR, vtophys(sc->sc_dma->srcr)); - WRITE_REG_1(sc, AEON_1_DMA_DRAR, vtophys(sc->sc_dma->dstr)); - WRITE_REG_1(sc, AEON_1_DMA_RRAR, vtophys(sc->sc_dma->resr)); + WRITE_REG_1(sc, HIFN_1_DMA_CRAR, vtophys(sc->sc_dma->cmdr)); + WRITE_REG_1(sc, HIFN_1_DMA_SRAR, vtophys(sc->sc_dma->srcr)); + WRITE_REG_1(sc, HIFN_1_DMA_DRAR, vtophys(sc->sc_dma->dstr)); + WRITE_REG_1(sc, HIFN_1_DMA_RRAR, vtophys(sc->sc_dma->resr)); /* write status register */ - WRITE_REG_1(sc, AEON_1_DMA_CSR, AEON_DMACSR_D_CTRL_ENA | - AEON_DMACSR_R_CTRL_ENA | AEON_DMACSR_S_CTRL_ENA | - AEON_DMACSR_C_CTRL_ENA); - WRITE_REG_1(sc, AEON_1_DMA_IER, AEON_DMAIER_R_DONE); + WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA | + HIFN_DMACSR_R_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | + HIFN_DMACSR_C_CTRL_ENA); + WRITE_REG_1(sc, HIFN_1_DMA_IER, HIFN_DMAIER_R_DONE); #if 0 #if BYTE_ORDER == BIG_ENDIAN (0x1 << 7) | #endif #endif - WRITE_REG_0(sc, AEON_0_PUCNFG, AEON_PUCNFG_COMPSING | - AEON_PUCNFG_DRFR_128 | AEON_PUCNFG_TCALLPHASES | - AEON_PUCNFG_TCDRVTOTEM | AEON_PUCNFG_BUS32 | - (sc->sc_drammodel ? AEON_PUCNFG_DRAM : AEON_PUCNFG_SRAM)); - - WRITE_REG_0(sc, AEON_0_PUISR, AEON_PUISR_DSTOVER); - WRITE_REG_1(sc, AEON_1_DMA_CNFG, AEON_DMACNFG_MSTRESET | - AEON_DMACNFG_DMARESET | AEON_DMACNFG_MODE | - AEON_DMACNFG_LAST | - ((AEON_POLL_FREQUENCY << 16 ) & AEON_DMACNFG_POLLFREQ) | - ((AEON_POLL_SCALAR << 8) & AEON_DMACNFG_POLLINVAL)); + WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | + HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | + (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); + + WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | + HIFN_DMACNFG_LAST | + ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | + ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); } /* - * There are both DRAM and SRAM models of the aeon board. + * There are both DRAM and SRAM models of the hifn board. * A bit in the "ram configuration register" needs to be * set according to the model. The driver will guess one * way or the other -- and then call this routine to verify. 
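/*
 * The DRAM/SRAM guess recorded in sc_drammodel also bounds the usable
 * session numbers described in the interface comments earlier in this
 * change (0-2048 on DRAM models, 0-768 on SRAM models).  A one-line
 * sketch of that bound; the macro name is invented, not part of the
 * driver:
 */
#define EXAMPLE_MAX_SESSION(sc)	((sc)->sc_drammodel ? 2048 : 768)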
@@ -467,18 +467,18 @@ aeon_init_pci_registers(sc) * 0: RAM setting okay, -1: Current RAM setting in error */ int -aeon_checkram(sc) - struct aeon_softc *sc; +hifn_checkram(sc) + struct hifn_softc *sc; { - aeon_base_command_t write_command = {(0x3 << 13), 0, 8, 0}; - aeon_base_command_t read_command = {(0x2 << 13), 0, 0, 8}; + hifn_base_command_t write_command = {(0x3 << 13), 0, 8, 0}; + hifn_base_command_t read_command = {(0x2 << 13), 0, 0, 8}; u_int8_t data[8] = {'1', '2', '3', '4', '5', '6', '7', '8'}; u_int8_t *source_buf, *dest_buf; - struct aeon_dma *dma = sc->sc_dma; - const u_int32_t masks = AEON_D_VALID | AEON_D_LAST | - AEON_D_MASKDONEIRQ; + struct hifn_dma *dma = sc->sc_dma; + const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | + HIFN_D_MASKDONEIRQ; -#if (AEON_D_RSIZE < 3) +#if (HIFN_D_RSIZE < 3) #error "descriptor ring size too small DRAM/SRAM check" #endif @@ -490,7 +490,7 @@ aeon_checkram(sc) dest_buf = sc->sc_dma->result_bufs[2]; /* build write command */ - *(aeon_base_command_t *) sc->sc_dma->command_bufs[0] = write_command; + *(hifn_base_command_t *) sc->sc_dma->command_bufs[0] = write_command; bcopy(data, source_buf, sizeof(data)); dma->srcr[0].p = vtophys(source_buf); @@ -499,25 +499,25 @@ aeon_checkram(sc) dma->cmdr[0].l = 16 | masks; dma->srcr[0].l = 8 | masks; dma->dstr[0].l = 8 | masks; - dma->resr[0].l = AEON_MAX_RESULT | masks; + dma->resr[0].l = HIFN_MAX_RESULT | masks; DELAY(1000); /* let write command execute */ - if (dma->resr[0].l & AEON_D_VALID) + if (dma->resr[0].l & HIFN_D_VALID) printf("%s: SRAM/DRAM detection error -- result[0] valid still set\n", sc->sc_dv.dv_xname); /* Build read command */ - *(aeon_base_command_t *) sc->sc_dma->command_bufs[1] = read_command; + *(hifn_base_command_t *) sc->sc_dma->command_bufs[1] = read_command; dma->srcr[1].p = vtophys(source_buf); dma->dstr[1].p = vtophys(dest_buf); dma->cmdr[1].l = 16 | masks; dma->srcr[1].l = 8 | masks; dma->dstr[1].l = 8 | masks; - dma->resr[1].l = AEON_MAX_RESULT | masks; + dma->resr[1].l = HIFN_MAX_RESULT | masks; DELAY(1000); /* let read command execute */ - if (dma->resr[1].l & AEON_D_VALID) + if (dma->resr[1].l & HIFN_D_VALID) printf("%s: SRAM/DRAM detection error -- result[1] valid still set\n", sc->sc_dv.dv_xname); return (memcmp(dest_buf, data, sizeof(data)) == 0) ? 0 : -1; @@ -527,22 +527,22 @@ aeon_checkram(sc) * Initialize the descriptor rings. */ void -aeon_init_dma(sc) - struct aeon_softc *sc; +hifn_init_dma(sc) + struct hifn_softc *sc; { - struct aeon_dma *dma = sc->sc_dma; + struct hifn_dma *dma = sc->sc_dma; int i; /* initialize static pointer values */ - for (i = 0; i < AEON_D_CMD_RSIZE; i++) + for (i = 0; i < HIFN_D_CMD_RSIZE; i++) dma->cmdr[i].p = vtophys(dma->command_bufs[i]); - for (i = 0; i < AEON_D_RES_RSIZE; i++) + for (i = 0; i < HIFN_D_RES_RSIZE; i++) dma->resr[i].p = vtophys(dma->result_bufs[i]); - dma->cmdr[AEON_D_CMD_RSIZE].p = vtophys(dma->cmdr); - dma->srcr[AEON_D_SRC_RSIZE].p = vtophys(dma->srcr); - dma->dstr[AEON_D_DST_RSIZE].p = vtophys(dma->dstr); - dma->resr[AEON_D_RES_RSIZE].p = vtophys(dma->resr); + dma->cmdr[HIFN_D_CMD_RSIZE].p = vtophys(dma->cmdr); + dma->srcr[HIFN_D_SRC_RSIZE].p = vtophys(dma->srcr); + dma->dstr[HIFN_D_DST_RSIZE].p = vtophys(dma->dstr); + dma->resr[HIFN_D_RES_RSIZE].p = vtophys(dma->resr); } /* @@ -550,64 +550,64 @@ aeon_init_dma(sc) * command buffer size. 
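/*
 * hifn_init_dma() above points the spare descriptor at the end of each
 * ring back at the ring's base; hifn_crypto() later marks that entry with
 * HIFN_D_JUMP and wraps its index.  A condensed sketch of that wrap for
 * the command ring only (the helper name is invented, the logic mirrors
 * hifn_crypto() below):
 */
static int
example_cmd_ring_next(struct hifn_dma *dma)
{
	int cmdi;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* re-arm the jump descriptor, then wrap to slot 0 */
		dma->cmdr[HIFN_D_CMD_RSIZE].l = HIFN_D_VALID | HIFN_D_LAST |
		    HIFN_D_MASKDONEIRQ | HIFN_D_JUMP;
		cmdi = 0;
		dma->cmdi = 1;
	} else
		cmdi = dma->cmdi++;
	return (cmdi);
}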
*/ u_int -aeon_write_command(const struct aeon_command_buf_data *cmd_data, +hifn_write_command(const struct hifn_command_buf_data *cmd_data, u_int8_t *command_buf) { u_int8_t *command_buf_pos = command_buf; - const aeon_base_command_t *base_cmd = &cmd_data->base_cmd; - const aeon_mac_command_t *mac_cmd = &cmd_data->mac_cmd; - const aeon_crypt_command_t *crypt_cmd = &cmd_data->crypt_cmd; - int using_mac = base_cmd->masks & AEON_BASE_CMD_MAC; - int using_crypt = base_cmd->masks & AEON_BASE_CMD_CRYPT; + const hifn_base_command_t *base_cmd = &cmd_data->base_cmd; + const hifn_mac_command_t *mac_cmd = &cmd_data->mac_cmd; + const hifn_crypt_command_t *crypt_cmd = &cmd_data->crypt_cmd; + int using_mac = base_cmd->masks & HIFN_BASE_CMD_MAC; + int using_crypt = base_cmd->masks & HIFN_BASE_CMD_CRYPT; /* write base command structure */ - *((aeon_base_command_t *) command_buf_pos) = *base_cmd; - command_buf_pos += sizeof(aeon_base_command_t); + *((hifn_base_command_t *) command_buf_pos) = *base_cmd; + command_buf_pos += sizeof(hifn_base_command_t); /* Write MAC command structure */ if (using_mac) { - *((aeon_mac_command_t *) command_buf_pos) = *mac_cmd; - command_buf_pos += sizeof(aeon_mac_command_t); + *((hifn_mac_command_t *) command_buf_pos) = *mac_cmd; + command_buf_pos += sizeof(hifn_mac_command_t); } /* Write encryption command structure */ if (using_crypt) { - *((aeon_crypt_command_t *) command_buf_pos) = *crypt_cmd; - command_buf_pos += sizeof(aeon_crypt_command_t); + *((hifn_crypt_command_t *) command_buf_pos) = *crypt_cmd; + command_buf_pos += sizeof(hifn_crypt_command_t); } /* write MAC key */ - if (mac_cmd->masks & AEON_MAC_NEW_KEY) { - bcopy(cmd_data->mac, command_buf_pos, AEON_MAC_KEY_LENGTH); - command_buf_pos += AEON_MAC_KEY_LENGTH; + if (mac_cmd->masks & HIFN_MAC_NEW_KEY) { + bcopy(cmd_data->mac, command_buf_pos, HIFN_MAC_KEY_LENGTH); + command_buf_pos += HIFN_MAC_KEY_LENGTH; } /* Write crypto key */ - if (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_KEY) { - u_int32_t alg = crypt_cmd->masks & AEON_CRYPT_CMD_ALG_MASK; - u_int32_t key_len = (alg == AEON_CRYPT_CMD_ALG_DES) ? - AEON_DES_KEY_LENGTH : AEON_3DES_KEY_LENGTH; + if (crypt_cmd->masks & HIFN_CRYPT_CMD_NEW_KEY) { + u_int32_t alg = crypt_cmd->masks & HIFN_CRYPT_CMD_ALG_MASK; + u_int32_t key_len = (alg == HIFN_CRYPT_CMD_ALG_DES) ? + HIFN_DES_KEY_LENGTH : HIFN_3DES_KEY_LENGTH; bcopy(cmd_data->ck, command_buf_pos, key_len); command_buf_pos += key_len; } /* Write crypto iv */ - if (crypt_cmd->masks & AEON_CRYPT_CMD_NEW_IV) { - bcopy(cmd_data->iv, command_buf_pos, AEON_IV_LENGTH); - command_buf_pos += AEON_IV_LENGTH; + if (crypt_cmd->masks & HIFN_CRYPT_CMD_NEW_IV) { + bcopy(cmd_data->iv, command_buf_pos, HIFN_IV_LENGTH); + command_buf_pos += HIFN_IV_LENGTH; } /* Write 8 zero bytes we're not sending crypt or MAC structures */ - if (!(base_cmd->masks & AEON_BASE_CMD_MAC) && - !(base_cmd->masks & AEON_BASE_CMD_CRYPT)) { + if (!(base_cmd->masks & HIFN_BASE_CMD_MAC) && + !(base_cmd->masks & HIFN_BASE_CMD_CRYPT)) { *((u_int32_t *) command_buf_pos) = 0; command_buf_pos += 4; *((u_int32_t *) command_buf_pos) = 0; command_buf_pos += 4; } - if ((command_buf_pos - command_buf) > AEON_MAX_COMMAND) - printf("aeon: Internal Error -- Command buffer overflow.\n"); + if ((command_buf_pos - command_buf) > HIFN_MAX_COMMAND) + printf("hifn: Internal Error -- Command buffer overflow.\n"); return command_buf_pos - command_buf; } @@ -617,33 +617,33 @@ aeon_write_command(const struct aeon_command_buf_data *cmd_data, * -1 if given bad command input was given. 
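/*
 * Layout of the serialized command that hifn_write_command() above emits,
 * back to back in the command buffer.  The three fixed structures are 8
 * bytes each (see hifn7751var.h below); the key and IV lengths are left
 * symbolic here since their defines sit outside this diff:
 *
 *	base command	sizeof(hifn_base_command_t)	(8 bytes, always)
 *	MAC command	sizeof(hifn_mac_command_t)	(8 bytes, if MAC used)
 *	crypt command	sizeof(hifn_crypt_command_t)	(8 bytes, if crypt used)
 *	HMAC key	HIFN_MAC_KEY_LENGTH		(if a new MAC key is sent)
 *	crypt key	HIFN_DES/3DES_KEY_LENGTH	(if a new crypt key is sent)
 *	IV		HIFN_IV_LENGTH			(if a new IV is sent)
 *
 * When neither MAC nor crypt is selected, 8 zero bytes are written after
 * the base command instead.  The total must stay within HIFN_MAX_COMMAND
 * (120 bytes), which the overflow warning above checks.
 */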
*/ int -aeon_build_command(const struct aeon_command *cmd, - struct aeon_command_buf_data * cmd_buf_data) +hifn_build_command(const struct hifn_command *cmd, + struct hifn_command_buf_data * cmd_buf_data) { -#define AEON_COMMAND_CHECKING +#define HIFN_COMMAND_CHECKING u_int32_t flags = cmd->flags; - aeon_base_command_t *base_cmd = &cmd_buf_data->base_cmd; - aeon_mac_command_t *mac_cmd = &cmd_buf_data->mac_cmd; - aeon_crypt_command_t *crypt_cmd = &cmd_buf_data->crypt_cmd; + hifn_base_command_t *base_cmd = &cmd_buf_data->base_cmd; + hifn_mac_command_t *mac_cmd = &cmd_buf_data->mac_cmd; + hifn_crypt_command_t *crypt_cmd = &cmd_buf_data->crypt_cmd; u_int mac_length; -#ifdef AEON_COMMAND_CHECKING +#ifdef HIFN_COMMAND_CHECKING int dest_diff; #endif - bzero(cmd_buf_data, sizeof(struct aeon_command_buf_data)); + bzero(cmd_buf_data, sizeof(struct hifn_command_buf_data)); -#ifdef AEON_COMMAND_CHECKING - if (!(!!(flags & AEON_DECODE) ^ !!(flags & AEON_ENCODE))) { - printf("aeon: encode/decode setting error\n"); +#ifdef HIFN_COMMAND_CHECKING + if (!(!!(flags & HIFN_DECODE) ^ !!(flags & HIFN_ENCODE))) { + printf("hifn: encode/decode setting error\n"); return -1; } - if ((flags & AEON_CRYPT_DES) && (flags & AEON_CRYPT_3DES)) { - printf("aeon: Too many crypto algorithms set in command\n"); + if ((flags & HIFN_CRYPT_DES) && (flags & HIFN_CRYPT_3DES)) { + printf("hifn: Too many crypto algorithms set in command\n"); return -1; } - if ((flags & AEON_MAC_SHA1) && (flags & AEON_MAC_MD5)) { - printf("aeon: Too many MAC algorithms set in command\n"); + if ((flags & HIFN_MAC_SHA1) && (flags & HIFN_MAC_MD5)) { + printf("hifn: Too many MAC algorithms set in command\n"); return -1; } #endif @@ -653,11 +653,11 @@ aeon_build_command(const struct aeon_command *cmd, * Compute the mac value length -- leave at zero if not MAC'ing */ mac_length = 0; - if (AEON_USING_MAC(flags)) { - mac_length = (flags & AEON_MAC_TRUNC) ? AEON_MAC_TRUNC_LENGTH : - ((flags & AEON_MAC_MD5) ? AEON_MD5_LENGTH : AEON_SHA1_LENGTH); + if (HIFN_USING_MAC(flags)) { + mac_length = (flags & HIFN_MAC_TRUNC) ? HIFN_MAC_TRUNC_LENGTH : + ((flags & HIFN_MAC_MD5) ? HIFN_MD5_LENGTH : HIFN_SHA1_LENGTH); } -#ifdef AEON_COMMAND_CHECKING +#ifdef HIFN_COMMAND_CHECKING /* * Check for valid src/dest buf sizes */ @@ -668,12 +668,12 @@ aeon_build_command(const struct aeon_command *cmd, */ if (cmd->src_npa <= mac_length) { - printf("aeon: command source buffer has no data\n"); + printf("hifn: command source buffer has no data\n"); return -1; } - dest_diff = (flags & AEON_ENCODE) ? mac_length : -mac_length; + dest_diff = (flags & HIFN_ENCODE) ? mac_length : -mac_length; if (cmd->dst_npa < cmd->dst_npa + dest_diff) { - printf("aeon: command dest length %u too short -- needed %u\n", + printf("hifn: command dest length %u too short -- needed %u\n", cmd->dst_npa, cmd->dst_npa + dest_diff); return -1; } @@ -682,18 +682,18 @@ aeon_build_command(const struct aeon_command *cmd, /* * Set MAC bit */ - if (AEON_USING_MAC(flags)) - base_cmd->masks |= AEON_BASE_CMD_MAC; + if (HIFN_USING_MAC(flags)) + base_cmd->masks |= HIFN_BASE_CMD_MAC; /* Set Encrypt bit */ - if (AEON_USING_CRYPT(flags)) - base_cmd->masks |= AEON_BASE_CMD_CRYPT; + if (HIFN_USING_CRYPT(flags)) + base_cmd->masks |= HIFN_BASE_CMD_CRYPT; /* * Set Decode bit */ - if (flags & AEON_DECODE) - base_cmd->masks |= AEON_BASE_CMD_DECODE; + if (flags & HIFN_DECODE) + base_cmd->masks |= HIFN_BASE_CMD_DECODE; /* * Set total source and dest counts. 
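/*
 * The mac_length selection in hifn_build_command() above, spelled out with
 * the byte counts given in the interface comments earlier in this change
 * (12 for a truncated HMAC, 16 for MD5, 20 for SHA1).  The helper name is
 * invented and the numeric values are the documented ones, not read from
 * the HIFN_*_LENGTH defines (which are outside this diff); the flag names
 * assume the AEON_ -> HIFN_ rename in hifn7751var.h.
 */
static u_int
example_mac_len(u_int flags)
{
	if (!HIFN_USING_MAC(flags))
		return (0);
	if (flags & HIFN_MAC_TRUNC)
		return (12);				/* truncated HMAC */
	return ((flags & HIFN_MAC_MD5) ? 16 : 20);	/* MD5 : SHA1 */
}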
These values are the same as the @@ -711,15 +711,15 @@ aeon_build_command(const struct aeon_command *cmd, ** Building up mac command ** **/ - if (AEON_USING_MAC(flags)) { + if (HIFN_USING_MAC(flags)) { /* * Set the MAC algorithm and trunc setting */ - mac_cmd->masks |= (flags & AEON_MAC_MD5) ? - AEON_MAC_CMD_ALG_MD5 : AEON_MAC_CMD_ALG_SHA1; - if (flags & AEON_MAC_TRUNC) - mac_cmd->masks |= AEON_MAC_CMD_TRUNC; + mac_cmd->masks |= (flags & HIFN_MAC_MD5) ? + HIFN_MAC_CMD_ALG_MD5 : HIFN_MAC_CMD_ALG_SHA1; + if (flags & HIFN_MAC_TRUNC) + mac_cmd->masks |= HIFN_MAC_CMD_TRUNC; /* * We always use HMAC mode, assume MAC values are appended to the @@ -727,14 +727,14 @@ aeon_build_command(const struct aeon_command *cmd, * on encodes, and order auth/encryption engines as needed by * IPSEC */ - mac_cmd->masks |= AEON_MAC_CMD_MODE_HMAC | AEON_MAC_CMD_APPEND | - AEON_MAC_CMD_POS_IPSEC; + mac_cmd->masks |= HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_APPEND | + HIFN_MAC_CMD_POS_IPSEC; /* * Setup to send new MAC key if needed. */ - if (flags & AEON_MAC_NEW_KEY) { - mac_cmd->masks |= AEON_MAC_CMD_NEW_KEY; + if (flags & HIFN_MAC_NEW_KEY) { + mac_cmd->masks |= HIFN_MAC_CMD_NEW_KEY; cmd_buf_data->mac = cmd->mac; } /* @@ -742,26 +742,26 @@ aeon_build_command(const struct aeon_command *cmd, */ mac_cmd->header_skip = cmd->mac_header_skip; mac_cmd->source_count = cmd->src_npa - cmd->mac_header_skip; - if (flags & AEON_DECODE) + if (flags & HIFN_DECODE) mac_cmd->source_count -= mac_length; } - if (AEON_USING_CRYPT(flags)) { + if (HIFN_USING_CRYPT(flags)) { /* * Set the encryption algorithm bits. */ - crypt_cmd->masks |= (flags & AEON_CRYPT_DES) ? - AEON_CRYPT_CMD_ALG_DES : AEON_CRYPT_CMD_ALG_3DES; + crypt_cmd->masks |= (flags & HIFN_CRYPT_DES) ? + HIFN_CRYPT_CMD_ALG_DES : HIFN_CRYPT_CMD_ALG_3DES; /* We always use CBC mode and send a new IV (as needed by * IPSec). */ - crypt_cmd->masks |= AEON_CRYPT_CMD_MODE_CBC | AEON_CRYPT_CMD_NEW_IV; + crypt_cmd->masks |= HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; /* * Setup to send new encrypt key if needed. 
*/ - if (flags & AEON_CRYPT_CMD_NEW_KEY) { - crypt_cmd->masks |= AEON_CRYPT_CMD_NEW_KEY; + if (flags & HIFN_CRYPT_CMD_NEW_KEY) { + crypt_cmd->masks |= HIFN_CRYPT_CMD_NEW_KEY; cmd_buf_data->ck = cmd->ck; } /* @@ -769,13 +769,13 @@ aeon_build_command(const struct aeon_command *cmd, */ crypt_cmd->header_skip = cmd->crypt_header_skip; crypt_cmd->source_count = cmd->src_npa - cmd->crypt_header_skip; - if (flags & AEON_DECODE) + if (flags & HIFN_DECODE) crypt_cmd->source_count -= mac_length; -#ifdef AEON_COMMAND_CHECKING +#ifdef HIFN_COMMAND_CHECKING if (crypt_cmd->source_count % 8 != 0) { - printf("aeon: Error -- encryption source %u not a multiple of 8!\n", + printf("hifn: Error -- encryption source %u not a multiple of 8!\n", crypt_cmd->source_count); return -1; } @@ -785,7 +785,7 @@ aeon_build_command(const struct aeon_command *cmd, #if 1 - printf("aeon: command parameters" + printf("hifn: command parameters" " -- session num %u" " -- base t.s.c: %u" " -- base t.d.c: %u" @@ -801,7 +801,7 @@ aeon_build_command(const struct aeon_command *cmd, } int -aeon_mbuf(m, np, pp, lp, maxp, nicep) +hifn_mbuf(m, np, pp, lp, maxp, nicep) struct mbuf *m; int *np; long *pp; @@ -864,25 +864,25 @@ aeon_mbuf(m, np, pp, lp, maxp, nicep) } int -aeon_crypto(struct aeon_command *cmd) +hifn_crypto(struct hifn_command *cmd) { u_int32_t cmdlen; static u_int32_t current_device = 0; - struct aeon_softc *sc; - struct aeon_dma *dma; - struct aeon_command_buf_data cmd_buf_data; + struct hifn_softc *sc; + struct hifn_dma *dma; + struct hifn_command_buf_data cmd_buf_data; int cmdi, srci, dsti, resi, nicealign = 0; int error, s, i; - /* Pick the aeon board to send the data to. Right now we use a round + /* Pick the hifn board to send the data to. Right now we use a round * robin approach. 
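/*
 * Worked example of the header_skip / source_count bookkeeping in
 * hifn_build_command() above, for a hypothetical authenticated decode of a
 * 104-byte source carrying a 20-byte IP header, an 8-byte ESP header and a
 * truncated (12-byte) MAC appended at the end:
 *
 *	mac_cmd.header_skip    = 20
 *	mac_cmd.source_count   = 104 - 20 - 12 = 72
 *	crypt_cmd.header_skip  = 28
 *	crypt_cmd.source_count = 104 - 28 - 12 = 64	(a multiple of 8, as required)
 *
 * All packet sizes here are invented for illustration.
 */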
*/ - sc = aeon_devices[current_device++]; - if (current_device == aeon_num_devices) + sc = hifn_devices[current_device++]; + if (current_device == hifn_num_devices) current_device = 0; dma = sc->sc_dma; if (cmd->src_npa == 0 && cmd->src_m) - cmd->src_l = aeon_mbuf(cmd->src_m, &cmd->src_npa, + cmd->src_l = hifn_mbuf(cmd->src_m, &cmd->src_npa, cmd->src_packp, cmd->src_packl, MAX_SCATTER, &nicealign); if (cmd->src_l == 0) return (-1); @@ -902,17 +902,17 @@ aeon_crypto(struct aeon_command *cmd) } else cmd->dst_m = cmd->src_m; - cmd->dst_l = aeon_mbuf(cmd->dst_m, &cmd->dst_npa, + cmd->dst_l = hifn_mbuf(cmd->dst_m, &cmd->dst_npa, cmd->dst_packp, cmd->dst_packl, MAX_SCATTER, NULL); if (cmd->dst_l == 0) return (-1); - if (aeon_build_command(cmd, &cmd_buf_data) != 0) - return AEON_CRYPTO_BAD_INPUT; + if (hifn_build_command(cmd, &cmd_buf_data) != 0) + return HIFN_CRYPTO_BAD_INPUT; printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", sc->sc_dv.dv_xname, - READ_REG_1(sc, AEON_1_DMA_CSR), READ_REG_1(sc, AEON_1_DMA_IER), + READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER), dma->cmdu, dma->srcu, dma->dstu, dma->resu, cmd->src_npa, cmd->dst_npa); @@ -922,68 +922,68 @@ aeon_crypto(struct aeon_command *cmd) * need 1 cmd, and 1 res * need N src, and N dst */ - while (dma->cmdu+1 > AEON_D_CMD_RSIZE || - dma->srcu+cmd->src_npa > AEON_D_SRC_RSIZE || - dma->dstu+cmd->dst_npa > AEON_D_DST_RSIZE || - dma->resu+1 > AEON_D_RES_RSIZE) { - if (cmd->flags & AEON_DMA_FULL_NOBLOCK) { + while (dma->cmdu+1 > HIFN_D_CMD_RSIZE || + dma->srcu+cmd->src_npa > HIFN_D_SRC_RSIZE || + dma->dstu+cmd->dst_npa > HIFN_D_DST_RSIZE || + dma->resu+1 > HIFN_D_RES_RSIZE) { + if (cmd->flags & HIFN_DMA_FULL_NOBLOCK) { splx(s); - return (AEON_CRYPTO_RINGS_FULL); + return (HIFN_CRYPTO_RINGS_FULL); } - tsleep((caddr_t) dma, PZERO, "aeonring", 1); + tsleep((caddr_t) dma, PZERO, "hifnring", 1); } - if (dma->cmdi == AEON_D_CMD_RSIZE) { + if (dma->cmdi == HIFN_D_CMD_RSIZE) { cmdi = 0, dma->cmdi = 1; - dma->cmdr[AEON_D_CMD_RSIZE].l = AEON_D_VALID | AEON_D_LAST | - AEON_D_MASKDONEIRQ | AEON_D_JUMP; + dma->cmdr[HIFN_D_CMD_RSIZE].l = HIFN_D_VALID | HIFN_D_LAST | + HIFN_D_MASKDONEIRQ | HIFN_D_JUMP; } else cmdi = dma->cmdi++; - if (dma->resi == AEON_D_RES_RSIZE) { + if (dma->resi == HIFN_D_RES_RSIZE) { resi = 0, dma->resi = 1; - dma->resr[AEON_D_RES_RSIZE].l = AEON_D_VALID | AEON_D_LAST | - AEON_D_MASKDONEIRQ | AEON_D_JUMP; + dma->resr[HIFN_D_RES_RSIZE].l = HIFN_D_VALID | HIFN_D_LAST | + HIFN_D_MASKDONEIRQ | HIFN_D_JUMP; } else resi = dma->resi++; - cmdlen = aeon_write_command(&cmd_buf_data, dma->command_bufs[cmdi]); - dma->aeon_commands[cmdi] = cmd; + cmdlen = hifn_write_command(&cmd_buf_data, dma->command_bufs[cmdi]); + dma->hifn_commands[cmdi] = cmd; /* .p for command/result already set */ - dma->cmdr[cmdi].l = cmdlen | AEON_D_VALID | AEON_D_LAST | - AEON_D_MASKDONEIRQ; + dma->cmdr[cmdi].l = cmdlen | HIFN_D_VALID | HIFN_D_LAST | + HIFN_D_MASKDONEIRQ; dma->cmdu += 1; for (i = 0; i < cmd->src_npa; i++) { int last = 0; if (i == cmd->src_npa-1) - last = AEON_D_LAST; + last = HIFN_D_LAST; - if (dma->srci == AEON_D_SRC_RSIZE) { + if (dma->srci == HIFN_D_SRC_RSIZE) { srci = 0, dma->srci = 1; - dma->srcr[AEON_D_SRC_RSIZE].l = AEON_D_VALID | - AEON_D_MASKDONEIRQ | AEON_D_JUMP; + dma->srcr[HIFN_D_SRC_RSIZE].l = HIFN_D_VALID | + HIFN_D_MASKDONEIRQ | HIFN_D_JUMP; } else srci = dma->srci++; dma->srcr[srci].p = vtophys(cmd->src_packp[i]); - dma->srcr[srci].l = cmd->src_packl[i] | AEON_D_VALID | - AEON_D_MASKDONEIRQ | last; + 
dma->srcr[srci].l = cmd->src_packl[i] | HIFN_D_VALID | + HIFN_D_MASKDONEIRQ | last; } dma->srcu += cmd->src_npa; for (i = 0; i < cmd->dst_npa; i++) { int last = 0; - if (dma->dsti == AEON_D_DST_RSIZE) { + if (dma->dsti == HIFN_D_DST_RSIZE) { dsti = 0, dma->dsti = 1; - dma->dstr[AEON_D_DST_RSIZE].l = AEON_D_VALID | - AEON_D_MASKDONEIRQ | AEON_D_JUMP; + dma->dstr[HIFN_D_DST_RSIZE].l = HIFN_D_VALID | + HIFN_D_MASKDONEIRQ | HIFN_D_JUMP; } else dsti = dma->dsti++; dma->dstr[dsti].p = vtophys(cmd->dst_packp[i]); - dma->dstr[dsti].l = cmd->dst_packl[i] | AEON_D_VALID | - AEON_D_MASKDONEIRQ | last; + dma->dstr[dsti].l = cmd->dst_packl[i] | HIFN_D_VALID | + HIFN_D_MASKDONEIRQ | last; } dma->dstu += cmd->dst_npa; @@ -991,7 +991,7 @@ aeon_crypto(struct aeon_command *cmd) * Unlike other descriptors, we don't mask done interrupt from * result descriptor. */ - dma->resr[resi].l = AEON_MAX_RESULT | AEON_D_VALID | AEON_D_LAST; + dma->resr[resi].l = HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST; dma->resu += 1; /* @@ -1000,8 +1000,8 @@ aeon_crypto(struct aeon_command *cmd) * than one command in the queue. */ if (dma->slots_in_use > 1) { - WRITE_REG_1(sc, AEON_1_DMA_IER, - AEON_DMAIER_R_DONE | AEON_DMAIER_C_WAIT); + WRITE_REG_1(sc, HIFN_1_DMA_IER, + HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_WAIT); } /* @@ -1021,55 +1021,55 @@ aeon_crypto(struct aeon_command *cmd) printf("%s: command: stat %8x ier %8x\n", sc->sc_dv.dv_xname, - READ_REG_1(sc, AEON_1_DMA_CSR), READ_REG_1(sc, AEON_1_DMA_IER)); + READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER)); splx(s); return 0; /* success */ } int -aeon_intr(arg) +hifn_intr(arg) void *arg; { - struct aeon_softc *sc = arg; - struct aeon_dma *dma = sc->sc_dma; + struct hifn_softc *sc = arg; + struct hifn_dma *dma = sc->sc_dma; u_int32_t dmacsr; - dmacsr = READ_REG_1(sc, AEON_1_DMA_CSR); + dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); printf("%s: irq: stat %8x ien %8x u %d/%d/%d/%d\n", sc->sc_dv.dv_xname, - dmacsr, READ_REG_1(sc, AEON_1_DMA_IER), + dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), dma->cmdu, dma->srcu, dma->dstu, dma->resu); - if ((dmacsr & (AEON_DMACSR_C_WAIT|AEON_DMACSR_R_DONE)) == 0) + if ((dmacsr & (HIFN_DMACSR_C_WAIT|HIFN_DMACSR_R_DONE)) == 0) return (0); - if ((dma->slots_in_use == 0) && (dmacsr & AEON_DMACSR_C_WAIT)) { + if ((dma->slots_in_use == 0) && (dmacsr & HIFN_DMACSR_C_WAIT)) { /* * If no slots to process and we received a "waiting on * result" interrupt, we disable the "waiting on result" * (by clearing it). */ - WRITE_REG_1(sc, AEON_1_DMA_IER, AEON_DMAIER_R_DONE); + WRITE_REG_1(sc, HIFN_1_DMA_IER, HIFN_DMAIER_R_DONE); } else { - if (dma->slots_in_use > AEON_D_RSIZE) + if (dma->slots_in_use > HIFN_D_RSIZE) printf("%s: Internal Error -- ring overflow\n", sc->sc_dv.dv_xname); while (dma->slots_in_use > 0) { u_int32_t wake_pos = dma->wakeup_rpos; - struct aeon_command *cmd = dma->aeon_commands[wake_pos]; + struct hifn_command *cmd = dma->hifn_commands[wake_pos]; /* if still valid, stop processing */ - if (dma->resr[wake_pos].l & AEON_D_VALID) + if (dma->resr[wake_pos].l & HIFN_D_VALID) break; - if (AEON_USING_MAC(cmd->flags) && (cmd->flags & AEON_DECODE)) { + if (HIFN_USING_MAC(cmd->flags) && (cmd->flags & HIFN_DECODE)) { u_int8_t *result_buf = dma->result_bufs[wake_pos]; cmd->result_status = (result_buf[8] & 0x2) ? 
- AEON_MAC_BAD : 0; + HIFN_MAC_BAD : 0; printf("%s: byte index 8 of result 0x%02x\n", sc->sc_dv.dv_xname, (u_int32_t) result_buf[8]); } @@ -1080,7 +1080,7 @@ aeon_intr(arg) else cmd->dest_ready_callback(cmd); - if (++dma->wakeup_rpos == AEON_D_RSIZE) + if (++dma->wakeup_rpos == HIFN_D_RSIZE) dma->wakeup_rpos = 0; dma->slots_in_use--; } @@ -1091,6 +1091,6 @@ aeon_intr(arg) * register. If we still have slots to process and we received a * waiting interrupt, this will interupt us again. */ - WRITE_REG_1(sc, AEON_1_DMA_CSR, AEON_DMACSR_R_DONE|AEON_DMACSR_C_WAIT); + WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_DONE|HIFN_DMACSR_C_WAIT); return (1); } diff --git a/sys/dev/pci/hifn7751reg.h b/sys/dev/pci/hifn7751reg.h index cee7b06ae87..49f4402fda8 100644 --- a/sys/dev/pci/hifn7751reg.h +++ b/sys/dev/pci/hifn7751reg.h @@ -1,7 +1,7 @@ -/* $OpenBSD: hifn7751reg.h,v 1.6 2000/03/15 14:55:52 jason Exp $ */ +/* $OpenBSD: hifn7751reg.h,v 1.7 2000/03/16 20:33:48 deraadt Exp $ */ /* - * Invertex AEON driver + * Invertex AEON / Hi/fn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * * Please send any comments, feedback, bug-fixes, or feature requests to @@ -31,8 +31,8 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef __AEON_H__ -#define __AEON_H__ +#ifndef __HIFN_H__ +#define __HIFN_H__ #include @@ -40,69 +40,69 @@ * Some PCI configuration space offset defines. The names were made * identical to the names used by the Linux kernel. */ -#define AEON_BAR0 (PCI_MAPREG_START + 0) /* PUC register map */ -#define AEON_BAR1 (PCI_MAPREG_START + 4) /* DMA register map */ +#define HIFN_BAR0 (PCI_MAPREG_START + 0) /* PUC register map */ +#define HIFN_BAR1 (PCI_MAPREG_START + 4) /* DMA register map */ /* * Some configurable values for the driver */ -#define AEON_D_RSIZE 24 -#define AEON_MAX_DEVICES 4 +#define HIFN_D_RSIZE 24 +#define HIFN_MAX_DEVICES 4 -#define AEON_D_CMD_RSIZE 24 -#define AEON_D_SRC_RSIZE 80 -#define AEON_D_DST_RSIZE 80 -#define AEON_D_RES_RSIZE 24 +#define HIFN_D_CMD_RSIZE 24 +#define HIFN_D_SRC_RSIZE 80 +#define HIFN_D_DST_RSIZE 80 +#define HIFN_D_RES_RSIZE 24 /* * The values below should multiple of 4 -- and be large enough to handle * any command the driver implements. */ -#define AEON_MAX_COMMAND 120 -#define AEON_MAX_RESULT 16 +#define HIFN_MAX_COMMAND 120 +#define HIFN_MAX_RESULT 16 /* - * aeon_desc_t + * hifn_desc_t * * Holds an individual descriptor for any of the rings. */ -typedef struct aeon_desc { +typedef struct hifn_desc { volatile u_int32_t l; /* length and status bits */ volatile u_int32_t p; -} aeon_desc_t; +} hifn_desc_t; /* - * Masks for the "length" field of struct aeon_desc. + * Masks for the "length" field of struct hifn_desc. */ -#define AEON_D_MASKDONEIRQ (0x1 << 25) -#define AEON_D_LAST (0x1 << 29) -#define AEON_D_JUMP (0x1 << 30) -#define AEON_D_VALID (0x1 << 31) +#define HIFN_D_MASKDONEIRQ (0x1 << 25) +#define HIFN_D_LAST (0x1 << 29) +#define HIFN_D_JUMP (0x1 << 30) +#define HIFN_D_VALID (0x1 << 31) /* - * aeon_callback_t + * hifn_callback_t * * Type for callback function when dest data is ready. */ -typedef void (*aeon_callback_t)(aeon_command_t *); +typedef void (*hifn_callback_t)(hifn_command_t *); /* * Data structure to hold all 4 rings and any other ring related data. */ -struct aeon_dma { +struct hifn_dma { /* * Descriptor rings. We add +1 to the size to accomidate the * jump descriptor. 
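/*
 * A descriptor's "l" word combines a byte count in its low bits with the
 * control bits defined above.  For instance, hifn_checkram() builds its
 * 8-byte source descriptor as
 *
 *	dma->srcr[0].l = 8 | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
 *
 * while the spare jump entry at the end of each ring carries HIFN_D_JUMP
 * and no byte count.
 */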
*/ - struct aeon_desc cmdr[AEON_D_RSIZE+1]; - struct aeon_desc srcr[AEON_D_RSIZE+1]; - struct aeon_desc dstr[AEON_D_RSIZE+1]; - struct aeon_desc resr[AEON_D_RSIZE+1]; + struct hifn_desc cmdr[HIFN_D_RSIZE+1]; + struct hifn_desc srcr[HIFN_D_RSIZE+1]; + struct hifn_desc dstr[HIFN_D_RSIZE+1]; + struct hifn_desc resr[HIFN_D_RSIZE+1]; - struct aeon_command *aeon_commands[AEON_D_RSIZE]; + struct hifn_command *hifn_commands[HIFN_D_RSIZE]; - u_char command_bufs[AEON_D_RSIZE][AEON_MAX_COMMAND]; - u_char result_bufs[AEON_D_RSIZE][AEON_MAX_RESULT]; + u_char command_bufs[HIFN_D_RSIZE][HIFN_MAX_COMMAND]; + u_char result_bufs[HIFN_D_RSIZE][HIFN_MAX_RESULT]; /* * Our current positions for insertion and removal from the desriptor @@ -116,9 +116,9 @@ struct aeon_dma { }; /* - * Holds data specific to a single AEON board. + * Holds data specific to a single HIFN board. */ -struct aeon_softc { +struct hifn_softc { struct device sc_dv; /* generic device */ void * sc_ih; /* interrupt handler cookie */ u_int32_t sc_drammodel; /* 1=dram, 0=sram */ @@ -127,185 +127,185 @@ struct aeon_softc { bus_space_tag_t sc_st0, sc_st1; bus_dma_tag_t sc_dmat; - struct aeon_dma *sc_dma; + struct hifn_dma *sc_dma; }; /* * Processing Unit Registers (offset from BASEREG0) */ -#define AEON_0_PUDATA 0x00 /* Processing Unit Data */ -#define AEON_0_PUCTRL 0x04 /* Processing Unit Control */ -#define AEON_0_PUISR 0x08 /* Processing Unit Interrupt Status */ -#define AEON_0_PUCNFG 0x0c /* Processing Unit Configuration */ -#define AEON_0_PUIER 0x10 /* Processing Unit Interrupt Enable */ -#define AEON_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */ -#define AEON_0_FIFOSTAT 0x18 /* FIFO Status */ -#define AEON_0_FIFOCNFG 0x1c /* FIFO Configuration */ -#define AEON_0_SPACESIZE 0x20 /* Register space size */ - -/* Processing Unit Control Register (AEON_0_PUCTRL) */ -#define AEON_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */ -#define AEON_PUCTRL_STOP 0x0008 /* stop pu */ -#define AEON_PUCTRL_LOCKRAM 0x0004 /* lock ram */ -#define AEON_PUCTRL_DMAENA 0x0002 /* enable dma */ -#define AEON_PUCTRL_RESET 0x0001 /* Reset processing unit */ - -/* Processing Unit Interrupt Status Register (AEON_0_PUISR) */ -#define AEON_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */ -#define AEON_PUISR_DATAERR 0x4000 /* Data error interrupt */ -#define AEON_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ -#define AEON_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ -#define AEON_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */ -#define AEON_PUISR_SRCCMD 0x0080 /* Source command interrupt */ -#define AEON_PUISR_SRCCTX 0x0040 /* Source context interrupt */ -#define AEON_PUISR_SRCDATA 0x0020 /* Source data interrupt */ -#define AEON_PUISR_DSTDATA 0x0010 /* Destination data interrupt */ -#define AEON_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */ - -/* Processing Unit Configuration Register (AEON_0_PUCNFG) */ -#define AEON_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */ -#define AEON_PUCNFG_DSZ_256K 0x0000 /* 256k dram */ -#define AEON_PUCNFG_DSZ_512K 0x2000 /* 512k dram */ -#define AEON_PUCNFG_DSZ_1M 0x4000 /* 1m dram */ -#define AEON_PUCNFG_DSZ_2M 0x6000 /* 2m dram */ -#define AEON_PUCNFG_DSZ_4M 0x8000 /* 4m dram */ -#define AEON_PUCNFG_DSZ_8M 0xa000 /* 8m dram */ -#define AEON_PUNCFG_DSZ_16M 0xc000 /* 16m dram */ -#define AEON_PUCNFG_DSZ_32M 0xe000 /* 32m dram */ -#define AEON_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */ -#define AEON_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */ -#define AEON_PUCNFG_DRFR_256 
0x0800 /* 256 divisor of ECLK */ -#define AEON_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */ -#define AEON_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */ -#define AEON_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */ -#define AEON_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */ -#define AEON_PUCNFG_BUS32 0x0040 /* Bus width 32bits */ -#define AEON_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */ -#define AEON_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */ -#define AEON_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */ -#define AEON_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */ -#define AEON_PUCNFG_COMPSING 0x0004 /* Enable single compression context */ -#define AEON_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */ - -/* Processing Unit Interrupt Enable Register (AEON_0_PUIER) */ -#define AEON_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */ -#define AEON_PUIER_DATAERR 0x4000 /* Data error interrupt */ -#define AEON_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ -#define AEON_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ -#define AEON_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */ -#define AEON_PUIER_SRCCMD 0x0080 /* Source command interrupt */ -#define AEON_PUIER_SRCCTX 0x0040 /* Source context interrupt */ -#define AEON_PUIER_SRCDATA 0x0020 /* Source data interrupt */ -#define AEON_PUIER_DSTDATA 0x0010 /* Destination data interrupt */ -#define AEON_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */ - -/* Processing Unit Status Register/Chip ID (AEON_0_PUSTAT) */ -#define AEON_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */ -#define AEON_PUSTAT_DATAERR 0x4000 /* Data error interrupt */ -#define AEON_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ -#define AEON_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ -#define AEON_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */ -#define AEON_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */ -#define AEON_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */ -#define AEON_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */ -#define AEON_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */ -#define AEON_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */ -#define AEON_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */ -#define AEON_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */ -#define AEON_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */ -#define AEON_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */ -#define AEON_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */ -#define AEON_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */ -#define AEON_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */ - -/* FIFO Status Register (AEON_0_FIFOSTAT) */ -#define AEON_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */ -#define AEON_FIFOSTAT_DST 0x007f /* Destination FIFO available */ - -/* FIFO Configuration Register (AEON_0_FIFOCNFG) */ -#define AEON_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */ +#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */ +#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */ +#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */ +#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */ +#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */ +#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */ +#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */ +#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */ +#define HIFN_0_SPACESIZE 0x20 /* Register space size */ + +/* Processing Unit Control Register (HIFN_0_PUCTRL) */ +#define 
HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */ +#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */ +#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */ +#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */ +#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */ + +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */ +#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */ + +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */ +#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */ +#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */ +#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */ +#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */ +#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */ +#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */ +#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */ +#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */ +#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */ +#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */ +#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */ +#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */ +#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */ +#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */ +#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... 
*/ +#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */ +#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */ +#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */ +#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */ +#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */ +#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */ +#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */ +#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */ + +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */ +#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */ + +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */ +#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */ +#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */ +#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */ +#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */ +#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */ +#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */ +#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */ +#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */ + +/* FIFO Status Register (HIFN_0_FIFOSTAT) */ +#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */ +#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */ + +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */ +#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */ /* * DMA Interface Registers (offset from BASEREG1) */ -#define AEON_1_DMA_CRAR 0x0c /* DMA Command Ring Address */ -#define AEON_1_DMA_SRAR 0x1c /* DMA Source Ring Address */ -#define AEON_1_DMA_RRAR 0x2c /* DMA Resultt Ring Address */ -#define AEON_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */ -#define AEON_1_DMA_CSR 0x40 /* DMA Status and Control */ -#define AEON_1_DMA_IER 0x44 /* DMA Interrupt Enable */ -#define AEON_1_DMA_CNFG 0x48 /* DMA Configuration */ -#define AEON_1_REVID 0x98 /* Revision ID */ - -/* DMA Status and Control Register (AEON_1_DMA_CSR) */ -#define AEON_DMACSR_D_CTRLMASK 0xc0000000 /* Destinition Ring Control */ -#define AEON_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */ -#define AEON_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */ -#define AEON_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. 
Control: enable */ -#define AEON_DMACSR_D_ABORT 0x20000000 /* Destinition Ring PCIAbort */ -#define AEON_DMACSR_D_DONE 0x10000000 /* Destinition Ring Done */ -#define AEON_DMACSR_D_LAST 0x08000000 /* Destinition Ring Last */ -#define AEON_DMACSR_D_WAIT 0x04000000 /* Destinition Ring Waiting */ -#define AEON_DMACSR_D_OVER 0x02000000 /* Destinition Ring Overflow */ -#define AEON_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */ -#define AEON_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */ -#define AEON_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */ -#define AEON_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */ -#define AEON_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */ -#define AEON_DMACSR_R_DONE 0x00100000 /* Result Ring Done */ -#define AEON_DMACSR_R_LAST 0x00080000 /* Result Ring Last */ -#define AEON_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */ -#define AEON_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */ -#define AEON_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */ -#define AEON_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */ -#define AEON_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */ -#define AEON_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */ -#define AEON_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */ -#define AEON_DMACSR_S_DONE 0x00001000 /* Source Ring Done */ -#define AEON_DMACSR_S_LAST 0x00000800 /* Source Ring Last */ -#define AEON_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */ -#define AEON_DMACSR_S_OVER 0x00000200 /* Source Ring Overflow */ -#define AEON_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */ -#define AEON_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */ -#define AEON_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */ -#define AEON_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */ -#define AEON_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */ -#define AEON_DMACSR_C_DONE 0x00000010 /* Command Ring Done */ -#define AEON_DMACSR_C_LAST 0x00000008 /* Command Ring Last */ -#define AEON_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */ -#define AEON_DMACSR_C_EIRQ 0x00000001 /* Command Ring Engine IRQ */ - -/* DMA Interrupt Enable Register (AEON_1_DMA_IER) */ -#define AEON_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */ -#define AEON_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */ -#define AEON_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */ -#define AEON_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */ -#define AEON_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */ -#define AEON_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */ -#define AEON_DMAIER_R_DONE 0x00100000 /* Result Ring Done */ -#define AEON_DMAIER_R_LAST 0x00080000 /* Result Ring Last */ -#define AEON_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */ -#define AEON_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */ -#define AEON_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */ -#define AEON_DMAIER_S_DONE 0x00001000 /* Source Ring Done */ -#define AEON_DMAIER_S_LAST 0x00000800 /* Source Ring Last */ -#define AEON_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */ -#define AEON_DMAIER_S_OVER 0x00000200 /* Source Ring Overflow */ -#define AEON_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */ -#define AEON_DMAIER_C_DONE 0x00000010 /* Command Ring Done */ -#define AEON_DMAIER_C_LAST 0x00000008 /* Command Ring Last */ -#define AEON_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */ -#define AEON_DMAIER_ENGINE 0x00000001 /* Engine IRQ */ 
- -/* DMA Configuration Register (AEON_1_DMA_CNFG) */ -#define AEON_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */ -#define AEON_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */ -#define AEON_DMACNFG_UNLOCK 0x00000800 -#define AEON_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */ -#define AEON_DMACNFG_LAST 0x00000010 /* Host control LAST bit */ -#define AEON_DMACNFG_MODE 0x00000004 /* DMA mode */ -#define AEON_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */ -#define AEON_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */ +#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */ +#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */ +#define HIFN_1_DMA_RRAR 0x2c /* DMA Resultt Ring Address */ +#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */ +#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */ +#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */ +#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */ +#define HIFN_1_REVID 0x98 /* Revision ID */ + +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */ +#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destinition Ring Control */ +#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */ +#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */ +#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */ +#define HIFN_DMACSR_D_ABORT 0x20000000 /* Destinition Ring PCIAbort */ +#define HIFN_DMACSR_D_DONE 0x10000000 /* Destinition Ring Done */ +#define HIFN_DMACSR_D_LAST 0x08000000 /* Destinition Ring Last */ +#define HIFN_DMACSR_D_WAIT 0x04000000 /* Destinition Ring Waiting */ +#define HIFN_DMACSR_D_OVER 0x02000000 /* Destinition Ring Overflow */ +#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */ +#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */ +#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */ +#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */ +#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */ +#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */ +#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */ +#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */ +#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */ +#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */ +#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */ +#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */ +#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */ +#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */ +#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */ +#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */ +#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */ +#define HIFN_DMACSR_S_OVER 0x00000200 /* Source Ring Overflow */ +#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */ +#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */ +#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */ +#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */ +#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */ +#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */ +#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */ +#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */ +#define HIFN_DMACSR_C_EIRQ 0x00000001 /* Command Ring Engine IRQ */ + +/* DMA Interrupt Enable Register 
(HIFN_1_DMA_IER) */ +#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */ +#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */ +#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */ +#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */ +#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */ +#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */ +#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */ +#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */ +#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */ +#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */ +#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */ +#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */ +#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */ +#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */ +#define HIFN_DMAIER_S_OVER 0x00000200 /* Source Ring Overflow */ +#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */ +#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */ +#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */ +#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */ +#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */ + +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */ +#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */ +#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */ +#define HIFN_DMACNFG_UNLOCK 0x00000800 +#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */ +#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */ +#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */ +#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */ +#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */ #define WRITE_REG_0(sc,reg,val) \ bus_space_write_4((sc)->sc_st0, (sc)->sc_sh0, reg, val) @@ -316,8 +316,8 @@ struct aeon_softc { * Register offsets in register set 1 */ -#define AEON_UNLOCK_SECRET1 0xf4 -#define AEON_UNLOCK_SECRET2 0xfc +#define HIFN_UNLOCK_SECRET1 0xf4 +#define HIFN_UNLOCK_SECRET2 0xfc #define WRITE_REG_1(sc,reg,val) \ bus_space_write_4((sc)->sc_st1, (sc)->sc_sh1, reg, val) @@ -332,78 +332,78 @@ struct aeon_softc { /* * Structure to help build up the command data structure. */ -typedef struct aeon_base_command { +typedef struct hifn_base_command { u_int16_t masks; u_int16_t session_num; u_int16_t total_source_count; u_int16_t total_dest_count; -} aeon_base_command_t; +} hifn_base_command_t; -#define AEON_BASE_CMD_MAC (0x1 << 10) -#define AEON_BASE_CMD_CRYPT (0x1 << 11) -#define AEON_BASE_CMD_DECODE (0x1 << 13) +#define HIFN_BASE_CMD_MAC (0x1 << 10) +#define HIFN_BASE_CMD_CRYPT (0x1 << 11) +#define HIFN_BASE_CMD_DECODE (0x1 << 13) /* * Structure to help build up the command data structure. 
*/ -typedef struct aeon_crypt_command { +typedef struct hifn_crypt_command { u_int16_t masks; u_int16_t header_skip; u_int32_t source_count; -} aeon_crypt_command_t; +} hifn_crypt_command_t; -#define AEON_CRYPT_CMD_ALG_MASK (0x3 << 0) -#define AEON_CRYPT_CMD_ALG_DES (0x0 << 0) -#define AEON_CRYPT_CMD_ALG_3DES (0x1 << 0) -#define AEON_CRYPT_CMD_MODE_CBC (0x1 << 3) -#define AEON_CRYPT_CMD_NEW_KEY (0x1 << 11) -#define AEON_CRYPT_CMD_NEW_IV (0x1 << 12) +#define HIFN_CRYPT_CMD_ALG_MASK (0x3 << 0) +#define HIFN_CRYPT_CMD_ALG_DES (0x0 << 0) +#define HIFN_CRYPT_CMD_ALG_3DES (0x1 << 0) +#define HIFN_CRYPT_CMD_MODE_CBC (0x1 << 3) +#define HIFN_CRYPT_CMD_NEW_KEY (0x1 << 11) +#define HIFN_CRYPT_CMD_NEW_IV (0x1 << 12) /* * Structure to help build up the command data structure. */ -typedef struct aeon_mac_command { +typedef struct hifn_mac_command { u_int16_t masks; u_int16_t header_skip; u_int32_t source_count; -} aeon_mac_command_t; +} hifn_mac_command_t; -#define AEON_MAC_CMD_ALG_MD5 (0x1 << 0) -#define AEON_MAC_CMD_ALG_SHA1 (0x0 << 0) -#define AEON_MAC_CMD_MODE_HMAC (0x0 << 2) -#define AEON_MAC_CMD_TRUNC (0x1 << 4) -#define AEON_MAC_CMD_APPEND (0x1 << 6) +#define HIFN_MAC_CMD_ALG_MD5 (0x1 << 0) +#define HIFN_MAC_CMD_ALG_SHA1 (0x0 << 0) +#define HIFN_MAC_CMD_MODE_HMAC (0x0 << 2) +#define HIFN_MAC_CMD_TRUNC (0x1 << 4) +#define HIFN_MAC_CMD_APPEND (0x1 << 6) /* * MAC POS IPSec initiates authentication after encryption on encodes * and before decryption on decodes. */ -#define AEON_MAC_CMD_POS_IPSEC (0x2 << 8) -#define AEON_MAC_CMD_NEW_KEY (0x1 << 11) +#define HIFN_MAC_CMD_POS_IPSEC (0x2 << 8) +#define HIFN_MAC_CMD_NEW_KEY (0x1 << 11) /* * Structure with all fields necessary to write the command buffer. * We build it up while interrupts are on, then use it to write out * the command buffer quickly while interrupts are off. */ -typedef struct aeon_command_buf_data { - aeon_base_command_t base_cmd; - aeon_mac_command_t mac_cmd; - aeon_crypt_command_t crypt_cmd; +typedef struct hifn_command_buf_data { + hifn_base_command_t base_cmd; + hifn_mac_command_t mac_cmd; + hifn_crypt_command_t crypt_cmd; const u_int8_t *mac; const u_int8_t *ck; const u_int8_t *iv; -} aeon_command_buf_data_t; +} hifn_command_buf_data_t; /* * The poll frequency and poll scalar defines are unshifted values used * to set fields in the DMA Configuration Register. */ -#ifndef AEON_POLL_FREQUENCY -#define AEON_POLL_FREQUENCY 0x1 +#ifndef HIFN_POLL_FREQUENCY +#define HIFN_POLL_FREQUENCY 0x1 #endif -#ifndef AEON_POLL_SCALAR -#define AEON_POLL_SCALAR 0x0 +#ifndef HIFN_POLL_SCALAR +#define HIFN_POLL_SCALAR 0x0 #endif -#endif /* __AEON_H__ */ +#endif /* __HIFN_H__ */ diff --git a/sys/dev/pci/hifn7751var.h b/sys/dev/pci/hifn7751var.h index 7483f4b23f8..e20db3c10a2 100644 --- a/sys/dev/pci/hifn7751var.h +++ b/sys/dev/pci/hifn7751var.h @@ -1,7 +1,7 @@ -/* $OpenBSD: hifn7751var.h,v 1.3 1999/02/24 06:09:45 deraadt Exp $ */ +/* $OpenBSD: hifn7751var.h,v 1.4 2000/03/16 20:33:48 deraadt Exp $ */ /* - * Invertex AEON driver + * Invertex AEON / Hi/fn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. 
* * Please send any comments, feedback, bug-fixes, or feature requests to @@ -33,58 +33,58 @@ * */ -#ifndef __AEON_EXPORT_H__ -#define __AEON_EXPORT_H__ +#ifndef __HIFN_EXPORT_H__ +#define __HIFN_EXPORT_H__ /* * Length values for cryptography */ -#define AEON_DES_KEY_LENGTH 8 -#define AEON_3DES_KEY_LENGTH 24 -#define AEON_MAX_CRYPT_KEY_LENGTH AEON_3DES_KEY_LENGTH -#define AEON_IV_LENGTH 8 +#define HIFN_DES_KEY_LENGTH 8 +#define HIFN_3DES_KEY_LENGTH 24 +#define HIFN_MAX_CRYPT_KEY_LENGTH HIFN_3DES_KEY_LENGTH +#define HIFN_IV_LENGTH 8 /* * Length values for authentication */ -#define AEON_MAC_KEY_LENGTH 64 -#define AEON_MD5_LENGTH 16 -#define AEON_SHA1_LENGTH 20 -#define AEON_MAC_TRUNC_LENGTH 12 +#define HIFN_MAC_KEY_LENGTH 64 +#define HIFN_MD5_LENGTH 16 +#define HIFN_SHA1_LENGTH 20 +#define HIFN_MAC_TRUNC_LENGTH 12 #define MAX_SCATTER 10 /* - * aeon_command_t + * hifn_command_t * - * This is the control structure used to pass commands to aeon_encrypt(). + * This is the control structure used to pass commands to hifn_encrypt(). * * flags * ----- * Flags is the bitwise "or" values for command configuration. A single * encrypt direction needs to be set: * - * AEON_ENCODE or AEON_DECODE + * HIFN_ENCODE or HIFN_DECODE * * To use cryptography, a single crypto algorithm must be included: * - * AEON_CRYPT_3DES or AEON_CRYPT_DES + * HIFN_CRYPT_3DES or HIFN_CRYPT_DES * * To use authentication is used, a single MAC algorithm must be included: * - * AEON_MAC_MD5 or AEON_MAC_SHA1 + * HIFN_MAC_MD5 or HIFN_MAC_SHA1 * * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash. * If the value below is set, hash values are truncated or assumed * truncated to 12 bytes: * - * AEON_MAC_TRUNC + * HIFN_MAC_TRUNC * * Keys for encryption and authentication can be sent as part of a command, * or the last key value used with a particular session can be retrieved * and used again if either of these flags are not specified. * - * AEON_CRYPT_NEW_KEY, AEON_MAC_NEW_KEY + * HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY * * Whether we block or not waiting for the dest data to be ready is * determined by whether a callback function is given. The other @@ -92,20 +92,20 @@ * it is not okay to block while waiting for an open slot in the * rings, include in the following value: * - * AEON_DMA_FULL_NOBLOCK + * HIFN_DMA_FULL_NOBLOCK * * result_flags * ------------ * result_flags is a bitwise "or" of result values. The result_flags * values should not be considered valid until: * - * callback routine NULL: aeon_crypto() returns + * callback routine NULL: hifn_crypto() returns * callback routine set: callback routine called * - * Right now there is only one result flag: AEON_MAC_BAD + * Right now there is only one result flag: HIFN_MAC_BAD * It's bit is set on decode operations using authentication when a * hash result does not match the input hash value. - * The AEON_MAC_OK(r) macro can be used to help inspect this flag. + * The HIFN_MAC_OK(r) macro can be used to help inspect this flag. * * session_num * ----------- @@ -165,12 +165,12 @@ * * dest_ready_callback * ------------------- - * Callback routine called from AEON's interrupt handler. The routine + * Callback routine called from HIFN's interrupt handler. The routine * must be quick and non-blocking. The callback routine is passed a - * pointer to the same aeon_command_t structure used to initiate the + * pointer to the same hifn_command_t structure used to initiate the * command. 
* - * If this value is null, the aeon_crypto() routine will block until the + * If this value is null, the hifn_crypto() routine will block until the * dest data is ready. * * private_data @@ -178,7 +178,7 @@ * An unsigned long quantity (i.e. large enough to hold a pointer), that * can be used by the callback routine if desired. */ -typedef struct aeon_command { +typedef struct hifn_command { u_int flags; volatile u_int result_status; @@ -202,62 +202,62 @@ typedef struct aeon_command { u_short mac_header_skip; u_short crypt_header_skip; - void (*dest_ready_callback)(struct aeon_command *); + void (*dest_ready_callback)(struct hifn_command *); u_long private_data; -} aeon_command_t; +} hifn_command_t; /* - * Return values for aeon_crypto() + * Return values for hifn_crypto() */ -#define AEON_CRYPTO_SUCCESS 0 -#define AEON_CRYPTO_BAD_INPUT -1 -#define AEON_CRYPTO_RINGS_FULL -2 +#define HIFN_CRYPTO_SUCCESS 0 +#define HIFN_CRYPTO_BAD_INPUT -1 +#define HIFN_CRYPTO_RINGS_FULL -2 /* - * Defines for the "config" parameter of aeon_command_t + * Defines for the "config" parameter of hifn_command_t */ -#define AEON_ENCODE 1 -#define AEON_DECODE 2 -#define AEON_CRYPT_3DES 4 -#define AEON_CRYPT_DES 8 -#define AEON_MAC_MD5 16 -#define AEON_MAC_SHA1 32 -#define AEON_MAC_TRUNC 64 -#define AEON_CRYPT_NEW_KEY 128 -#define AEON_MAC_NEW_KEY 256 -#define AEON_DMA_FULL_NOBLOCK 512 +#define HIFN_ENCODE 1 +#define HIFN_DECODE 2 +#define HIFN_CRYPT_3DES 4 +#define HIFN_CRYPT_DES 8 +#define HIFN_MAC_MD5 16 +#define HIFN_MAC_SHA1 32 +#define HIFN_MAC_TRUNC 64 +#define HIFN_CRYPT_NEW_KEY 128 +#define HIFN_MAC_NEW_KEY 256 +#define HIFN_DMA_FULL_NOBLOCK 512 -#define AEON_USING_CRYPT(f) ((f) & (AEON_CRYPT_3DES|AEON_CRYPT_DES)) -#define AEON_USING_MAC(f) ((f) & (AEON_MAC_MD5|AEON_MAC_SHA1)) +#define HIFN_USING_CRYPT(f) ((f) & (HIFN_CRYPT_3DES|HIFN_CRYPT_DES)) +#define HIFN_USING_MAC(f) ((f) & (HIFN_MAC_MD5|HIFN_MAC_SHA1)) /* - * Defines for the "result_status" parameter of aeon_command_t. + * Defines for the "result_status" parameter of hifn_command_t. */ -#define AEON_MAC_BAD 1 -#define AEON_MAC_OK(r) !((r) & AEON_MAC_BAD) +#define HIFN_MAC_BAD 1 +#define HIFN_MAC_OK(r) !((r) & HIFN_MAC_BAD) #ifdef _KERNEL /************************************************************************** * - * Function: aeon_crypto + * Function: hifn_crypto * * Purpose: Called by external drivers to begin an encryption on the - * AEON board. + * HIFN board. * * Blocking/Non-blocking Issues * ============================ - * If the dest_ready_callback field of the aeon_command structure - * is NULL, aeon_encrypt will block until the dest_data is ready -- - * otherwise aeon_encrypt() will return immediately and the + * If the dest_ready_callback field of the hifn_command structure + * is NULL, hifn_encrypt will block until the dest_data is ready -- + * otherwise hifn_encrypt() will return immediately and the * dest_ready_callback routine will be called when the dest data is * ready. * * The routine can also block when waiting for an open slot when all * DMA rings are full. You can avoid this behaviour by sending the - * AEON_DMA_FULL_NOBLOCK as part of the command flags. This will - * make aeon_crypt() return immediately when the rings are full. + * HIFN_DMA_FULL_NOBLOCK as part of the command flags. This will + * make hifn_crypt() return immediately when the rings are full. 
* * Return Values * ============= @@ -265,13 +265,13 @@ typedef struct aeon_command { * * Defines for negative error codes are: * - * AEON_CRYPTO_BAD_INPUT : The passed in command had invalid settings. - * AEON_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking + * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings. + * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking * behaviour was requested. * *************************************************************************/ -int aeon_crypto __P((aeon_command_t *command)); +int hifn_crypto __P((hifn_command_t *command)); #endif /* _KERNEL */ -#endif /* __AEON_EXPORT_H__ */ +#endif /* __HIFN_EXPORT_H__ */
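For readers unfamiliar with the interface documented above: the following is a minimal sketch, not part of the commit, of how a kernel consumer might drive the renamed API. It uses only the flags, fields and return codes visible in this diff; the buffer, key and IV fields hidden by the hunk context are deliberately omitted and would still have to be filled in. The function names example_done() and example_submit() are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>

#include <dev/pci/hifn7751var.h>

/*
 * Hypothetical callback: runs from the interrupt handler, so it must be
 * quick and non-blocking.  It only checks the MAC result and wakes the
 * sleeper recorded in private_data.
 */
static void
example_done(struct hifn_command *cmd)
{
	if (!HIFN_MAC_OK(cmd->result_status))
		printf("hifn: MAC check failed on decode\n");
	wakeup((void *)cmd->private_data);
}

/*
 * Hypothetical submit path: 3DES encrypt with truncated HMAC-SHA1,
 * loading fresh keys, and refusing to sleep when the rings are full.
 */
static int
example_submit(hifn_command_t *cmd)
{
	cmd->flags = HIFN_ENCODE | HIFN_CRYPT_3DES | HIFN_MAC_SHA1 |
	    HIFN_MAC_TRUNC | HIFN_CRYPT_NEW_KEY | HIFN_MAC_NEW_KEY |
	    HIFN_DMA_FULL_NOBLOCK;
	cmd->mac_header_skip = 0;		/* MAC from the start of the data */
	cmd->crypt_header_skip = 0;		/* encrypt from the start as well */
	cmd->dest_ready_callback = example_done;	/* non-blocking mode */
	cmd->private_data = (u_long)cmd;

	/*
	 * Source/destination buffers, keys and IV (fields elided by the
	 * diff hunk above) must also be set before this call.
	 */
	switch (hifn_crypto(cmd)) {
	case HIFN_CRYPTO_SUCCESS:
		return (0);
	case HIFN_CRYPTO_RINGS_FULL:
		return (EAGAIN);		/* we asked not to block */
	case HIFN_CRYPTO_BAD_INPUT:
	default:
		return (EINVAL);
	}
}

Because dest_ready_callback is non-NULL, hifn_crypto() returns as soon as the command is queued and example_done() later fires from interrupt context; passing a NULL callback instead would make hifn_crypto() block until the destination data is ready, as described above.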