author     Stefan Sperling <stsp@cvs.openbsd.org>    2019-11-18 18:53:12 +0000
committer  Stefan Sperling <stsp@cvs.openbsd.org>    2019-11-18 18:53:12 +0000
commit     41be0d59d6e678e8f4d9158e3277b961b43c973a
tree       b22f45b00a06df5fc14b880e49e55781ec112cd9
parent     d007f03d94f037f426db7fd1fee0d72a5ac42ef4
Add support for 9260 and 9560 devices to iwm(4).
Joint work with patrick@
Parts lifted from FreeBSD's r354492, r354502, r354508, r354516.
Firmware is available with fw_update(1) thanks to sthen@
7265 device tested by myself (still works)
8260 device tested by phessler@ (still works)
9260 devices tested by Travis Cole and myself
9560 devices tested by jcs@, mlarkin@, kevlo@, guenther@
Some 9560 devices have known issues. Those are being worked on.
ok patrick@
 sys/dev/pci/if_iwm.c    | 450
 sys/dev/pci/if_iwmreg.h | 188
 sys/dev/pci/if_iwmvar.h |  15
 3 files changed, 593 insertions(+), 60 deletions(-)
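
The bulk of this diff adds multi-queue (MQ) RX support for the 9000-series receive hardware: a 512-entry ring of free-buffer descriptors holding full 64-bit DMA addresses, plus a separate "used" descriptor ring, instead of the legacy 256-entry ring of 32-bit (address >> 8) descriptors. As a rough host-side sketch (not driver code), the following mirrors how the patched iwm_alloc_rx_ring() picks the ring geometry; the constants are the ones from the diff.

```c
#include <stdint.h>
#include <stdio.h>

#define IWM_RX_MQ_RING_COUNT	512	/* 9000-series multi-queue RX */
#define IWM_RX_RING_COUNT	256	/* legacy RX */

int
main(void)
{
	int mqrx_supported = 1;	/* stands in for sc->sc_mqrx_supported */
	int count;
	size_t descsz;

	if (mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);	/* full 64-bit DMA addresses */
	} else {
		count = IWM_RX_RING_COUNT;
		descsz = sizeof(uint32_t);	/* DMA address >> 8 */
	}
	/* The driver allocates this much 256-byte aligned DMA memory. */
	printf("free-descriptor ring: %d entries x %zu bytes = %zu bytes\n",
	    count, descsz, count * descsz);
	return 0;
}
```

A few more standalone sketches of individual techniques used in this change follow the diff itself.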
diff --git a/sys/dev/pci/if_iwm.c b/sys/dev/pci/if_iwm.c index a8da81de7dc..9e4f9562f7f 100644 --- a/sys/dev/pci/if_iwm.c +++ b/sys/dev/pci/if_iwm.c @@ -1,4 +1,4 @@ -/* $OpenBSD: if_iwm.c,v 1.282 2019/11/17 01:38:20 jcs Exp $ */ +/* $OpenBSD: if_iwm.c,v 1.283 2019/11/18 18:53:11 stsp Exp $ */ /* * Copyright (c) 2014, 2016 genua gmbh <info@genua.de> @@ -292,6 +292,8 @@ int iwm_start_hw(struct iwm_softc *); void iwm_stop_device(struct iwm_softc *); void iwm_nic_config(struct iwm_softc *); int iwm_nic_rx_init(struct iwm_softc *); +int iwm_nic_rx_legacy_init(struct iwm_softc *); +int iwm_nic_rx_mq_init(struct iwm_softc *); int iwm_nic_tx_init(struct iwm_softc *); int iwm_nic_init(struct iwm_softc *); int iwm_enable_ac_txq(struct iwm_softc *, int, int); @@ -363,11 +365,14 @@ int iwm_run_init_mvm_ucode(struct iwm_softc *, int); int iwm_config_ltr(struct iwm_softc *); int iwm_rx_addbuf(struct iwm_softc *, int, int); int iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *); +int iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *); void iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *, struct iwm_rx_data *); int iwm_get_noise(const struct iwm_statistics_rx_non_phy *); void iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *, struct iwm_rx_data *, struct mbuf_list *); +void iwm_rx_mpdu_mq(struct iwm_softc *, struct iwm_rx_packet *, + struct iwm_rx_data *, struct mbuf_list *); void iwm_enable_ht_cck_fallback(struct iwm_softc *, struct iwm_node *); void iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *, struct iwm_node *); @@ -913,6 +918,13 @@ iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val) IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val); } +void +iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val) +{ + iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff); + iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32); +} + int iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords) { @@ -984,7 +996,7 @@ iwm_nic_lock(struct iwm_softc *sc) IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) + if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) DELAY(2); if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL, @@ -1111,19 +1123,28 @@ int iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) { bus_size_t size; - int i, err; + size_t descsz; + int count, i, err; ring->cur = 0; + if (sc->sc_mqrx_supported) { + count = IWM_RX_MQ_RING_COUNT; + descsz = sizeof(uint64_t); + } else { + count = IWM_RX_RING_COUNT; + descsz = sizeof(uint32_t); + } + /* Allocate RX descriptors (256-byte aligned). */ - size = IWM_RX_RING_COUNT * sizeof(uint32_t); - err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256); + size = count * descsz; + err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256); if (err) { printf("%s: could not allocate RX ring DMA memory\n", DEVNAME(sc)); goto fail; } - ring->desc = ring->desc_dma.vaddr; + ring->desc = ring->free_desc_dma.vaddr; /* Allocate RX status area (16-byte aligned). 
*/ err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, @@ -1135,7 +1156,18 @@ iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) } ring->stat = ring->stat_dma.vaddr; - for (i = 0; i < IWM_RX_RING_COUNT; i++) { + if (sc->sc_mqrx_supported) { + size = count * sizeof(uint32_t); + err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma, + size, 256); + if (err) { + printf("%s: could not allocate RX ring DMA memory\n", + DEVNAME(sc)); + goto fail; + } + } + + for (i = 0; i < count; i++) { struct iwm_rx_data *data = &ring->data[i]; memset(data, 0, sizeof(*data)); @@ -1164,12 +1196,22 @@ iwm_disable_rx_dma(struct iwm_softc *sc) int ntries; if (iwm_nic_lock(sc)) { - IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); - for (ntries = 0; ntries < 1000; ntries++) { - if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) & - IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE) - break; - DELAY(10); + if (sc->sc_mqrx_supported) { + iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0); + for (ntries = 0; ntries < 1000; ntries++) { + if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) & + IWM_RXF_DMA_IDLE) + break; + DELAY(10); + } + } else { + IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + for (ntries = 0; ntries < 1000; ntries++) { + if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)& + IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE) + break; + DELAY(10); + } } iwm_nic_unlock(sc); } @@ -1190,12 +1232,18 @@ iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) void iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) { - int i; + int count, i; - iwm_dma_contig_free(&ring->desc_dma); + iwm_dma_contig_free(&ring->free_desc_dma); iwm_dma_contig_free(&ring->stat_dma); + iwm_dma_contig_free(&ring->used_desc_dma); - for (i = 0; i < IWM_RX_RING_COUNT; i++) { + if (sc->sc_mqrx_supported) + count = IWM_RX_MQ_RING_COUNT; + else + count = IWM_RX_RING_COUNT; + + for (i = 0; i < count; i++) { struct iwm_rx_data *data = &ring->data[i]; if (data->m != NULL) { @@ -1351,6 +1399,10 @@ iwm_enable_rfkill_int(struct iwm_softc *sc) { sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL; IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); + + if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000) + IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, + IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN); } int @@ -1466,7 +1518,10 @@ iwm_prepare_card_hw(struct iwm_softc *sc) if (iwm_set_hw_ready(sc)) return 0; - DELAY(100); + IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG, + IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED); + DELAY(1000); + /* If HW is not ready, prepare the conditions to check again */ IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG, @@ -1638,6 +1693,16 @@ iwm_apm_init(struct iwm_softc *sc) void iwm_apm_stop(struct iwm_softc *sc) { + IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG, + IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED); + IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG, + IWM_CSR_HW_IF_CONFIG_REG_PREPARE | + IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME); + DELAY(1000); + IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG, + IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED); + DELAY(5000); + /* stop device's busmaster DMA activity */ IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER); @@ -1645,6 +1710,13 @@ iwm_apm_stop(struct iwm_softc *sc) IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100)) printf("%s: timeout waiting for master\n", DEVNAME(sc)); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
+ */ + IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL, + IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } int @@ -1658,12 +1730,19 @@ iwm_start_hw(struct iwm_softc *sc) /* Reset the entire device */ IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET); - DELAY(10); + DELAY(5000); err = iwm_apm_init(sc); if (err) return err; + /* Newer chips default to MSIX. */ + if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000 && + iwm_nic_lock(sc)) { + iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSI_ENABLE); + iwm_nic_unlock(sc); + } + iwm_enable_rfkill_int(sc); iwm_check_rfkill(sc); @@ -1728,18 +1807,21 @@ iwm_stop_device(struct iwm_softc *sc) /* Stop the device, and put it in low power state */ iwm_apm_stop(sc); + /* Reset the on-board processor. */ + IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET); + DELAY(5000); + /* * Upon stop, the APM issues an interrupt if HW RF kill is set. * Clear the interrupt again. */ iwm_disable_interrupts(sc); - /* Reset the on-board processor. */ - IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET); - /* Even though we stop the HW we still want the RF kill interrupt. */ iwm_enable_rfkill_int(sc); iwm_check_rfkill(sc); + + iwm_prepare_card_hw(sc); } void @@ -1765,7 +1847,15 @@ iwm_nic_config(struct iwm_softc *sc) reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; - IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val); + IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, + IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | + IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | + IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | + IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | + IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | + IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | + reg_val); /* * W/A : NIC is stuck in a reset state after Early PCIe power off @@ -1781,6 +1871,69 @@ iwm_nic_config(struct iwm_softc *sc) int iwm_nic_rx_init(struct iwm_softc *sc) { + if (sc->sc_mqrx_supported) + return iwm_nic_rx_mq_init(sc); + else + return iwm_nic_rx_legacy_init(sc); +} + +int +iwm_nic_rx_mq_init(struct iwm_softc *sc) +{ + int enabled; + + if (!iwm_nic_lock(sc)) + return EBUSY; + + /* Stop RX DMA. */ + iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0); + /* Disable RX used and free queue operation. */ + iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0); + + iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB, + sc->rxq.free_desc_dma.paddr); + iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB, + sc->rxq.used_desc_dma.paddr); + iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB, + sc->rxq.stat_dma.paddr); + iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0); + iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0); + iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0); + + /* We configure only queue 0 for now. */ + enabled = ((1 << 0) << 16) | (1 << 0); + + /* Enable RX DMA, 4KB buffer size. */ + iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, + IWM_RFH_DMA_EN_ENABLE_VAL | + IWM_RFH_RXF_DMA_RB_SIZE_4K | + IWM_RFH_RXF_DMA_MIN_RB_4_8 | + IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK | + IWM_RFH_RXF_DMA_SINGLE_FRAME_MASK | + IWM_RFH_RXF_DMA_RBDCB_SIZE_512); + + /* Enable RX DMA snooping. */ + iwm_write_prph(sc, IWM_RFH_GEN_CFG, + IWM_RFH_GEN_CFG_RFH_DMA_SNOOP | + IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP | + (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 : + IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128)); + + /* Enable the configured queue(s). 
*/ + iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled); + + iwm_nic_unlock(sc); + + IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF); + + IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8); + + return 0; +} + +int +iwm_nic_rx_legacy_init(struct iwm_softc *sc) +{ memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat)); iwm_disable_rx_dma(sc); @@ -1796,7 +1949,7 @@ iwm_nic_rx_init(struct iwm_softc *sc) /* Set physical address of RX ring (256-byte aligned). */ IWM_WRITE(sc, - IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8); + IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8); /* Set physical address of RX status (16-byte aligned). */ IWM_WRITE(sc, @@ -1818,14 +1971,14 @@ iwm_nic_rx_init(struct iwm_softc *sc) if (sc->host_interrupt_operation_mode) IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE); + iwm_nic_unlock(sc); + /* * This value should initially be 0 (before preparing any RBs), * and should be 8 after preparing the first 8 RBs (for example). */ IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8); - iwm_nic_unlock(sc); - return 0; } @@ -2018,7 +2171,7 @@ iwm_post_alive(struct iwm_softc *sc) iwm_nic_unlock(sc); /* Enable L1-Active */ - if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) + if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG, IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS); @@ -2832,9 +2985,7 @@ iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw, data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE; data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE; - data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS); - - if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) { + if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) { uint16_t lar_offset = data->nvm_version < 0xE39 ? IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000; @@ -2842,7 +2993,10 @@ iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw, lar_config = le16_to_cpup(regulatory + lar_offset); data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000); - } + data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000); + } else + data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS); + /* The byte order is little endian 16 bit, meaning 214365 */ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) { @@ -2902,7 +3056,7 @@ iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections) n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length; } - } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) { + } else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) { /* SW and REGULATORY sections are mandatory */ if (!sections[IWM_NVM_SECTION_TYPE_SW].data || !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) { @@ -3223,7 +3377,7 @@ iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) sc->sc_uc.uc_intr = 0; - if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) + if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) err = iwm_load_firmware_8000(sc, ucode_type); else err = iwm_load_firmware_7000(sc, ucode_type); @@ -3491,9 +3645,19 @@ iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx) bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD); /* Update RX descriptor. 
*/ - ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8); - bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, - idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE); + if (sc->sc_mqrx_supported) { + ((uint64_t *)ring->desc)[idx] = + htole64(data->map->dm_segs[0].ds_addr); + bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map, + idx * sizeof(uint64_t), sizeof(uint64_t), + BUS_DMASYNC_PREWRITE); + } else { + ((uint32_t *)ring->desc)[idx] = + htole32(data->map->dm_segs[0].ds_addr >> 8); + bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map, + idx * sizeof(uint32_t), sizeof(uint32_t), + BUS_DMASYNC_PREWRITE); + } return 0; } @@ -3525,6 +3689,19 @@ iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info) return max_energy; } +int +iwm_rxmq_get_signal_strength(struct iwm_softc *sc, + struct iwm_rx_mpdu_desc *desc) +{ + int energy_a, energy_b; + + energy_a = desc->v1.energy_a; + energy_b = desc->v1.energy_b; + energy_a = energy_a ? -energy_a : -256; + energy_b = energy_b ? -energy_b : -256; + return MAX(energy_a, energy_b); +} + void iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt, struct iwm_rx_data *data) @@ -3694,6 +3871,152 @@ iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt, } void +iwm_rx_mpdu_mq(struct iwm_softc *sc, struct iwm_rx_packet *pkt, + struct iwm_rx_data *data, struct mbuf_list *ml) +{ + struct ieee80211com *ic = &sc->sc_ic; + struct ieee80211_frame *wh; + struct ieee80211_node *ni; + struct ieee80211_rxinfo rxi; + struct ieee80211_channel *bss_chan; + struct mbuf *m; + struct iwm_rx_mpdu_desc *desc; + uint32_t len, hdrlen, rate_n_flags; + int rssi; + uint8_t chanidx; + uint16_t phy_info; + uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 }; + + bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE, + BUS_DMASYNC_POSTREAD); + + desc = (struct iwm_rx_mpdu_desc *)pkt->data; + + if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) || + !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) + return; /* drop */ + + wh = (struct ieee80211_frame *)(pkt->data + sizeof(*desc)); + len = le16toh(desc->mpdu_len); + if (len < IEEE80211_MIN_LEN) { + ic->ic_stats.is_rx_tooshort++; + IC2IFP(ic)->if_ierrors++; + return; + } + if (len > IWM_RBUF_SIZE - sizeof(*desc)) { + IC2IFP(ic)->if_ierrors++; + return; + } + + m = data->m; + if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) + return; + m->m_data = pkt->data + sizeof(*desc); + m->m_pkthdr.len = m->m_len = len; + + /* Account for padding following the frame header. */ + if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) { + int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; + if (type == IEEE80211_FC0_TYPE_CTL) { + switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { + case IEEE80211_FC0_SUBTYPE_CTS: + hdrlen = sizeof(struct ieee80211_frame_cts); + break; + case IEEE80211_FC0_SUBTYPE_ACK: + hdrlen = sizeof(struct ieee80211_frame_ack); + break; + default: + hdrlen = sizeof(struct ieee80211_frame_min); + break; + } + } else + hdrlen = ieee80211_get_hdrlen(wh); + memmove(m->m_data + 2, m->m_data, hdrlen); + m->m_data = m->m_data + 2; + wh = mtod(m, struct ieee80211_frame *); + } + + phy_info = le16toh(desc->phy_info); + rate_n_flags = le32toh(desc->v1.rate_n_flags); + + rssi = iwm_rxmq_get_signal_strength(sc, desc); + rssi -= sc->sc_noise; + rssi *= 2; /* rssi is in 1/2db units */ + + ni = ieee80211_find_rxnode(ic, wh); + if (ni == ic->ic_bss) { + /* + * We may switch ic_bss's channel during scans. + * Record the current channel so we can restore it later. 
+ */ + bss_chan = ni->ni_chan; + IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr); + } + chanidx = desc->v1.channel; + ni->ni_chan = &ic->ic_channels[chanidx]; + + memset(&rxi, 0, sizeof(rxi)); + rxi.rxi_rssi = rssi; + rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise); + +#if NBPFILTER > 0 + if (sc->sc_drvbpf != NULL) { + struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap; + uint16_t chan_flags; + + tap->wr_flags = 0; + if (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) + tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + tap->wr_chan_freq = + htole16(ic->ic_channels[chanidx].ic_freq); + chan_flags = ic->ic_channels[chanidx].ic_flags; + if (ic->ic_curmode != IEEE80211_MODE_11N) + chan_flags &= ~IEEE80211_CHAN_HT; + tap->wr_chan_flags = htole16(chan_flags); + tap->wr_dbm_antsignal = (int8_t)rssi; + tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; + tap->wr_tsft = desc->v1.gp2_on_air_rise; + if (rate_n_flags & IWM_RATE_HT_MCS_RATE_CODE_MSK) { + uint8_t mcs = (rate_n_flags & + (IWM_RATE_HT_MCS_RATE_CODE_MSK | + IWM_RATE_HT_MCS_NSS_MSK)); + tap->wr_rate = (0x80 | mcs); + } else { + switch ((rate_n_flags & IWM_RATE_LEGACY_RATE_MSK)) { + /* CCK rates. */ + case 10: tap->wr_rate = 2; break; + case 20: tap->wr_rate = 4; break; + case 55: tap->wr_rate = 11; break; + case 110: tap->wr_rate = 22; break; + /* OFDM rates. */ + case 0xd: tap->wr_rate = 12; break; + case 0xf: tap->wr_rate = 18; break; + case 0x5: tap->wr_rate = 24; break; + case 0x7: tap->wr_rate = 36; break; + case 0x9: tap->wr_rate = 48; break; + case 0xb: tap->wr_rate = 72; break; + case 0x1: tap->wr_rate = 96; break; + case 0x3: tap->wr_rate = 108; break; + /* Unknown rate: should not happen. */ + default: tap->wr_rate = 0; + } + } + + bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len, + m, BPF_DIRECTION_IN); + } +#endif + ieee80211_inputm(IC2IFP(ic), m, ni, &rxi, ml); + /* + * ieee80211_inputm() might have changed our BSS. + * Restore ic_bss's channel if we are still in the same BSS. + */ + if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr)) + ni->ni_chan = bss_chan; + ieee80211_release_node(ic, ni); +} + +void iwm_enable_ht_cck_fallback(struct iwm_softc *sc, struct iwm_node *in) { struct ieee80211com *ic = &sc->sc_ic; @@ -4506,20 +4829,20 @@ iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) desc->num_tbs = 2 + data->map->dm_nsegs; desc->tbs[0].lo = htole32(data->cmd_paddr); - desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) | - (TB0_SIZE << 4); + desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) | + (TB0_SIZE << 4)); desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE); - desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) | + desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) | ((sizeof(struct iwm_cmd_header) + sizeof(*tx) - + hdrlen + pad - TB0_SIZE) << 4); + + hdrlen + pad - TB0_SIZE) << 4)); /* Other DMA segments are for data payload. 
*/ seg = data->map->dm_segs; for (i = 0; i < data->map->dm_nsegs; i++, seg++) { desc->tbs[i+2].lo = htole32(seg->ds_addr); desc->tbs[i+2].hi_n_len = \ - htole16(iwm_get_dma_hi_addr(seg->ds_addr)) - | ((seg->ds_len) << 4); + htole16(iwm_get_dma_hi_addr(seg->ds_addr) + | ((seg->ds_len) << 4)); } bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, @@ -7541,19 +7864,29 @@ do { \ _ptr_ = (void *)((_pkt_)+1); \ } while (/*CONSTCOND*/0) -#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT); +#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count); void iwm_notif_intr(struct iwm_softc *sc) { struct mbuf_list ml = MBUF_LIST_INITIALIZER(); + uint32_t wreg; uint16_t hw; + int count; bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map, 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD); + if (sc->sc_mqrx_supported) { + count = IWM_RX_MQ_RING_COUNT; + wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG; + } else { + count = IWM_RX_RING_COUNT; + wreg = IWM_FH_RSCSR_CHNL0_WPTR; + } + hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff; - hw &= (IWM_RX_RING_COUNT - 1); + hw &= (count - 1); while (sc->rxq.cur != hw) { struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur]; struct iwm_rx_packet *pkt; @@ -7584,7 +7917,10 @@ iwm_notif_intr(struct iwm_softc *sc) break; case IWM_REPLY_RX_MPDU_CMD: - iwm_rx_rx_mpdu(sc, pkt, data, &ml); + if (sc->sc_mqrx_supported) + iwm_rx_mpdu_mq(sc, pkt, data, &ml); + else + iwm_rx_rx_mpdu(sc, pkt, data, &ml); break; case IWM_TX_CMD: @@ -7845,8 +8181,8 @@ iwm_notif_intr(struct iwm_softc *sc) * Tell the firmware what we have processed. * Seems like the hardware gets upset unless we align the write by 8?? */ - hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1; - IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7); + hw = (hw == 0) ? count - 1 : hw - 1; + IWM_WRITE(sc, wreg, hw & ~7); } int @@ -8002,6 +8338,9 @@ static const struct pci_matchid iwm_devices[] = { { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 }, }; int @@ -8211,6 +8550,23 @@ iwm_attach(struct device *parent, struct device *self, void *aux) sc->sc_nvm_max_section_size = 32768; sc->nvm_type = IWM_NVM_EXT; break; + case PCI_PRODUCT_INTEL_WL_9260_1: + sc->sc_fwname = "iwm-9260-34"; + sc->host_interrupt_operation_mode = 0; + sc->sc_device_family = IWM_DEVICE_FAMILY_9000; + sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000; + sc->sc_nvm_max_section_size = 32768; + sc->sc_mqrx_supported = 1; + break; + case PCI_PRODUCT_INTEL_WL_9560_1: + sc->sc_fwname = "iwm-9000-34"; + sc->host_interrupt_operation_mode = 0; + sc->sc_device_family = IWM_DEVICE_FAMILY_9000; + sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000; + sc->sc_nvm_max_section_size = 32768; + sc->sc_mqrx_supported = 1; + sc->sc_integrated = 1; + break; default: printf("%s: unknown adapter type\n", DEVNAME(sc)); return; @@ -8222,7 +8578,7 @@ iwm_attach(struct device *parent, struct device *self, void *aux) * "dash" value). To keep hw_rev backwards compatible - we'll store it * in the old format. 
*/ - if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) { + if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) { uint32_t hw_step; sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) | diff --git a/sys/dev/pci/if_iwmreg.h b/sys/dev/pci/if_iwmreg.h index 5089c36405b..ef1a8b9f380 100644 --- a/sys/dev/pci/if_iwmreg.h +++ b/sys/dev/pci/if_iwmreg.h @@ -1,4 +1,4 @@ -/* $OpenBSD: if_iwmreg.h,v 1.43 2019/11/12 07:24:22 stsp Exp $ */ +/* $OpenBSD: if_iwmreg.h,v 1.44 2019/11/18 18:53:11 stsp Exp $ */ /****************************************************************************** * @@ -183,7 +183,7 @@ /* interrupt flags in INTA, set by uCode or hardware (e.g. dma), * acknowledged (reset) by host writing "1" to flagged bits. */ -#define IWM_CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define IWM_CSR_INT_BIT_FH_RX (1U << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ #define IWM_CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ #define IWM_CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ #define IWM_CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ @@ -206,7 +206,7 @@ IWM_CSR_INT_BIT_RX_PERIODIC) /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ -#define IWM_CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ +#define IWM_CSR_FH_INT_BIT_ERR (1U << 31) /* Error */ #define IWM_CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ #define IWM_CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ #define IWM_CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ @@ -279,7 +279,7 @@ #define IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) #define IWM_CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) -#define IWM_CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) +#define IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000) #define IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) @@ -397,7 +397,7 @@ #define IWM_CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) /* DRAM INT TABLE */ -#define IWM_CSR_DRAM_INT_TBL_ENABLE (1 << 31) +#define IWM_CSR_DRAM_INT_TBL_ENABLE (1U << 31) #define IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28) #define IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) @@ -417,6 +417,111 @@ #define IWM_FH_MEM_TB_MAX_LENGTH 0x20000 +/* 9000 rx series registers */ + +#define IWM_RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */ +#define IWM_RFH_Q_FRBDCB_BA_LSB(q) (IWM_RFH_Q0_FRBDCB_BA_LSB + (q) * 8) +/* Write index table */ +#define IWM_RFH_Q0_FRBDCB_WIDX 0xA08080 +#define IWM_RFH_Q_FRBDCB_WIDX(q) (IWM_RFH_Q0_FRBDCB_WIDX + (q) * 4) +/* Write index table - shadow registers */ +#define IWM_RFH_Q0_FRBDCB_WIDX_TRG 0x1C80 +#define IWM_RFH_Q_FRBDCB_WIDX_TRG(q) (IWM_RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4) +/* Read index table */ +#define IWM_RFH_Q0_FRBDCB_RIDX 0xA080C0 +#define IWM_RFH_Q_FRBDCB_RIDX(q) (IWM_RFH_Q0_FRBDCB_RIDX + (q) * 4) +/* Used list table */ +#define IWM_RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */ +#define IWM_RFH_Q_URBDCB_BA_LSB(q) (IWM_RFH_Q0_URBDCB_BA_LSB + (q) * 8) +/* Write index table */ +#define IWM_RFH_Q0_URBDCB_WIDX 0xA08180 +#define IWM_RFH_Q_URBDCB_WIDX(q) (IWM_RFH_Q0_URBDCB_WIDX + (q) * 4) +#define IWM_RFH_Q0_URBDCB_VAID 0xA081C0 +#define IWM_RFH_Q_URBDCB_VAID(q) (IWM_RFH_Q0_URBDCB_VAID + (q) * 4) +/* stts */ +#define IWM_RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /*64 bits address */ +#define IWM_RFH_Q_URBD_STTS_WPTR_LSB(q) (IWM_RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8) + +#define IWM_RFH_Q0_ORB_WPTR_LSB 0xA08280 +#define IWM_RFH_Q_ORB_WPTR_LSB(q) (IWM_RFH_Q0_ORB_WPTR_LSB + (q) * 8) +#define IWM_RFH_RBDBUF_RBD0_LSB 0xA08300 
+#define IWM_RFH_RBDBUF_RBD_LSB(q) (IWM_RFH_RBDBUF_RBD0_LSB + (q) * 8) + +/** + * RFH Status Register + * + * Bit fields: + * + * Bit 29: RBD_FETCH_IDLE + * This status flag is set by the RFH when there is no active RBD fetch from + * DRAM. + * Once the RFH RBD controller starts fetching (or when there is a pending + * RBD read response from DRAM), this flag is immediately turned off. + * + * Bit 30: SRAM_DMA_IDLE + * This status flag is set by the RFH when there is no active transaction from + * SRAM to DRAM. + * Once the SRAM to DRAM DMA is active, this flag is immediately turned off. + * + * Bit 31: RXF_DMA_IDLE + * This status flag is set by the RFH when there is no active transaction from + * RXF to DRAM. + * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off. + */ +#define IWM_RFH_GEN_STATUS 0xA09808 +#define IWM_RFH_GEN_STATUS_GEN3 0xA07824 +#define IWM_RBD_FETCH_IDLE (1 << 29) +#define IWM_SRAM_DMA_IDLE (1 << 30) +#define IWM_RXF_DMA_IDLE (1U << 31) + +/* DMA configuration */ +#define IWM_RFH_RXF_DMA_CFG 0xA09820 +#define IWM_RFH_RXF_DMA_CFG_GEN3 0xA07880 +/* RB size */ +#define IWM_RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ +#define IWM_RFH_RXF_DMA_RB_SIZE_POS 16 +#define IWM_RFH_RXF_DMA_RB_SIZE_1K (0x1 << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_2K (0x2 << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_4K (0x4 << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_8K (0x8 << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_12K (0x9 << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_16K (0xA << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_20K (0xB << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_24K (0xC << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_28K (0xD << IWM_RFH_RXF_DMA_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RB_SIZE_32K (0xE << IWM_RFH_RXF_DMA_RB_SIZE_POS) +/* RB Circular Buffer size:defines the table sizes in RBD units */ +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */ +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_POS 20 +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS) +#define IWM_RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ +#define IWM_RFH_RXF_DMA_MIN_RB_SIZE_POS 24 +#define IWM_RFH_RXF_DMA_MIN_RB_4_8 (3 << IWM_RFH_RXF_DMA_MIN_RB_SIZE_POS) +#define IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */ +#define IWM_RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ +#define IWM_RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ +#define IWM_RFH_DMA_EN_ENABLE_VAL (1U << 31) + +#define IWM_RFH_RXF_RXQ_ACTIVE 0xA0980C + +#define IWM_RFH_GEN_CFG 0xA09800 +#define IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP (1 << 0) +#define IWM_RFH_GEN_CFG_RFH_DMA_SNOOP (1 << 1) +#define IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128 0x00000010 +#define 
IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 0x00000000 +/* the driver assumes everywhere that the default RXQ is 0 */ +#define IWM_RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00 + +/* end of 9000 rx series registers */ + #define IWM_LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR 0x1e78 #define IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR 0x1e7c @@ -502,6 +607,8 @@ #define IWM_AUX_MISC_MASTER1_SMPHR_STATUS 0xa20800 #define IWM_RSA_ENABLE 0xa24b08 #define IWM_PREG_AUX_BUS_WPROT_0 0xa04cc0 +#define IWM_PREG_PRPH_WPROT_9000 0xa04ce0 +#define IWM_PREG_PRPH_WPROT_22000 0xa04d00 #define IWM_SB_CFG_OVERRIDE_ADDR 0xa26c78 #define IWM_SB_CFG_OVERRIDE_ENABLE 0x8000 #define IWM_SB_CFG_BASE_OVERRIDE 0xa20000 @@ -509,6 +616,14 @@ #define IWM_SB_CPU_1_STATUS 0xa01e30 #define IWM_SB_CPU_2_STATUS 0Xa01e34 +#define IWM_UREG_CHICK 0xa05c00 +#define IWM_UREG_CHICK_MSI_ENABLE (1 << 24) +#define IWM_UREG_CHICK_MSIX_ENABLE (1 << 25) + +#define IWM_HPM_DEBUG 0xa03440 +#define IWM_HPM_PERSISTENCE_BIT (1 << 12) +#define IWM_PREG_WFPM_ACCESS (1 << 12) + /* Used to enable DBGM */ #define IWM_HBUS_TARG_TEST_REG (IWM_HBUS_BASE+0x05c) @@ -534,7 +649,7 @@ #define IWM_HOST_INT_TIMEOUT_MAX (0xFF) #define IWM_HOST_INT_TIMEOUT_DEF (0x40) #define IWM_HOST_INT_TIMEOUT_MIN (0x0) -#define IWM_HOST_INT_OPER_MODE (1 << 31) +#define IWM_HOST_INT_OPER_MODE (1U << 31) /***************************************************************************** * 7000/3000 series SHR DTS addresses * @@ -598,7 +713,7 @@ #define IWM_UCODE_TLV_FLAGS_P2P_PS_UAPSD (1 << 26) #define IWM_UCODE_TLV_FLAGS_BCAST_FILTERING (1 << 29) #define IWM_UCODE_TLV_FLAGS_GO_UAPSD (1 << 30) -#define IWM_UCODE_TLV_FLAGS_LTE_COEX (1 << 31) +#define IWM_UCODE_TLV_FLAGS_LTE_COEX (1U << 31) #define IWM_UCODE_TLV_FLAG_BITS \ "\020\1PAN\2NEWSCAN\3MFP\4P2P\5DW_BC_TABLE\6NEWBT_COEX\7PM_CMD\10SHORT_BL\11RX_ENERGY\12TIME_EVENT_V2\13D3_6_IPV6\14BF_UPDATED\15NO_BASIC_SSID\17D3_CONTINUITY\20NEW_NSOFFL_S\21NEW_NSOFFL_L\22SCHED_SCAN\24STA_KEY_CMD\25DEVICE_PS_CMD\26P2P_PS\27P2P_PS_DCM\30P2P_PS_SCM\31UAPSD_SUPPORT\32EBS\33P2P_PS_UAPSD\36BCAST_FILTERING\37GO_UAPSD\40LTE_COEX" @@ -2943,6 +3058,63 @@ struct iwm_rx_mpdu_res_start { #define IWM_RX_MPDU_RES_STATUS_FILTERING_MSK (0xc00000) #define IWM_RX_MPDU_RES_STATUS2_FILTERING_MSK (0xc0000000) +#define IWM_RX_MPDU_MFLG1_ADDRTYPE_MASK 0x03 +#define IWM_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK 0xf0 +#define IWM_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT 3 + +#define IWM_RX_MPDU_MFLG2_HDR_LEN_MASK 0x1f +#define IWM_RX_MPDU_MFLG2_PAD 0x20 +#define IWM_RX_MPDU_MFLG2_AMSDU 0x40 + +#define IWM_RX_MPDU_PHY_AMPDU (1 << 5) +#define IWM_RX_MPDU_PHY_AMPDU_TOGGLE (1 << 6) +#define IWM_RX_MPDU_PHY_SHORT_PREAMBLE (1 << 7) +#define IWM_RX_MPDU_PHY_NCCK_ADDTL_NTFY (1 << 7) +#define IWM_RX_MPDU_PHY_TSF_OVERLOAD (1 << 8) + +struct iwm_rx_mpdu_desc_v1 { + union { + uint32_t rss_hash; + uint32_t phy_data2; + }; + union { + uint32_t filter_match; + uint32_t phy_data3; + }; + uint32_t rate_n_flags; + uint8_t energy_a; + uint8_t energy_b; + uint8_t channel; + uint8_t mac_context; + uint32_t gp2_on_air_rise; + union { + uint64_t tsf_on_air_rise; + struct { + uint32_t phy_data0; + uint32_t phy_data1; + }; + }; +} __packed; + +struct iwm_rx_mpdu_desc { + uint16_t mpdu_len; + uint8_t mac_flags1; + uint8_t mac_flags2; + uint8_t amsdu_info; + uint16_t phy_info; + uint8_t mac_phy_idx; + uint16_t raw_csum; + union { + uint16_t l3l4_flags; + uint16_t phy_data4; + }; + uint16_t status; + uint8_t hash_filter; + uint8_t sta_id_flags; + uint32_t reorder_data; + struct iwm_rx_mpdu_desc_v1 v1; +} __packed; + /** * struct 
iwm_radio_version_notif - information on the radio version * ( IWM_RADIO_VERSION_NOTIFICATION = 0x68 ) @@ -4335,7 +4507,7 @@ struct iwm_lq_cmd { #define IWM_TX_CMD_FLG_FW_DROP (1 << 26) #define IWM_TX_CMD_FLG_EXEC_PAPD (1 << 27) #define IWM_TX_CMD_FLG_PAPD_TYPE (1 << 28) -#define IWM_TX_CMD_FLG_HCCA_CHUNK (1 << 31) +#define IWM_TX_CMD_FLG_HCCA_CHUNK (1U << 31) /* IWM_TX_FLAGS_BITS_API_S_VER_1 */ /* diff --git a/sys/dev/pci/if_iwmvar.h b/sys/dev/pci/if_iwmvar.h index da169e90e7e..ed8a83570dc 100644 --- a/sys/dev/pci/if_iwmvar.h +++ b/sys/dev/pci/if_iwmvar.h @@ -1,4 +1,4 @@ -/* $OpenBSD: if_iwmvar.h,v 1.46 2019/11/12 07:24:22 stsp Exp $ */ +/* $OpenBSD: if_iwmvar.h,v 1.47 2019/11/18 18:53:11 stsp Exp $ */ /* * Copyright (c) 2014 genua mbh <info@genua.de> @@ -272,8 +272,8 @@ struct iwm_tx_ring { int cur; }; +#define IWM_RX_MQ_RING_COUNT 512 #define IWM_RX_RING_COUNT 256 -#define IWM_RBUF_COUNT (IWM_RX_RING_COUNT + 32) /* Linux driver optionally uses 8k buffer */ #define IWM_RBUF_SIZE 4096 @@ -283,12 +283,13 @@ struct iwm_rx_data { }; struct iwm_rx_ring { - struct iwm_dma_info desc_dma; + struct iwm_dma_info free_desc_dma; struct iwm_dma_info stat_dma; + struct iwm_dma_info used_desc_dma; struct iwm_dma_info buf_dma; - uint32_t *desc; + void *desc; struct iwm_rb_status *stat; - struct iwm_rx_data data[IWM_RX_RING_COUNT]; + struct iwm_rx_data data[IWM_RX_MQ_RING_COUNT]; int cur; }; @@ -419,6 +420,7 @@ struct iwm_softc { int sc_device_family; #define IWM_DEVICE_FAMILY_7000 1 #define IWM_DEVICE_FAMILY_8000 2 +#define IWM_DEVICE_FAMILY_9000 3 struct iwm_dma_info kw_dma; struct iwm_dma_info fw_dma; @@ -500,6 +502,9 @@ struct iwm_softc { int sc_ltr_enabled; enum iwm_nvm_type nvm_type; + int sc_mqrx_supported; + int sc_integrated; + /* * Paging parameters - All of the parameters should be set by the * opmode when paging is enabled |
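
iwm_write_prph64() performs a 64-bit periphery-register write as two 32-bit writes, low word at addr and high word at addr + 4; iwm_nic_rx_mq_init() uses it to program the 64-bit ring base addresses. The same split reduced to a standalone sketch, with printf() standing in for the real register write:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for iwm_write_prph(); the real function writes a device register. */
static void
write_prph32(uint32_t addr, uint32_t val)
{
	printf("prph[0x%08x] <- 0x%08x\n", addr, val);
}

static void
write_prph64(uint32_t addr, uint64_t val)
{
	write_prph32(addr, val & 0xffffffff);	/* low 32 bits first */
	write_prph32(addr + 4, val >> 32);	/* high 32 bits at addr + 4 */
}

int
main(void)
{
	/* Hypothetical 64-bit DMA address of the free-descriptor ring. */
	write_prph64(0xA08000 /* IWM_RFH_Q0_FRBDCB_BA_LSB */, 0x12345678abcdULL);
	return 0;
}
```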
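
The new IWM_RFH_RXF_DMA_* definitions in if_iwmreg.h describe the bit fields of the RX DMA configuration register programmed in iwm_nic_rx_mq_init(): receive-buffer size in bits 16-19, descriptor-table size in bits 20-23, enable bits at the top. A sketch that composes the value the driver writes and decodes those two fields, using only masks and positions taken from the diff:

```c
#include <stdint.h>
#include <stdio.h>

#define IWM_RFH_RXF_DMA_RB_SIZE_MASK	0x000F0000	/* bits 16-19 */
#define IWM_RFH_RXF_DMA_RB_SIZE_POS	16
#define IWM_RFH_RXF_DMA_RB_SIZE_4K	(0x4 << IWM_RFH_RXF_DMA_RB_SIZE_POS)
#define IWM_RFH_RXF_DMA_RBDCB_SIZE_MASK	0x00F00000	/* bits 20-23 */
#define IWM_RFH_RXF_DMA_RBDCB_SIZE_POS	20
#define IWM_RFH_RXF_DMA_RBDCB_SIZE_512	(0x9 << IWM_RFH_RXF_DMA_RBDCB_SIZE_POS)
#define IWM_RFH_RXF_DMA_MIN_RB_4_8	(3 << 24)
#define IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK	0x04000000
#define IWM_RFH_RXF_DMA_SINGLE_FRAME_MASK	0x20000000
#define IWM_RFH_DMA_EN_ENABLE_VAL	(1U << 31)

int
main(void)
{
	/* Value composed the same way as in iwm_nic_rx_mq_init(). */
	uint32_t cfg = IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_SINGLE_FRAME_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512;

	printf("RFH_RXF_DMA_CFG = 0x%08x, rb_size = 0x%x, rbdcb_size = 0x%x\n",
	    cfg,
	    (cfg & IWM_RFH_RXF_DMA_RB_SIZE_MASK) >> IWM_RFH_RXF_DMA_RB_SIZE_POS,
	    (cfg & IWM_RFH_RXF_DMA_RBDCB_SIZE_MASK) >> IWM_RFH_RXF_DMA_RBDCB_SIZE_POS);
	return 0;
}
```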
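
iwm_rx_addbuf() now writes free-buffer descriptors in two formats: MQ hardware takes the buffer's full DMA address as a little-endian 64-bit value, while legacy hardware takes the address shifted right by 8 in a 32-bit word. A sketch of both encodings for a made-up buffer address (byte-order conversion omitted; the driver applies htole64()/htole32()):

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical buffer DMA address (dm_segs[0].ds_addr in the driver). */
	uint64_t ds_addr = 0x123456700ULL;

	/* MQ: the full 64-bit address. */
	uint64_t mq_desc = ds_addr;

	/* Legacy: address >> 8 in a 32-bit word. */
	uint32_t legacy_desc = (uint32_t)(ds_addr >> 8);

	printf("MQ descriptor:     0x%016llx\n", (unsigned long long)mq_desc);
	printf("legacy descriptor: 0x%08x\n", legacy_desc);
	return 0;
}
```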
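
iwm_rxmq_get_signal_strength() converts the two per-antenna energy bytes from the MPDU descriptor into a signal level: each nonzero byte is negated, a zero byte means "no measurement" and maps to -256, and the larger (stronger) of the two antennas is returned. The same logic on example values:

```c
#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

static int
rxmq_signal(int energy_a, int energy_b)
{
	/* A zero energy byte means "no measurement" and maps to -256. */
	energy_a = energy_a ? -energy_a : -256;
	energy_b = energy_b ? -energy_b : -256;
	return MAX(energy_a, energy_b);	/* the stronger antenna */
}

int
main(void)
{
	/* Example descriptor bytes: antenna A at -47, antenna B unused. */
	printf("signal: %d\n", rxmq_signal(47, 0));	/* prints -47 */
	return 0;
}
```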
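
In iwm_rx_mpdu_mq(), when IWM_RX_MPDU_MFLG2_PAD is set the firmware has inserted two padding bytes between the 802.11 header and the payload (presumably to keep the payload 32-bit aligned), and the driver closes the gap by sliding the header forward two bytes and advancing the data pointer. A toy buffer makes the memmove easier to follow (the 4-byte header and the payload string are made up):

```c
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Made-up frame: 4-byte "header", 2 pad bytes, then the payload. */
	char buf[] = "HDR!..payload";
	size_t hdrlen = 4;
	char *data = buf;

	/* As in iwm_rx_mpdu_mq(): slide the header over the pad bytes. */
	memmove(data + 2, data, hdrlen);
	data = data + 2;

	printf("%s\n", data);	/* prints "HDR!payload" */
	return 0;
}
```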
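
The iwm_tx() hunk is a byte-order fix: hi_n_len packs the high DMA address bits in its low nibble and the buffer length above them, and the whole 16-bit field has to be converted to little-endian as one unit. OR-ing the length in after htole16() happens to give the same bytes on little-endian hosts, which is presumably why the bug went unnoticed. A sketch that emulates a big-endian host with an explicit byte swap (sample values are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

/* Explicit swap, standing in for htole16() as it behaves on a big-endian host. */
static uint16_t
swap16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int
main(void)
{
	uint16_t hi_addr = 0x1;		/* high DMA address bits (made up) */
	uint16_t len = 20;		/* TB0_SIZE */

	uint16_t old_way = swap16(hi_addr) | (len << 4);	/* OR after conversion */
	uint16_t new_way = swap16(hi_addr | (len << 4));	/* OR before conversion */

	printf("big-endian host: old=0x%04x new=0x%04x (%s)\n",
	    old_way, new_way, old_way == new_way ? "same" : "different");
	return 0;
}
```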
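
iwm_notif_intr() now selects the ring size and the write-pointer register based on sc_mqrx_supported, and keeps the existing quirk of writing the pointer rounded down to a multiple of 8. The index arithmetic in isolation (the closed_rb_num value is an example):

```c
#include <stdint.h>
#include <stdio.h>

#define IWM_RX_MQ_RING_COUNT	512
#define IWM_RX_RING_COUNT	256

int
main(void)
{
	int mqrx_supported = 1;
	int count = mqrx_supported ? IWM_RX_MQ_RING_COUNT : IWM_RX_RING_COUNT;
	uint16_t closed_rb_num = 0x203;			/* example status value */
	uint16_t hw = (closed_rb_num & 0xfff) & (count - 1);

	/* Tell the firmware what was processed; hardware wants 8-alignment. */
	hw = (hw == 0) ? count - 1 : hw - 1;
	printf("write pointer: %d (aligned: %d)\n", hw, hw & ~7);
	return 0;
}
```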
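
if_iwmreg.h also changes several (1 << 31) flag definitions to (1U << 31): with a 32-bit int, shifting 1 into the sign bit is undefined behaviour in C, so bit-31 masks are now built from an unsigned literal. A minimal illustration:

```c
#include <stdio.h>

#define BIT31_SIGNED	(1 << 31)	/* undefined behaviour with 32-bit int */
#define BIT31_UNSIGNED	(1U << 31)	/* well-defined: 0x80000000u */

int
main(void)
{
	/* Compilers and UBSan typically flag the signed form. */
	printf("unsigned form: 0x%08x\n", BIT31_UNSIGNED);
	return 0;
}
```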