author    Stefan Sperling <stsp@cvs.openbsd.org>  2021-09-23 15:34:01 +0000
committer Stefan Sperling <stsp@cvs.openbsd.org>  2021-09-23 15:34:01 +0000
commit    e74b44688059860631e956b5ec93344ba2f5cfa8 (patch)
tree      abdfaeb05e959071315e6f07627a400ce1a17c74 /sys/dev/pci/if_iwx.c
parent    548833c81b3955f9954ddba7074e871565ed2245 (diff)
Add support for Tx aggregation to the iwx(4) driver.
Throughput goes up to 100 Mbit/s under ideal conditions.

This is mostly working and stable. However, occasional hangs may still occur where the device stops giving us interrupts, for reasons which are not yet understood. In such cases ifconfig down/up will recover the interface. Regardless, this code seems stable enough for the remaining issues to be resolved in-tree. My working assumption is that the remaining issues were always present but only trigger under the increased load seen when firmware is driven with Tx aggregation enabled.

Feedback is welcome, especially if stability issues turn out to be worse than expected! As usual, please enable "ifconfig iwx0 debug" when reporting issues.

Tests on ax200/ax201: myself, dv, hrvoje, Stefan Hagen, kevlo, Eric Auge, mlarkin, jmc, Mark Patruck. Of these, only jmc and I have seen a small number of hangs during continuous usage over about two weeks.
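For readers skimming the diff below: the heart of the change is that QoS data
frames are steered onto a per-TID aggregation queue once a block-ack agreement
exists, while management, control, and non-QoS frames stay on the single
default queue. The following is a minimal, compilable sketch of that selection
logic; the sketch_* struct and function are illustrative stand-ins, not the
literal kernel code (the real decision lives in iwx_tx() in the diff).

	/*
	 * Simplified user-space model of Tx queue selection with
	 * aggregation enabled. Names mirror the driver's fields.
	 */
	#include <stdint.h>

	#define SKETCH_MAX_TID_COUNT	8	/* mirrors IWX_MAX_TID_COUNT */

	struct sketch_softc {
		int	first_data_qid;			 /* default mgmt/data queue */
		int	aggqid[SKETCH_MAX_TID_COUNT];	 /* TID -> agg queue; 0 = none */
	};

	static int
	sketch_tx_qid(struct sketch_softc *sc, int is_qos_data, uint8_t tid,
	    int ba_agreed)
	{
		/* QoS data with an active BA agreement rides its TID's queue. */
		if (is_qos_data && tid < SKETCH_MAX_TID_COUNT &&
		    sc->aggqid[tid] != 0 && ba_agreed)
			return sc->aggqid[tid];
		/* Everything else uses the single default queue. */
		return sc->first_data_qid;
	}

In the driver itself the equivalent test also excludes multicast frames and
QoS null-data frames; see the iwx_tx() hunk below.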
Diffstat (limited to 'sys/dev/pci/if_iwx.c')
-rw-r--r--  sys/dev/pci/if_iwx.c  703
1 file changed, 547 insertions(+), 156 deletions(-)
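One design point worth knowing before reading the new iwx_ampdu_tx_start():
net80211 invokes this callback in a context where the driver cannot sleep, so
the callback only records the pending ADDBA request in a bitmask and defers
the firmware commands to the ba_task. Below is a hedged sketch of that
deferral pattern; the ba_sketch_* names are hypothetical stand-ins, not driver
or net80211 API.

	#include <errno.h>
	#include <stdint.h>

	struct ba_sketch_softc {
		uint16_t ba_tx_start_tidmask;	/* pending ADDBA requests, one bit per TID */
	};

	/* Hypothetical stand-in for iwx_add_task(sc, systq, &sc->ba_task). */
	static void
	ba_sketch_schedule_task(struct ba_sketch_softc *sc)
	{
		(void)sc;	/* the real driver queues the ba_task on systq here */
	}

	static int
	ba_sketch_ampdu_tx_start(struct ba_sketch_softc *sc, uint8_t tid)
	{
		if (tid >= 8)			/* mirrors IWX_MAX_TID_COUNT */
			return EINVAL;
		if (sc->ba_tx_start_tidmask & (1 << tid))
			return EBUSY;		/* request already pending */

		sc->ba_tx_start_tidmask |= (1 << tid);
		ba_sketch_schedule_task(sc);
		return EBUSY;	/* deliberate: the task accepts or refuses later */
	}

As with the existing Rx aggregation path, returning EBUSY signals that the
request is in progress; the task finishes the handshake by calling
ieee80211_addba_resp_accept() or ieee80211_addba_resp_refuse().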
diff --git a/sys/dev/pci/if_iwx.c b/sys/dev/pci/if_iwx.c
index 5b58d964d12..12b741ac9f4 100644
--- a/sys/dev/pci/if_iwx.c
+++ b/sys/dev/pci/if_iwx.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_iwx.c,v 1.109 2021/09/11 17:28:44 stsp Exp $ */
+/* $OpenBSD: if_iwx.c,v 1.110 2021/09/23 15:34:00 stsp Exp $ */
/*
* Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
@@ -318,18 +318,16 @@ int iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
uint8_t);
void iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
uint8_t);
+int iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
+ uint8_t);
void iwx_rx_ba_session_expired(void *);
void iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
struct iwx_rx_data *, struct mbuf_list *);
void iwx_reorder_timer_expired(void *);
void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
uint16_t, uint16_t, int, int);
-#ifdef notyet
-int iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
- uint8_t);
-void iwx_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
+void iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
uint8_t);
-#endif
void iwx_ba_task(void *);
int iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
@@ -355,10 +353,12 @@ int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
struct ieee80211_node *, struct ieee80211_rxinfo *);
void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
-void iwx_rx_tx_cmd_single(struct iwx_softc *, struct iwx_rx_packet *,
- struct iwx_node *);
+void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
+void iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
+void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, int);
void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
struct iwx_rx_data *);
+void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
struct iwx_rx_data *);
int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
@@ -382,8 +382,11 @@ void iwx_cmd_done(struct iwx_softc *, int, int, int);
const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
void iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
-int iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *, int);
-int iwx_flush_tx_path(struct iwx_softc *);
+int iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
+int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
+int iwx_wait_tx_queues_empty(struct iwx_softc *);
+int iwx_drain_sta(struct iwx_softc *, struct iwx_node *, int);
+int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
int iwx_beacon_filter_send_cmd(struct iwx_softc *,
struct iwx_beacon_filter_cmd *);
int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
@@ -396,6 +399,7 @@ int iwx_disable_beacon_filter(struct iwx_softc *);
int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
int iwx_add_aux_sta(struct iwx_softc *);
int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
+int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
int iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
int iwx_config_umac_scan_reduced(struct iwx_softc *);
int iwx_config_umac_scan(struct iwx_softc *);
@@ -425,6 +429,7 @@ int iwx_scan(struct iwx_softc *);
int iwx_bgscan(struct ieee80211com *);
int iwx_umac_scan_abort(struct iwx_softc *);
int iwx_scan_abort(struct iwx_softc *);
+int iwx_enable_mgmt_queue(struct iwx_softc *);
int iwx_rs_rval2idx(uint8_t);
uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
@@ -1776,20 +1781,20 @@ iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
ring->desc = ring->desc_dma.vaddr;
/*
- * There is no need to allocate DMA buffers for unused rings.
- * The hardware supports up to 31 Tx rings which is more
+ * The hardware supports up to 512 Tx rings which is more
* than we currently need.
*
- * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
- * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
- * are sc->tqx[ac + IWX_DQA_AUX_QUEUE + 1], i.e. sc->txq[2:5],
- * in order to provide one queue per EDCA category.
+ * In DQA mode we use 1 command queue + 1 default queue for
+ * management, control, and non-QoS data frames.
+ * The command queue is sc->txq[0]; our default queue is sc->txq[1].
*
- * Tx aggregation will require additional queues (one queue per TID
- * for which aggregation is enabled) but we do not implement this yet.
+ * Tx aggregation requires additional queues, one queue per TID for
+ * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
+ * Firmware may assign its own internal IDs for these queues
+ * depending on which TID gets aggregation enabled first.
+ * The driver maintains a table mapping driver-side queue IDs
+ * to firmware-side queue IDs.
*/
- if (qid > IWX_DQA_MIN_MGMT_QUEUE)
- return 0;
err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
sizeof(struct iwx_agn_scd_bc_tbl), 0);
@@ -1863,9 +1868,17 @@ iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
sc->qfullmsk &= ~(1 << ring->qid);
+ sc->qenablemsk &= ~(1 << ring->qid);
+ for (i = 0; i < nitems(sc->aggqid); i++) {
+ if (sc->aggqid[i] == ring->qid) {
+ sc->aggqid[i] = 0;
+ break;
+ }
+ }
ring->queued = 0;
ring->cur = 0;
ring->tail = 0;
+ ring->tid = 0;
}
void
@@ -2372,15 +2385,23 @@ iwx_start_hw(struct iwx_softc *sc)
void
iwx_stop_device(struct iwx_softc *sc)
{
- int qid;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = ic->ic_bss;
+ int i;
iwx_disable_interrupts(sc);
sc->sc_flags &= ~IWX_FLAG_USE_ICT;
iwx_disable_rx_dma(sc);
iwx_reset_rx_ring(sc, &sc->rxq);
- for (qid = 0; qid < nitems(sc->txq); qid++)
- iwx_reset_tx_ring(sc, &sc->txq[qid]);
+ for (i = 0; i < nitems(sc->txq); i++)
+ iwx_reset_tx_ring(sc, &sc->txq[i]);
+ for (i = 0; i < IEEE80211_NUM_TID; i++) {
+ struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
+ if (ba->ba_state != IEEE80211_BA_AGREED)
+ continue;
+ ieee80211_delba_request(ic, ni, 0, 1, i);
+ }
/* Make sure (redundant) we've released our request to stay awake */
IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
@@ -2487,6 +2508,18 @@ iwx_nic_init(struct iwx_softc *sc)
return 0;
}
+/* Map a TID to an ieee80211_edca_ac category. */
+const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
+ EDCA_AC_BE,
+ EDCA_AC_BK,
+ EDCA_AC_BK,
+ EDCA_AC_BE,
+ EDCA_AC_VI,
+ EDCA_AC_VI,
+ EDCA_AC_VO,
+ EDCA_AC_VO,
+};
+
/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
const uint8_t iwx_ac_to_tx_fifo[] = {
IWX_GEN2_EDCA_TX_FIFO_BE,
@@ -2559,6 +2592,9 @@ iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
err = EIO;
goto out;
}
+
+ sc->qenablemsk |= (1 << qid);
+ ring->tid = tid;
out:
iwx_free_resp(sc, &hcmd);
return err;
@@ -3146,6 +3182,62 @@ iwx_updateedca(struct ieee80211com *ic)
}
void
+iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
+ uint8_t tid)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_tx_ba *ba;
+ int err, qid;
+ struct iwx_tx_ring *ring;
+
+ /* Ensure we can map this TID to an aggregation queue. */
+ if (tid >= IWX_MAX_TID_COUNT)
+ return;
+
+ ba = &ni->ni_tx_ba[tid];
+ if (ba->ba_state != IEEE80211_BA_REQUESTED)
+ return;
+
+ qid = sc->aggqid[tid];
+ if (qid == 0) {
+ /* Firmware should pick the next unused Tx queue. */
+ qid = fls(sc->qenablemsk);
+ }
+
+ /*
+ * Simply enable the queue.
+ * Firmware handles Tx BA session setup and teardown.
+ */
+ if ((sc->qenablemsk & (1 << qid)) == 0) {
+ if (!iwx_nic_lock(sc)) {
+ ieee80211_addba_resp_refuse(ic, ni, tid,
+ IEEE80211_STATUS_UNSPECIFIED);
+ return;
+ }
+ err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
+ IWX_TX_RING_COUNT);
+ iwx_nic_unlock(sc);
+ if (err) {
+ printf("%s: could not enable Tx queue %d "
+ "(error %d)\n", DEVNAME(sc), qid, err);
+ ieee80211_addba_resp_refuse(ic, ni, tid,
+ IEEE80211_STATUS_UNSPECIFIED);
+ return;
+ }
+
+ ba->ba_winstart = 0;
+ } else
+ ba->ba_winstart = ni->ni_qos_txseqs[tid];
+
+ ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
+
+ ring = &sc->txq[qid];
+ ba->ba_timeout_val = 0;
+ ieee80211_addba_resp_accept(ic, ni, tid);
+ sc->aggqid[tid] = qid;
+}
+
+void
iwx_ba_task(void *arg)
{
struct iwx_softc *sc = arg;
@@ -3157,13 +3249,23 @@ iwx_ba_task(void *arg)
for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
break;
- if (sc->ba_start_tidmask & (1 << tid)) {
- iwx_sta_rx_agg(sc, ni, tid, sc->ba_ssn[tid],
- sc->ba_winsize[tid], sc->ba_timeout_val[tid], 1);
- sc->ba_start_tidmask &= ~(1 << tid);
- } else if (sc->ba_stop_tidmask & (1 << tid)) {
+ if (sc->ba_rx.start_tidmask & (1 << tid)) {
+ struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
+ iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
+ ba->ba_winsize, ba->ba_timeout_val, 1);
+ sc->ba_rx.start_tidmask &= ~(1 << tid);
+ } else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
- sc->ba_stop_tidmask &= ~(1 << tid);
+ sc->ba_rx.stop_tidmask &= ~(1 << tid);
+ }
+ }
+
+ for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+ if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
+ break;
+ if (sc->ba_tx.start_tidmask & (1 << tid)) {
+ iwx_sta_tx_agg_start(sc, ni, tid);
+ sc->ba_tx.start_tidmask &= ~(1 << tid);
}
}
@@ -3179,17 +3281,16 @@ int
iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
uint8_t tid)
{
- struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
struct iwx_softc *sc = IC2IFP(ic)->if_softc;
if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
- tid > IWX_MAX_TID_COUNT || (sc->ba_start_tidmask & (1 << tid)))
+ tid > IWX_MAX_TID_COUNT)
return ENOSPC;
- sc->ba_start_tidmask |= (1 << tid);
- sc->ba_ssn[tid] = ba->ba_winstart;
- sc->ba_winsize[tid] = ba->ba_winsize;
- sc->ba_timeout_val[tid] = ba->ba_timeout_val;
+ if (sc->ba_rx.start_tidmask & (1 << tid))
+ return EBUSY;
+
+ sc->ba_rx.start_tidmask |= (1 << tid);
iwx_add_task(sc, systq, &sc->ba_task);
return EBUSY;
@@ -3205,13 +3306,49 @@ iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
{
struct iwx_softc *sc = IC2IFP(ic)->if_softc;
- if (tid > IWX_MAX_TID_COUNT || sc->ba_stop_tidmask & (1 << tid))
+ if (tid > IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
return;
- sc->ba_stop_tidmask = (1 << tid);
+ sc->ba_rx.stop_tidmask = (1 << tid);
iwx_add_task(sc, systq, &sc->ba_task);
}
+int
+iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
+ uint8_t tid)
+{
+ struct iwx_softc *sc = IC2IFP(ic)->if_softc;
+ struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
+
+ /*
+ * Require a firmware version which uses an internal AUX queue.
+ * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
+ */
+ if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
+ return ENOTSUP;
+
+ /* Ensure we can map this TID to an aggregation queue. */
+ if (tid >= IWX_MAX_TID_COUNT)
+ return EINVAL;
+
+ /* We only support a fixed Tx aggregation window size, for now. */
+ if (ba->ba_winsize != IWX_FRAME_LIMIT)
+ return ENOTSUP;
+
+ /* Is firmware already using an agg queue with this TID? */
+ if (sc->aggqid[tid] != 0)
+ return ENOSPC;
+
+ /* Are we already processing an ADDBA request? */
+ if (sc->ba_tx.start_tidmask & (1 << tid))
+ return EBUSY;
+
+ sc->ba_tx.start_tidmask |= (1 << tid);
+ iwx_add_task(sc, systq, &sc->ba_task);
+
+ return EBUSY;
+}
+
/* Read the mac address from WFMP registers. */
int
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
@@ -4368,25 +4505,6 @@ iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
}
void
-iwx_rx_tx_cmd_single(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
- struct iwx_node *in)
-{
- struct ieee80211com *ic = &sc->sc_ic;
- struct ifnet *ifp = IC2IFP(ic);
- struct iwx_tx_resp *tx_resp = (void *)pkt->data;
- int status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
- int txfail;
-
- KASSERT(tx_resp->frame_count == 1);
-
- txfail = (status != IWX_TX_STATUS_SUCCESS &&
- status != IWX_TX_STATUS_DIRECT_DONE);
-
- if (txfail)
- ifp->if_oerrors++;
-}
-
-void
iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
{
struct iwx_tfh_tfd *desc = &ring->desc[idx];
@@ -4422,16 +4540,31 @@ iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
}
void
+iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
+{
+ struct iwx_tx_data *txd;
+
+ while (ring->tail != idx) {
+ txd = &ring->data[ring->tail];
+ if (txd->m != NULL) {
+ iwx_clear_tx_desc(sc, ring, ring->tail);
+ iwx_tx_update_byte_tbl(ring, ring->tail, 0, 0);
+ iwx_txd_done(sc, txd);
+ ring->queued--;
+ }
+ ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
+ }
+}
+
+void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
struct iwx_rx_data *data)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ifnet *ifp = IC2IFP(ic);
struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
- int idx = cmd_hdr->idx;
- int qid = cmd_hdr->qid;
+ int qid = cmd_hdr->qid, status, txfail;
struct iwx_tx_ring *ring = &sc->txq[qid];
- struct iwx_tx_data *txd;
struct iwx_tx_resp *tx_resp = (void *)pkt->data;
uint32_t ssn;
uint32_t len = iwx_rx_packet_len(pkt);
@@ -4441,32 +4574,42 @@ iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
sc->sc_tx_timer = 0;
- txd = &ring->data[idx];
- if (txd->m == NULL)
+ /* Sanity checks. */
+ if (sizeof(*tx_resp) > len)
return;
-
- if (sizeof(*tx_resp) + sizeof(ssn) +
+ if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
+ return;
+ if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
tx_resp->frame_count * sizeof(tx_resp->status) > len)
return;
- iwx_rx_tx_cmd_single(sc, pkt, txd->in);
+ if (tx_resp->frame_count > 1) /* A-MPDU */
+ return;
+
+ status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
+ txfail = (status != IWX_TX_STATUS_SUCCESS &&
+ status != IWX_TX_STATUS_DIRECT_DONE);
+
+ if (txfail)
+ ifp->if_oerrors++;
/*
- * Even though this is not an agg queue, we must only free
- * frames before the firmware's starting sequence number.
+ * On hardware supported by iwx(4) the SSN counter is only
+ * 8 bits wide and corresponds to a Tx ring index rather than a
+ * sequence number. Frames up to this index (non-inclusive)
+ * can now be freed.
*/
memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
- ssn = le32toh(ssn) & 0xfff;
- while (ring->tail != IWX_AGG_SSN_TO_TXQ_IDX(ssn)) {
- txd = &ring->data[ring->tail];
- if (txd->m != NULL) {
- iwx_txd_done(sc, txd);
- iwx_clear_tx_desc(sc, ring, ring->tail);
- iwx_tx_update_byte_tbl(ring, ring->tail, 0, 0);
- ring->queued--;
- }
- ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
- }
+ ssn = le32toh(ssn) & 0xff;
+ iwx_txq_advance(sc, ring, ssn);
+ iwx_clear_oactive(sc, ring);
+}
+
+void
+iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
if (ring->queued < IWX_TX_RING_LOMARK) {
sc->qfullmsk &= ~(1 << ring->qid);
@@ -4483,6 +4626,64 @@ iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
}
void
+iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+ struct iwx_rx_data *data)
+{
+ struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni;
+ struct ieee80211_tx_ba *ba;
+ struct iwx_node *in;
+ struct iwx_tx_ring *ring;
+ uint16_t i, tfd_cnt, ra_tid_cnt, idx;
+ int qid;
+
+ if (ic->ic_state != IEEE80211_S_RUN)
+ return;
+
+ if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
+ return;
+
+ if (ba_res->sta_id != IWX_STATION_ID)
+ return;
+
+ ni = ic->ic_bss;
+ in = (void *)ni;
+
+ tfd_cnt = le16toh(ba_res->tfd_cnt);
+ ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
+ if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
+ sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
+ sizeof(ba_res->tfd[0]) * tfd_cnt))
+ return;
+
+ for (i = 0; i < tfd_cnt; i++) {
+ struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
+ uint8_t tid;
+
+ tid = ba_tfd->tid;
+ if (tid >= nitems(sc->aggqid))
+ continue;
+
+ qid = sc->aggqid[tid];
+ if (qid != htole16(ba_tfd->q_num))
+ continue;
+
+ ring = &sc->txq[qid];
+
+ ba = &ni->ni_tx_ba[tid];
+ if (ba->ba_state != IEEE80211_BA_AGREED)
+ continue;
+
+ idx = le16toh(ba_tfd->tfd_index);
+ if (idx >= IWX_TX_RING_COUNT)
+ continue;
+ iwx_txq_advance(sc, ring, idx);
+ iwx_clear_oactive(sc, ring);
+ }
+}
+
+void
iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
struct iwx_rx_data *data)
{
@@ -5078,7 +5279,7 @@ iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
}
int
-iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
+iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
struct ieee80211com *ic = &sc->sc_ic;
struct iwx_node *in = (void *)ni;
@@ -5094,25 +5295,36 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
u_int hdrlen;
bus_dma_segment_t *seg;
uint16_t num_tbs;
- uint8_t type;
- int i, totlen, err, pad;
+ uint8_t type, subtype;
+ int i, totlen, err, pad, qid;
wh = mtod(m, struct ieee80211_frame *);
- hdrlen = ieee80211_get_hdrlen(wh);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if (type == IEEE80211_FC0_TYPE_CTL)
+ hdrlen = sizeof(struct ieee80211_frame_min);
+ else
+ hdrlen = ieee80211_get_hdrlen(wh);
- /*
- * Map EDCA categories to Tx data queues.
- *
- * We use static data queue assignments even in DQA mode. We do not
- * need to share Tx queues between stations because we only implement
- * client mode; the firmware's station table contains only one entry
- * which represents our access point.
- *
- * Tx aggregation will require additional queues (one queue per TID
- * for which aggregation is enabled) but we do not implement this yet.
- */
- ring = &sc->txq[ac + sc->first_data_qid];
+ qid = sc->first_data_qid;
+
+ /* Put QoS frames on the data queue which maps to their TID. */
+ if (ieee80211_has_qos(wh)) {
+ struct ieee80211_tx_ba *ba;
+ uint16_t qos = ieee80211_get_qos(wh);
+ uint8_t tid = qos & IEEE80211_QOS_TID;
+
+ ba = &ni->ni_tx_ba[tid];
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+ type == IEEE80211_FC0_TYPE_DATA &&
+ subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
+ sc->aggqid[tid] != 0 &&
+ ba->ba_state == IEEE80211_BA_AGREED) {
+ qid = sc->aggqid[tid];
+ }
+ }
+
+ ring = &sc->txq[qid];
desc = &ring->desc[ring->cur];
memset(desc, 0, sizeof(*desc));
data = &ring->data[ring->cur];
@@ -5263,18 +5475,167 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
}
int
-iwx_flush_tx_path(struct iwx_softc *sc)
+iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
+ struct iwx_rx_packet *pkt;
+ struct iwx_tx_path_flush_cmd_rsp *resp;
struct iwx_tx_path_flush_cmd flush_cmd = {
- .sta_id = htole32(IWX_STATION_ID),
- .tid_mask = htole16(0xffff),
+ .sta_id = htole32(sta_id),
+ .tid_mask = htole16(tids),
};
+ struct iwx_host_cmd hcmd = {
+ .id = IWX_TXPATH_FLUSH,
+ .len = { sizeof(flush_cmd), },
+ .data = { &flush_cmd, },
+ .flags = IWX_CMD_WANT_RESP,
+ .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
+ };
+ int err, resp_len, i, num_flushed_queues;
+
+ err = iwx_send_cmd(sc, &hcmd);
+ if (err)
+ return err;
+
+ pkt = hcmd.resp_pkt;
+ if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp_len = iwx_rx_packet_payload_len(pkt);
+ /* Some firmware versions don't provide a response. */
+ if (resp_len == 0)
+ goto out;
+ else if (resp_len != sizeof(*resp)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp = (void *)pkt->data;
+
+ if (le16toh(resp->sta_id) != sta_id) {
+ err = EIO;
+ goto out;
+ }
+
+ num_flushed_queues = le16toh(resp->num_flushed_queues);
+ if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
+ err = EIO;
+ goto out;
+ }
+
+ for (i = 0; i < num_flushed_queues; i++) {
+ struct iwx_flush_queue_info *queue_info = &resp->queues[i];
+ uint16_t tid = le16toh(queue_info->tid);
+ uint16_t read_after = le16toh(queue_info->read_after_flush);
+ uint16_t qid = le16toh(queue_info->queue_num);
+ struct iwx_tx_ring *txq;
+
+ if (qid >= nitems(sc->txq))
+ continue;
+
+ txq = &sc->txq[qid];
+ if (tid != txq->tid)
+ continue;
+
+ iwx_txq_advance(sc, txq, read_after);
+ }
+out:
+ iwx_free_resp(sc, &hcmd);
+ return err;
+}
+
+#define IWX_FLUSH_WAIT_MS 2000
+
+int
+iwx_wait_tx_queues_empty(struct iwx_softc *sc)
+{
+ int i, err;
+
+ for (i = 0; i < nitems(sc->txq); i++) {
+ struct iwx_tx_ring *ring = &sc->txq[i];
+
+ if (i == IWX_DQA_CMD_QUEUE)
+ continue;
+
+ while (ring->queued > 0) {
+ err = tsleep_nsec(ring, 0, "iwxflush",
+ MSEC_TO_NSEC(IWX_FLUSH_WAIT_MS));
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int
+iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *in, int drain)
+{
+ struct iwx_add_sta_cmd cmd;
int err;
+ uint32_t status;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color));
+ cmd.sta_id = IWX_STATION_ID;
+ cmd.add_modify = IWX_STA_MODE_MODIFY;
+ cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
+ cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
- err = iwx_send_cmd_pdu(sc, IWX_TXPATH_FLUSH, 0,
- sizeof(flush_cmd), &flush_cmd);
+ status = IWX_ADD_STA_SUCCESS;
+ err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
+ sizeof(cmd), &cmd, &status);
+ if (err) {
+ printf("%s: could not update sta (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ switch (status & IWX_ADD_STA_STATUS_MASK) {
+ case IWX_ADD_STA_SUCCESS:
+ break;
+ default:
+ err = EIO;
+ printf("%s: Couldn't %s draining for station\n",
+ DEVNAME(sc), drain ? "enable" : "disable");
+ break;
+ }
+
+ return err;
+}
+
+int
+iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
+{
+ int err;
+
+ splassert(IPL_NET);
+
+ sc->sc_flags |= IWX_FLAG_TXFLUSH;
+
+ err = iwx_drain_sta(sc, in, 1);
if (err)
- printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
+ goto done;
+
+ err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
+ if (err) {
+ printf("%s: could not flush Tx path (error %d)\n",
+ DEVNAME(sc), err);
+ goto done;
+ }
+
+ err = iwx_wait_tx_queues_empty(sc);
+ if (err) {
+ printf("%s: Could not empty Tx queues (error %d)\n",
+ DEVNAME(sc), err);
+ goto done;
+ }
+
+ err = iwx_drain_sta(sc, in, 0);
+done:
+ sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
return err;
}
@@ -5441,9 +5802,6 @@ iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
add_sta_cmd.add_modify = update ? 1 : 0;
add_sta_cmd.station_flags_msk
|= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
- add_sta_cmd.tid_disable_tx = htole16(0xffff);
- if (update)
- add_sta_cmd.modify_mask |= (IWX_STA_MODIFY_TID_DISABLE_TX);
if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
add_sta_cmd.station_flags_msk
@@ -5515,7 +5873,6 @@ iwx_add_aux_sta(struct iwx_softc *sc)
cmd.station_type = IWX_STA_AUX_ACTIVITY;
cmd.mac_id_n_color =
htole32(IWX_FW_CMD_ID_AND_COLOR(IWX_MAC_INDEX_AUX, 0));
- cmd.tid_disable_tx = htole16(0xffff);
status = IWX_ADD_STA_SUCCESS;
err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
@@ -5549,6 +5906,46 @@ iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
return err;
}
+int
+iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = &in->in_ni;
+ int err, i;
+
+ err = iwx_flush_sta(sc, in);
+ if (err) {
+ printf("%s: could not flush Tx path (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ err = iwx_rm_sta_cmd(sc, in);
+ if (err) {
+ printf("%s: could not remove STA (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ in->in_flags = 0;
+
+ sc->sc_rx_ba_sessions = 0;
+ sc->ba_rx.start_tidmask = 0;
+ sc->ba_rx.stop_tidmask = 0;
+ memset(sc->aggqid, 0, sizeof(sc->aggqid));
+ sc->ba_tx.start_tidmask = 0;
+ sc->ba_tx.stop_tidmask = 0;
+ for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
+ sc->qenablemsk &= ~(1 << i);
+ for (i = 0; i < IEEE80211_NUM_TID; i++) {
+ struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
+ if (ba->ba_state != IEEE80211_BA_AGREED)
+ continue;
+ ieee80211_delba_request(ic, ni, 0, 1, i);
+ }
+
+ return 0;
+}
+
uint8_t
iwx_umac_scan_fill_channels(struct iwx_softc *sc,
struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
@@ -6693,9 +7090,9 @@ iwx_scan_abort(struct iwx_softc *sc)
}
int
-iwx_enable_data_tx_queues(struct iwx_softc *sc)
+iwx_enable_mgmt_queue(struct iwx_softc *sc)
{
- int err, ac, cmdver;
+ int err, cmdver;
/*
* ADD_STA command version >= 12 implies that firmware uses
@@ -6709,19 +7106,16 @@ iwx_enable_data_tx_queues(struct iwx_softc *sc)
else
sc->first_data_qid = IWX_DQA_AUX_QUEUE + 1;
- for (ac = 0; ac < EDCA_NUM_AC; ac++) {
- int qid = ac + sc->first_data_qid;
- /*
- * Regular data frames use the "MGMT" TID and queue.
- * Other TIDs and queues are reserved for frame aggregation.
- */
- err = iwx_enable_txq(sc, IWX_STATION_ID, qid, IWX_MGMT_TID,
- IWX_TX_RING_COUNT);
- if (err) {
- printf("%s: could not enable Tx queue %d (error %d)\n",
- DEVNAME(sc), qid, err);
- return err;
- }
+ /*
+ * Non-QoS frames use the "MGMT" TID and queue.
+ * Other TIDs and data queues are reserved for QoS data frames.
+ */
+ err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
+ IWX_MGMT_TID, IWX_TX_RING_COUNT);
+ if (err) {
+ printf("%s: could not enable Tx queue %d (error %d)\n",
+ DEVNAME(sc), sc->first_data_qid, err);
+ return err;
}
return 0;
@@ -6799,7 +7193,7 @@ iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
cfg_cmd.sta_id = IWX_STATION_ID;
cfg_cmd.max_ch_width = IWX_RATE_MCS_CHAN_WIDTH_20;
cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
- cfg_cmd.max_mpdu_len = IEEE80211_MAX_LEN;
+ cfg_cmd.max_mpdu_len = 3839;
if (ieee80211_node_supports_ht_sgi20(ni))
cfg_cmd.sgi_ch_width_supp = (1 << IWX_TLC_MNG_CH_WIDTH_20MHZ);
@@ -6944,7 +7338,7 @@ iwx_auth(struct iwx_softc *sc)
return 0;
}
- err = iwx_enable_data_tx_queues(sc);
+ err = iwx_enable_mgmt_queue(sc);
if (err)
goto rm_sta;
@@ -6997,21 +7391,10 @@ iwx_deauth(struct iwx_softc *sc)
iwx_unprotect_session(sc, in);
if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
- err = iwx_flush_tx_path(sc);
- if (err) {
- printf("%s: could not flush Tx path (error %d)\n",
- DEVNAME(sc), err);
- return err;
- }
- err = iwx_rm_sta_cmd(sc, in);
- if (err) {
- printf("%s: could not remove STA (error %d)\n",
- DEVNAME(sc), err);
+ err = iwx_rm_sta(sc, in);
+ if (err)
return err;
- }
sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
- sc->sc_rx_ba_sessions = 0;
- in->in_flags = 0;
}
if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
@@ -7061,7 +7444,7 @@ iwx_assoc(struct iwx_softc *sc)
}
if (!update_sta)
- err = iwx_enable_data_tx_queues(sc);
+ err = iwx_enable_mgmt_queue(sc);
return err;
}
@@ -7076,19 +7459,10 @@ iwx_disassoc(struct iwx_softc *sc)
splassert(IPL_NET);
if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
- err = iwx_rm_sta_cmd(sc, in);
- if (err) {
- printf("%s: could not remove STA (error %d)\n",
- DEVNAME(sc), err);
+ err = iwx_rm_sta(sc, in);
+ if (err)
return err;
- }
sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
- in->in_flags = 0;
- sc->sc_rx_ba_sessions = 0;
- sc->ba_start_tidmask = 0;
- sc->ba_stop_tidmask = 0;
- sc->ba_start_tidmask = 0;
- sc->ba_stop_tidmask = 0;
}
return 0;
@@ -7204,6 +7578,15 @@ iwx_run_stop(struct iwx_softc *sc)
splassert(IPL_NET);
+ if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
+ err = iwx_flush_sta(sc, in);
+ if (err) {
+ printf("%s: could not flush Tx path (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ }
+
err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
if (err)
return err;
@@ -8044,7 +8427,6 @@ iwx_start(struct ifnet *ifp)
struct ieee80211_node *ni;
struct ether_header *eh;
struct mbuf *m;
- int ac = EDCA_AC_BE; /* XXX */
if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
return;
@@ -8056,6 +8438,10 @@ iwx_start(struct ifnet *ifp)
break;
}
+ /* Don't queue additional frames while flushing Tx queues. */
+ if (sc->sc_flags & IWX_FLAG_TXFLUSH)
+ break;
+
/* need to send management frames even if we're not RUNning */
m = mq_dequeue(&ic->ic_mgtq);
if (m) {
@@ -8089,7 +8475,7 @@ iwx_start(struct ifnet *ifp)
if (ic->ic_rawbpf != NULL)
bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
- if (iwx_tx(sc, m, ni, ac) != 0) {
+ if (iwx_tx(sc, m, ni) != 0) {
ieee80211_release_node(ic, ni);
ifp->if_oerrors++;
continue;
@@ -8150,13 +8536,14 @@ iwx_stop(struct ifnet *ifp)
sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
sc->sc_flags &= ~IWX_FLAG_HW_ERR;
sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
+ sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
sc->sc_rx_ba_sessions = 0;
- sc->ba_start_tidmask = 0;
- sc->ba_stop_tidmask = 0;
- memset(sc->ba_ssn, 0, sizeof(sc->ba_ssn));
- memset(sc->ba_winsize, 0, sizeof(sc->ba_winsize));
- memset(sc->ba_timeout_val, 0, sizeof(sc->ba_timeout_val));
+ sc->ba_rx.start_tidmask = 0;
+ sc->ba_rx.stop_tidmask = 0;
+ memset(sc->aggqid, 0, sizeof(sc->aggqid));
+ sc->ba_tx.start_tidmask = 0;
+ sc->ba_tx.stop_tidmask = 0;
sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
@@ -8504,7 +8891,7 @@ iwx_dump_driver_status(struct iwx_softc *sc)
int i;
printf("driver status:\n");
- for (i = 0; i < IWX_MAX_QUEUES; i++) {
+ for (i = 0; i < nitems(sc->txq); i++) {
struct iwx_tx_ring *ring = &sc->txq[i];
printf(" tx ring %2d: qid=%-2d cur=%-3d "
"queued=%-3d\n",
@@ -8638,6 +9025,10 @@ iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
iwx_rx_tx_cmd(sc, pkt, data);
break;
+ case IWX_BA_NOTIF:
+ iwx_rx_compressed_ba(sc, pkt, data);
+ break;
+
case IWX_MISSED_BEACONS_NOTIFICATION:
iwx_rx_bmiss(sc, pkt, data);
break;
@@ -9444,6 +9835,8 @@ iwx_attach(struct device *parent, struct device *self, void *aux)
/* Set device capabilities. */
ic->ic_caps =
+ IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
+ IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
IEEE80211_C_WEP | /* WEP */
IEEE80211_C_RSN | /* WPA/RSN */
IEEE80211_C_SCANALL | /* device scans all channels at once */
@@ -9517,10 +9910,8 @@ iwx_attach(struct device *parent, struct device *self, void *aux)
ic->ic_updateedca = iwx_updateedca;
ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
-#ifdef notyet
ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
- ic->ic_ampdu_tx_stop = iwx_ampdu_tx_stop;
-#endif
+ ic->ic_ampdu_tx_stop = NULL;
/*
* We cannot read the MAC address without loading the
* firmware from disk. Postpone until mountroot is done.