author     Stefan Sperling <stsp@cvs.openbsd.org>  2015-02-06 19:49:30 +0000
committer  Stefan Sperling <stsp@cvs.openbsd.org>  2015-02-06 19:49:30 +0000
commit     798e4c6be5fc42f1bbd7c3970360a62ea325d4be (patch)
tree       4a2a8f466992d694dc06f4385fc2b6bc8533415a /sys
parent     744d736dcc5d0977cac967fc6e81977b439ca28f (diff)
Add iwm(4), a new driver for Intel 7260 wifi cards.
Based on iwn(4) and Linux iwlwifi (which is dual BSD/GPLv2 licenced).

Created by Fixup Software Ltd. for genua mbh, who then passed on the code base
to the OpenBSD project. The genua version of this driver was written for
OpenBSD 5.4; ported to -current by myself, phessler@, and deraadt@. So far,
we've done semantic and stylistic cleanup without functional changes.

The driver is functional but has some known issues which will be worked on
in-tree. Requires iwm firmware which is available in ports thanks to sthen@.

ok deraadt@ phessler@
Diffstat (limited to 'sys')
-rw-r--r--   sys/dev/pci/files.pci       7
-rw-r--r--   sys/dev/pci/if_iwm.c     6485
-rw-r--r--   sys/dev/pci/if_iwmreg.h  5304
-rw-r--r--   sys/dev/pci/if_iwmvar.h   460
4 files changed, 12255 insertions, 1 deletion
diff --git a/sys/dev/pci/files.pci b/sys/dev/pci/files.pci
index ec9e7313f9e..7edb332c0cb 100644
--- a/sys/dev/pci/files.pci
+++ b/sys/dev/pci/files.pci
@@ -1,4 +1,4 @@
-# $OpenBSD: files.pci,v 1.311 2015/01/10 16:26:17 kettenis Exp $
+# $OpenBSD: files.pci,v 1.312 2015/02/06 19:49:29 stsp Exp $
# $NetBSD: files.pci,v 1.20 1996/09/24 17:47:15 christos Exp $
#
# Config file and device description for machine-independent PCI code.
@@ -577,6 +577,11 @@ device iwn: ifnet, wlan, firmload
attach iwn at pci
file dev/pci/if_iwn.c iwn

+# Intel Wireless WiFi Link 7xxx
+device iwm: ifnet, wlan, firmload
+attach iwm at pci
+file dev/pci/if_iwm.c iwm
+
# C-Media CMI8x38 Audio Chip
device cmpci {}: audio, auconv, mulaw
attach cmpci at pci
diff --git a/sys/dev/pci/if_iwm.c b/sys/dev/pci/if_iwm.c
new file mode 100644
index 00000000000..01e176e7311
--- /dev/null
+++ b/sys/dev/pci/if_iwm.c
@@ -0,0 +1,6485 @@
+/* $OpenBSD: if_iwm.c,v 1.1 2015/02/06 19:49:29 stsp Exp $ */
+
+/*
+ * Copyright (c) 2014 genua mbh <info@genua.de>
+ * Copyright (c) 2014 Fixup Software Ltd.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ * Driver version we are currently based off of is
+ * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
+ *
+ ***********************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/systm.h>
+
+#include <sys/task.h>
+#include <machine/bus.h>
+#include <machine/endian.h>
+#include <machine/intr.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_amrr.h>
+#include <net80211/ieee80211_radiotap.h>
+
+#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
+
+#define IC2IFP(_ic_) (&(_ic_)->ic_if)
+
+#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
+#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
+
+#ifdef IWM_DEBUG
+#define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
+#define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
+int iwm_debug = 1;
+#else
+#define DPRINTF(x) do { ; } while (0)
+#define DPRINTFN(n, x) do { ; } while (0)
+#endif
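
The debug macros above wrap their argument list in an extra set of parentheses so that a full printf-style argument list can be passed as a single macro parameter. A minimal usage sketch (illustrative only, not part of the committed code):

	DPRINTF(("%s: firmware version %u\n", DEVNAME(sc), sc->sc_fwver));
	DPRINTFN(10, ("%s: verbose trace message\n", DEVNAME(sc)));
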
+
+#include <dev/pci/if_iwmreg.h>
+#include <dev/pci/if_iwmvar.h>
+
+const uint8_t iwm_nvm_channels[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64,
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165
+};
+#define IWM_NUM_2GHZ_CHANNELS 14
+
+#define IWM_NO_5GHZ 1
+
+const struct iwm_rate {
+ uint8_t rate;
+ uint8_t plcp;
+} iwm_rates[] = {
+ { 2, IWM_RATE_1M_PLCP },
+ { 4, IWM_RATE_2M_PLCP },
+ { 11, IWM_RATE_5M_PLCP },
+ { 22, IWM_RATE_11M_PLCP },
+ { 12, IWM_RATE_6M_PLCP },
+ { 18, IWM_RATE_9M_PLCP },
+ { 24, IWM_RATE_12M_PLCP },
+ { 36, IWM_RATE_18M_PLCP },
+ { 48, IWM_RATE_24M_PLCP },
+ { 72, IWM_RATE_36M_PLCP },
+ { 96, IWM_RATE_48M_PLCP },
+ { 108, IWM_RATE_54M_PLCP },
+};
+#define IWM_RIDX_CCK 0
+#define IWM_RIDX_OFDM 4
+#define IWM_RIDX_MAX (nitems(iwm_rates)-1)
+#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
+#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
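
To make the rate table concrete: the rate field is in 500 kb/s units, so entry IWM_RIDX_CCK holds the 1 Mb/s CCK rate and entry IWM_RIDX_OFDM holds the 6 Mb/s OFDM rate. An illustrative comment, not part of the committed code:

	/*
	 * iwm_rates[IWM_RIDX_CCK].rate  ==   2  ->  1 Mb/s, IWM_RIDX_IS_CCK(IWM_RIDX_CCK)   is true
	 * iwm_rates[IWM_RIDX_OFDM].rate ==  12  ->  6 Mb/s, IWM_RIDX_IS_OFDM(IWM_RIDX_OFDM) is true
	 * iwm_rates[IWM_RIDX_MAX].rate  == 108  -> 54 Mb/s
	 */
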
+
+/*
+ * Supported rates for 802.11a/b/g modes (in 500Kbps unit).
+ */
+const struct ieee80211_rateset iwm_rateset_11a =
+ { 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };
+
+const struct ieee80211_rateset iwm_rateset_11b =
+ { 4, { 2, 4, 11, 22 } };
+
+const struct ieee80211_rateset iwm_rateset_11g =
+ { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
+
+struct iwm_newstate_state {
+ struct task ns_wk;
+ struct ieee80211com *ns_ic;
+ enum ieee80211_state ns_nstate;
+ int ns_arg;
+ int ns_generation;
+};
+
+int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
+int iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
+ uint8_t *, size_t);
+int iwm_set_default_calib(struct iwm_softc *, const void *);
+int iwm_read_firmware(struct iwm_softc *);
+uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
+void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
+int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
+int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
+int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
+int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
+int iwm_nic_lock(struct iwm_softc *);
+void iwm_nic_unlock(struct iwm_softc *);
+void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
+ uint32_t);
+void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
+void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
+int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
+ bus_size_t, bus_size_t);
+void iwm_dma_contig_free(struct iwm_dma_info *);
+int iwm_alloc_fwmem(struct iwm_softc *);
+void iwm_free_fwmem(struct iwm_softc *);
+int iwm_alloc_sched(struct iwm_softc *);
+void iwm_free_sched(struct iwm_softc *);
+int iwm_alloc_kw(struct iwm_softc *);
+void iwm_free_kw(struct iwm_softc *);
+int iwm_alloc_ict(struct iwm_softc *);
+void iwm_free_ict(struct iwm_softc *);
+int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
+void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
+void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
+int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
+void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
+void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
+void iwm_enable_rfkill_int(struct iwm_softc *);
+int iwm_check_rfkill(struct iwm_softc *);
+void iwm_enable_interrupts(struct iwm_softc *);
+void iwm_restore_interrupts(struct iwm_softc *);
+void iwm_disable_interrupts(struct iwm_softc *);
+void iwm_ict_reset(struct iwm_softc *);
+int iwm_set_hw_ready(struct iwm_softc *);
+int iwm_prepare_card_hw(struct iwm_softc *);
+void iwm_apm_config(struct iwm_softc *);
+int iwm_apm_init(struct iwm_softc *);
+void iwm_apm_stop(struct iwm_softc *);
+int iwm_start_hw(struct iwm_softc *);
+void iwm_stop_device(struct iwm_softc *);
+void iwm_set_pwr(struct iwm_softc *);
+void iwm_mvm_nic_config(struct iwm_softc *);
+int iwm_nic_rx_init(struct iwm_softc *);
+int iwm_nic_tx_init(struct iwm_softc *);
+int iwm_nic_init(struct iwm_softc *);
+void iwm_enable_txq(struct iwm_softc *, int, int);
+int iwm_post_alive(struct iwm_softc *);
+#ifdef notyet
+struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *,
+ enum iwm_phy_db_section_type, uint16_t);
+int iwm_phy_db_set_section(struct iwm_softc *,
+ struct iwm_calib_res_notif_phy_db *);
+#endif
+int iwm_is_valid_channel(uint16_t);
+uint8_t iwm_ch_id_to_ch_index(uint16_t);
+uint16_t iwm_channel_id_to_papd(uint16_t);
+uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
+int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
+ uint16_t *, uint16_t);
+int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
+#ifdef notyet
+int iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
+ enum iwm_phy_db_section_type, uint8_t);
+#endif
+int iwm_send_phy_db_data(struct iwm_softc *);
+void iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
+ struct iwm_time_event_cmd_v1 *);
+int iwm_mvm_send_time_event_cmd(struct iwm_softc *,
+ const struct iwm_time_event_cmd_v2 *);
+int iwm_mvm_time_event_send_add(struct iwm_softc *, struct iwm_node *,
+ void *, struct iwm_time_event_cmd_v2 *);
+void iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
+ uint32_t, uint32_t, uint32_t);
+int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
+ uint8_t *, uint16_t *);
+int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
+ uint16_t *);
+void iwm_init_channel_map(struct iwm_softc *, const uint16_t * const);
+int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
+ const uint16_t *, const uint16_t *, uint8_t,
+ uint8_t);
+#ifdef notyet
+int iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
+#endif
+int iwm_nvm_init(struct iwm_softc *);
+int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
+ uint32_t);
+int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
+int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
+int iwm_fw_alive(struct iwm_softc *, uint32_t);
+int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
+int iwm_send_phy_cfg_cmd(struct iwm_softc *);
+int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
+int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
+int iwm_rx_addbuf(struct iwm_softc *, int, int);
+int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
+int iwm_mvm_get_signal_strength(struct iwm_softc *,
+ struct iwm_rx_phy_info *);
+void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
+ struct iwm_rx_data *);
+void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
+ struct iwm_rx_data *);
+void iwm_mvm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
+ struct iwm_node *);
+void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
+ struct iwm_rx_data *);
+int iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
+int iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *, int);
+int iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
+void iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
+ struct iwm_phy_context_cmd *, uint32_t, uint32_t);
+void iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
+ struct iwm_phy_context_cmd *, struct ieee80211_channel *,
+ uint8_t, uint8_t);
+int iwm_mvm_phy_ctxt_apply(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
+ uint8_t, uint8_t, uint32_t, uint32_t);
+int iwm_mvm_phy_ctxt_add(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
+ struct ieee80211_channel *, uint8_t, uint8_t);
+int iwm_mvm_phy_ctxt_changed(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
+ struct ieee80211_channel *, uint8_t, uint8_t);
+int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
+int iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t, uint16_t,
+ const void *);
+int iwm_mvm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
+ uint32_t *);
+int iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
+ uint16_t, const void *, uint32_t *);
+void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
+void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
+void iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
+void iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
+ struct ieee80211_frame *, struct iwm_tx_cmd *);
+int iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
+int iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
+ struct iwm_beacon_filter_cmd *);
+void iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
+ struct iwm_node *, struct iwm_beacon_filter_cmd *);
+int iwm_mvm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
+void iwm_mvm_power_log(struct iwm_softc *, struct iwm_mac_power_cmd *);
+void iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
+ struct iwm_mac_power_cmd *);
+int iwm_mvm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
+int iwm_mvm_power_update_device(struct iwm_softc *);
+int iwm_mvm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
+int iwm_mvm_disable_beacon_filter(struct iwm_softc *, struct iwm_node *);
+void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
+ struct iwm_mvm_add_sta_cmd_v5 *);
+int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
+ struct iwm_mvm_add_sta_cmd_v6 *, int *);
+int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *, int);
+int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
+int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
+int iwm_mvm_add_int_sta_common(struct iwm_softc *, struct iwm_int_sta *,
+ const uint8_t *, uint16_t, uint16_t);
+int iwm_mvm_add_aux_sta(struct iwm_softc *);
+uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
+uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
+uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
+uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
+uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
+uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
+uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
+int iwm_mvm_scan_fill_channels(struct iwm_softc *, struct iwm_scan_cmd *,
+ int, int, int);
+uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *, struct ieee80211_frame *,
+ const uint8_t *, int, const uint8_t *, int, const uint8_t *, int, int);
+int iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *, int);
+void iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
+void iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
+ struct iwm_mac_ctx_cmd *, uint32_t);
+int iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *, struct iwm_mac_ctx_cmd *);
+void iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
+ struct iwm_mac_data_sta *, int);
+int iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *, struct iwm_node *,
+ uint32_t);
+int iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *, uint32_t);
+int iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
+int iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
+int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
+int iwm_auth(struct iwm_softc *);
+int iwm_assoc(struct iwm_softc *);
+int iwm_release(struct iwm_softc *, struct iwm_node *);
+struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
+void iwm_calib_timeout(void *);
+void iwm_setrates(struct iwm_node *);
+int iwm_media_change(struct ifnet *);
+void iwm_newstate_cb(void *);
+int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
+void iwm_endscan_cb(void *);
+int iwm_init_hw(struct iwm_softc *);
+int iwm_init(struct ifnet *);
+void iwm_start(struct ifnet *);
+void iwm_stop(struct ifnet *, int);
+void iwm_watchdog(struct ifnet *);
+int iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t);
+const char *iwm_desc_lookup(uint32_t);
+void iwm_nic_error(struct iwm_softc *);
+void iwm_notif_intr(struct iwm_softc *);
+int iwm_intr(void *);
+int iwm_match(struct device *, void *, void *);
+int iwm_preinit(struct iwm_softc *);
+void iwm_attach_hook(iwm_hookarg_t);
+void iwm_attach(struct device *, struct device *, void *);
+
+/*
+ * Firmware parser.
+ */
+
+int
+iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
+{
+ struct iwm_fw_cscheme_list *l = (void *)data;
+
+ if (dlen < sizeof(*l) ||
+ dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
+ return EINVAL;
+
+ /* we don't actually store anything for now, always use s/w crypto */
+
+ return 0;
+}
+
+int
+iwm_firmware_store_section(struct iwm_softc *sc,
+ enum iwm_ucode_type type, uint8_t *data, size_t dlen)
+{
+ struct iwm_fw_sects *fws;
+ struct iwm_fw_onesect *fwone;
+
+ if (type >= IWM_UCODE_TYPE_MAX)
+ return EINVAL;
+ if (dlen < sizeof(uint32_t))
+ return EINVAL;
+
+ fws = &sc->sc_fw.fw_sects[type];
+ if (fws->fw_count >= IWM_UCODE_SECT_MAX)
+ return EINVAL;
+
+ fwone = &fws->fw_sect[fws->fw_count];
+
+ /* first 32bit are device load offset */
+ memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
+
+ /* rest is data */
+ fwone->fws_data = data + sizeof(uint32_t);
+ fwone->fws_len = dlen - sizeof(uint32_t);
+
+ /* for freeing the buffer during driver unload */
+ fwone->fws_alloc = data;
+ fwone->fws_allocsize = dlen;
+
+ fws->fw_count++;
+ fws->fw_totlen += fwone->fws_len;
+
+ return 0;
+}
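
For reference, the section blob handed to iwm_firmware_store_section() is a 32-bit device load offset followed by the image bytes, which is exactly how the code above slices it. The struct below is a hypothetical illustration of that layout only; it is not a type used by the driver:

	struct iwm_fw_blob_layout {
		uint32_t devoff;	/* device load offset (first 4 bytes) */
		uint8_t  image[];	/* remaining dlen - sizeof(uint32_t) bytes */
	};
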
+
+/* iwlwifi: iwl-drv.c */
+struct iwm_tlv_calib_data {
+ uint32_t ucode_type;
+ struct iwm_tlv_calib_ctrl calib;
+} __packed;
+
+int
+iwm_set_default_calib(struct iwm_softc *sc, const void *data)
+{
+ const struct iwm_tlv_calib_data *def_calib = data;
+ uint32_t ucode_type = le32toh(def_calib->ucode_type);
+
+ if (ucode_type >= IWM_UCODE_TYPE_MAX) {
+ printf("%s: Wrong ucode_type %u for default "
+ "calibration.\n", DEVNAME(sc), ucode_type);
+ return EINVAL;
+ }
+
+ sc->sc_default_calib[ucode_type].flow_trigger =
+ def_calib->calib.flow_trigger;
+ sc->sc_default_calib[ucode_type].event_trigger =
+ def_calib->calib.event_trigger;
+
+ return 0;
+}
+
+int
+iwm_read_firmware(struct iwm_softc *sc)
+{
+ struct iwm_fw_info *fw = &sc->sc_fw;
+ struct iwm_tlv_ucode_header *uhdr;
+ struct iwm_ucode_tlv tlv;
+ enum iwm_ucode_tlv_type tlv_type;
+ uint8_t *data;
+ int error, status, len;
+
+ if (fw->fw_status == IWM_FW_STATUS_NONE) {
+ fw->fw_status = IWM_FW_STATUS_INPROGRESS;
+ } else {
+ while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
+ tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
+ }
+ status = fw->fw_status;
+
+ if (status == IWM_FW_STATUS_DONE)
+ return 0;
+ else if (status < 0)
+ return -status;
+
+ KASSERT(status == IWM_FW_STATUS_INPROGRESS);
+
+ /*
+ * Load firmware into driver memory.
+ * fw_rawdata and fw_rawsize will be set.
+ */
+ error = loadfirmware(sc->sc_fwname,
+ (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
+ if (error != 0) {
+ printf("%s: could not read firmware %s\n",
+ DEVNAME(sc), sc->sc_fwname);
+ goto out;
+ }
+
+ /*
+ * Parse firmware contents
+ */
+
+ uhdr = (void *)fw->fw_rawdata;
+ if (*(uint32_t *)fw->fw_rawdata != 0
+ || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
+ printf("%s: invalid firmware %s\n",
+ DEVNAME(sc), sc->sc_fwname);
+ error = EINVAL;
+ goto out;
+ }
+
+ sc->sc_fwver = le32toh(uhdr->ver);
+ data = uhdr->data;
+ len = fw->fw_rawsize - sizeof(*uhdr);
+
+ while (len >= sizeof(tlv)) {
+ uint32_t tlv_len;
+ void *tlv_data;
+
+ memcpy(&tlv, data, sizeof(tlv));
+ tlv_len = le32toh(tlv.length);
+ tlv_type = le32toh(tlv.type);
+
+ len -= sizeof(tlv);
+ data += sizeof(tlv);
+ tlv_data = data;
+
+ if (len < tlv_len) {
+ printf("%s: firmware image invalid length\n", DEVNAME(sc));
+ error = EINVAL;
+ goto parse_out;
+ }
+
+ switch ((int)tlv_type) {
+ case IWM_UCODE_TLV_PROBE_MAX_LEN:
+ if (tlv_len < sizeof(uint32_t)) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ sc->sc_capa_max_probe_len
+ = le32toh(*(uint32_t *)tlv_data);
+ /* limit it to something sensible */
+ if (sc->sc_capa_max_probe_len > (1<<16)) {
+ printf("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
+ "ridiculous\n", DEVNAME(sc));
+ error = EINVAL;
+ goto parse_out;
+ }
+ break;
+ case IWM_UCODE_TLV_PAN:
+ if (tlv_len) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
+ break;
+ case IWM_UCODE_TLV_FLAGS:
+ if (tlv_len < sizeof(uint32_t)) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ /*
+ * Apparently there can be many flags, but Linux driver
+ * parses only the first one, and so do we.
+ *
+ * XXX: why does this override IWM_UCODE_TLV_PAN?
+ * Intentional or a bug? Observations from
+ * current firmware file:
+ * 1) TLV_PAN is parsed first
+ * 2) TLV_FLAGS contains TLV_FLAGS_PAN
+ * ==> this resets TLV_PAN to itself... hnnnk
+ */
+ sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
+ break;
+ case IWM_UCODE_TLV_CSCHEME:
+ if ((error = iwm_store_cscheme(sc,
+ tlv_data, tlv_len)) != 0)
+ goto parse_out;
+ break;
+ case IWM_UCODE_TLV_NUM_OF_CPU:
+ if (tlv_len != sizeof(uint32_t)) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ if (le32toh(*(uint32_t*)tlv_data) != 1) {
+ printf("%s: driver supports "
+ "only TLV_NUM_OF_CPU == 1", DEVNAME(sc));
+ error = EINVAL;
+ goto parse_out;
+ }
+ break;
+ case IWM_UCODE_TLV_SEC_RT:
+ if ((error = iwm_firmware_store_section(sc,
+ IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
+ goto parse_out;
+ break;
+ case IWM_UCODE_TLV_SEC_INIT:
+ if ((error = iwm_firmware_store_section(sc,
+ IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
+ goto parse_out;
+ break;
+ case IWM_UCODE_TLV_SEC_WOWLAN:
+ if ((error = iwm_firmware_store_section(sc,
+ IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
+ goto parse_out;
+ break;
+ case IWM_UCODE_TLV_DEF_CALIB:
+ if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
+ goto parse_out;
+ break;
+ case IWM_UCODE_TLV_PHY_SKU:
+ if (tlv_len != sizeof(uint32_t)) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
+ break;
+
+ case IWM_UCODE_TLV_API_CHANGES_SET:
+ case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
+ /* ignore, not used by current driver */
+ break;
+
+ default:
+ printf("%s: unknown firmware section %d, abort\n",
+ DEVNAME(sc), tlv_type);
+ error = EINVAL;
+ goto parse_out;
+ }
+
+ len -= roundup(tlv_len, 4);
+ data += roundup(tlv_len, 4);
+ }
+
+ KASSERT(error == 0);
+
+ parse_out:
+ if (error) {
+ printf("%s: firmware parse error, "
+ "section type %d\n", DEVNAME(sc), tlv_type);
+ }
+
+ if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
+ printf("%s: device uses unsupported power ops\n", DEVNAME(sc));
+ error = ENOTSUP;
+ }
+
+ out:
+ if (error) {
+ KASSERT(error > 0);
+ fw->fw_status = -error;
+ } else {
+ fw->fw_status = IWM_FW_STATUS_DONE;
+ }
+ wakeup(&sc->sc_fw);
+
+ if (error) {
+ free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
+ fw->fw_rawdata = NULL;
+ }
+ return error;
+}
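
The parse loop above walks the TLV stream that follows the struct iwm_tlv_ucode_header: each record is a struct iwm_ucode_tlv (type and length) followed by length payload bytes, and the payload is padded to a 4-byte boundary before the next record. A rough sketch of the file layout, for illustration only:

	/*
	 * +------------------------+
	 * | iwm_tlv_ucode_header   |  magic, version, ...
	 * +------------------------+
	 * | iwm_ucode_tlv          |  type, length
	 * | payload (pad to 4)     |
	 * +------------------------+
	 * | iwm_ucode_tlv          |
	 * | ...                    |
	 * +------------------------+
	 */
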
+
+/*
+ * basic device access
+ */
+
+uint32_t
+iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
+{
+ IWM_WRITE(sc,
+ IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
+ IWM_BARRIER_READ_WRITE(sc);
+ return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
+}
+
+void
+iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
+{
+ IWM_WRITE(sc,
+ IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
+ IWM_BARRIER_WRITE(sc);
+ IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
+}
+
+/* iwlwifi: pcie/trans.c */
+int
+iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
+{
+ int offs, ret = 0;
+ uint32_t *vals = buf;
+
+ if (iwm_nic_lock(sc)) {
+ IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
+ iwm_nic_unlock(sc);
+ } else {
+ ret = EBUSY;
+ }
+ return ret;
+}
+
+/* iwlwifi: pcie/trans.c */
+int
+iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
+{
+ int offs, ret = 0;
+ const uint32_t *vals = buf;
+
+ if (iwm_nic_lock(sc)) {
+ IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
+ /* WADDR auto-increments */
+ for (offs = 0; offs < dwords; offs++) {
+ uint32_t val = vals ? vals[offs] : 0;
+ IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
+ }
+ iwm_nic_unlock(sc);
+ } else {
+ /* let's just say that it's good to notice this failure */
+ printf("%s: WARNING: write_mem failed\n", DEVNAME(sc));
+ ret = EBUSY;
+ }
+ return ret;
+}
+
+int
+iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
+{
+ return iwm_write_mem(sc, addr, &val, 1);
+}
+
+int
+iwm_poll_bit(struct iwm_softc *sc, int reg,
+ uint32_t bits, uint32_t mask, int timo)
+{
+ for (;;) {
+ if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
+ return 1;
+ }
+ if (timo < 10) {
+ return 0;
+ }
+ timo -= 10;
+ DELAY(10);
+ }
+}
+
+int
+iwm_nic_lock(struct iwm_softc *sc)
+{
+ int rv = 0;
+
+ IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
+ | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
+ rv = 1;
+ } else {
+ /* jolt */
+ IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
+ }
+
+ return rv;
+}
+
+void
+iwm_nic_unlock(struct iwm_softc *sc)
+{
+ IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
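
iwm_nic_lock() requests MAC access and keeps the device awake until the matching iwm_nic_unlock(); periphery and SRAM accesses in this driver are bracketed by the pair, bailing out if the lock cannot be taken. A minimal usage sketch of that pattern (illustrative only, not part of the committed code):

	uint32_t val;

	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, IWM_OSC_CLK);	/* device is awake here */
		iwm_nic_unlock(sc);
	} else
		return EBUSY;
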
+
+void
+iwm_set_bits_mask_prph(struct iwm_softc *sc,
+ uint32_t reg, uint32_t bits, uint32_t mask)
+{
+ uint32_t val;
+
+ /* XXX: no error path? */
+ if (iwm_nic_lock(sc)) {
+ val = iwm_read_prph(sc, reg) & mask;
+ val |= bits;
+ iwm_write_prph(sc, reg, val);
+ iwm_nic_unlock(sc);
+ }
+}
+
+void
+iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
+{
+ iwm_set_bits_mask_prph(sc, reg, bits, ~0);
+}
+
+void
+iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
+{
+ iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
+}
+
+/*
+ * DMA resource routines
+ */
+
+int
+iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
+ bus_size_t size, bus_size_t alignment)
+{
+ int nsegs, error;
+ caddr_t va;
+
+ dma->tag = tag;
+ dma->size = size;
+
+ error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
+ &dma->map);
+ if (error != 0)
+ goto fail;
+
+ error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
+ BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+
+ error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
+ BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+ dma->vaddr = va;
+
+ error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
+ BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+
+ memset(dma->vaddr, 0, size);
+ bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
+ dma->paddr = dma->map->dm_segs[0].ds_addr;
+
+ return 0;
+
+fail: iwm_dma_contig_free(dma);
+ return error;
+}
+
+void
+iwm_dma_contig_free(struct iwm_dma_info *dma)
+{
+ if (dma->map != NULL) {
+ if (dma->vaddr != NULL) {
+ bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->tag, dma->map);
+ bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
+ bus_dmamem_free(dma->tag, &dma->seg, 1);
+ dma->vaddr = NULL;
+ }
+ bus_dmamap_destroy(dma->tag, dma->map);
+ dma->map = NULL;
+ }
+}
+
+/* fwmem is used to load firmware onto the card */
+int
+iwm_alloc_fwmem(struct iwm_softc *sc)
+{
+ /* Must be aligned on a 16-byte boundary. */
+ return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
+ sc->sc_fwdmasegsz, 16);
+}
+
+void
+iwm_free_fwmem(struct iwm_softc *sc)
+{
+ iwm_dma_contig_free(&sc->fw_dma);
+}
+
+/* tx scheduler rings. not used? */
+int
+iwm_alloc_sched(struct iwm_softc *sc)
+{
+ int rv;
+
+ /* TX scheduler rings must be aligned on a 1KB boundary. */
+ rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
+ nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
+ return rv;
+}
+
+void
+iwm_free_sched(struct iwm_softc *sc)
+{
+ iwm_dma_contig_free(&sc->sched_dma);
+}
+
+/* keep-warm page is used internally by the card. see iwl-fh.h for more info */
+int
+iwm_alloc_kw(struct iwm_softc *sc)
+{
+ return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
+}
+
+void
+iwm_free_kw(struct iwm_softc *sc)
+{
+ iwm_dma_contig_free(&sc->kw_dma);
+}
+
+/* interrupt cause table */
+int
+iwm_alloc_ict(struct iwm_softc *sc)
+{
+ return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
+ IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
+}
+
+void
+iwm_free_ict(struct iwm_softc *sc)
+{
+ iwm_dma_contig_free(&sc->ict_dma);
+}
+
+int
+iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
+{
+ bus_size_t size;
+ int i, error;
+
+ ring->cur = 0;
+
+ /* Allocate RX descriptors (256-byte aligned). */
+ size = IWM_RX_RING_COUNT * sizeof(uint32_t);
+ error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
+ if (error != 0) {
+ printf("%s: could not allocate RX ring DMA memory\n",
+ DEVNAME(sc));
+ goto fail;
+ }
+ ring->desc = ring->desc_dma.vaddr;
+
+ /* Allocate RX status area (16-byte aligned). */
+ error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
+ sizeof(*ring->stat), 16);
+ if (error != 0) {
+ printf("%s: could not allocate RX status DMA memory\n",
+ DEVNAME(sc));
+ goto fail;
+ }
+ ring->stat = ring->stat_dma.vaddr;
+
+ /*
+ * Allocate and map RX buffers.
+ */
+ for (i = 0; i < IWM_RX_RING_COUNT; i++) {
+ struct iwm_rx_data *data = &ring->data[i];
+
+ memset(data, 0, sizeof(*data));
+ error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
+ IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
+ &data->map);
+ if (error != 0) {
+ printf("%s: could not create RX buf DMA map\n",
+ DEVNAME(sc));
+ goto fail;
+ }
+
+ if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
+ printf("%s: could not add mbuf to ring", DEVNAME(sc));
+ goto fail;
+ }
+ }
+ return 0;
+
+fail: iwm_free_rx_ring(sc, ring);
+ return error;
+}
+
+void
+iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
+{
+ int ntries;
+
+ if (iwm_nic_lock(sc)) {
+ IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ for (ntries = 0; ntries < 1000; ntries++) {
+ if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
+ IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
+ break;
+ DELAY(10);
+ }
+ if (ntries == 1000) {
+ printf("%s: unable to detect idle rx chan after "
+ "reset\n", DEVNAME(sc));
+ }
+ iwm_nic_unlock(sc);
+ }
+ ring->cur = 0;
+}
+
+void
+iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
+{
+ int i;
+
+ iwm_dma_contig_free(&ring->desc_dma);
+ iwm_dma_contig_free(&ring->stat_dma);
+
+ for (i = 0; i < IWM_RX_RING_COUNT; i++) {
+ struct iwm_rx_data *data = &ring->data[i];
+
+ if (data->m != NULL) {
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0,
+ data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_dmat, data->map);
+ m_freem(data->m);
+ }
+ if (data->map != NULL)
+ bus_dmamap_destroy(sc->sc_dmat, data->map);
+ }
+}
+
+int
+iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
+{
+ bus_addr_t paddr;
+ bus_size_t size;
+ int i, error;
+
+ ring->qid = qid;
+ ring->queued = 0;
+ ring->cur = 0;
+
+ /* Allocate TX descriptors (256-byte aligned). */
+ size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
+ error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
+ if (error != 0) {
+ printf("%s: could not allocate TX ring DMA memory\n",
+ DEVNAME(sc));
+ goto fail;
+ }
+ ring->desc = ring->desc_dma.vaddr;
+
+ /*
+ * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
+ * to allocate commands space for other rings.
+ */
+ if (qid > IWM_MVM_CMD_QUEUE)
+ return 0;
+
+ size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
+ error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
+ if (error != 0) {
+ printf("%s: could not allocate TX cmd DMA memory\n", DEVNAME(sc));
+ goto fail;
+ }
+ ring->cmd = ring->cmd_dma.vaddr;
+
+ paddr = ring->cmd_dma.paddr;
+ for (i = 0; i < IWM_TX_RING_COUNT; i++) {
+ struct iwm_tx_data *data = &ring->data[i];
+
+ data->cmd_paddr = paddr;
+ data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
+ + offsetof(struct iwm_tx_cmd, scratch);
+ paddr += sizeof(struct iwm_device_cmd);
+
+ error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
+ IWM_NUM_OF_TBS, MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
+ if (error != 0) {
+ printf("%s: could not create TX buf DMA map\n", DEVNAME(sc));
+ goto fail;
+ }
+ }
+ KASSERT(paddr == ring->cmd_dma.paddr + size);
+ return 0;
+
+fail: iwm_free_tx_ring(sc, ring);
+ return error;
+}
+
+void
+iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < IWM_TX_RING_COUNT; i++) {
+ struct iwm_tx_data *data = &ring->data[i];
+
+ if (data->m != NULL) {
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0,
+ data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ }
+ /* Clear TX descriptors. */
+ memset(ring->desc, 0, ring->desc_dma.size);
+ bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
+ ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
+ sc->qfullmsk &= ~(1 << ring->qid);
+ ring->queued = 0;
+ ring->cur = 0;
+}
+
+void
+iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
+{
+ int i;
+
+ iwm_dma_contig_free(&ring->desc_dma);
+ iwm_dma_contig_free(&ring->cmd_dma);
+
+ for (i = 0; i < IWM_TX_RING_COUNT; i++) {
+ struct iwm_tx_data *data = &ring->data[i];
+
+ if (data->m != NULL) {
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0,
+ data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, data->map);
+ m_freem(data->m);
+ }
+ if (data->map != NULL)
+ bus_dmamap_destroy(sc->sc_dmat, data->map);
+ }
+}
+
+/*
+ * High-level hardware frobbing routines
+ */
+
+void
+iwm_enable_rfkill_int(struct iwm_softc *sc)
+{
+ sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
+ IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
+}
+
+int
+iwm_check_rfkill(struct iwm_softc *sc)
+{
+ uint32_t v;
+ int s;
+ int rv;
+
+ s = splnet();
+
+ /*
+ * "documentation" is not really helpful here:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ *
+ * But apparently when it's off, it's on ...
+ */
+ v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
+ rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
+ if (rv) {
+ sc->sc_flags |= IWM_FLAG_RFKILL;
+ } else {
+ sc->sc_flags &= ~IWM_FLAG_RFKILL;
+ }
+
+ splx(s);
+ return rv;
+}
+
+void
+iwm_enable_interrupts(struct iwm_softc *sc)
+{
+ sc->sc_intmask = IWM_CSR_INI_SET_MASK;
+ IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
+}
+
+void
+iwm_restore_interrupts(struct iwm_softc *sc)
+{
+ IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
+}
+
+void
+iwm_disable_interrupts(struct iwm_softc *sc)
+{
+ int s = splnet();
+
+ /* disable interrupts */
+ IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
+
+ /* acknowledge all interrupts */
+ IWM_WRITE(sc, IWM_CSR_INT, ~0);
+ IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
+
+ splx(s);
+}
+
+void
+iwm_ict_reset(struct iwm_softc *sc)
+{
+ iwm_disable_interrupts(sc);
+
+ /* Reset ICT table. */
+ memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
+ sc->ict_cur = 0;
+
+ /* Set physical address of ICT table (4KB aligned). */
+ IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
+ IWM_CSR_DRAM_INT_TBL_ENABLE
+ | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
+ | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
+
+ /* Switch to ICT interrupt mode in driver. */
+ sc->sc_flags |= IWM_FLAG_USE_ICT;
+
+ /* Re-enable interrupts. */
+ IWM_WRITE(sc, IWM_CSR_INT, ~0);
+ iwm_enable_interrupts(sc);
+}
+
+#define IWM_HW_READY_TIMEOUT 50
+int
+iwm_set_hw_ready(struct iwm_softc *sc)
+{
+ IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
+ IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
+ IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ IWM_HW_READY_TIMEOUT);
+}
+#undef IWM_HW_READY_TIMEOUT
+
+int
+iwm_prepare_card_hw(struct iwm_softc *sc)
+{
+ int rv = 0;
+ int t = 0;
+
+ if (!iwm_set_hw_ready(sc))
+ goto out;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
+ IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ do {
+ if (iwm_set_hw_ready(sc))
+ goto out;
+ DELAY(200);
+ t += 200;
+ } while (t < 150000);
+
+ rv = ETIMEDOUT;
+
+ out:
+ return rv;
+}
+
+void
+iwm_apm_config(struct iwm_softc *sc)
+{
+ pcireg_t reg;
+
+ reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
+ sc->sc_cap_off + PCI_PCIE_LCSR);
+ if (reg & PCI_PCIE_LCSR_ASPM_L1) {
+ /* Um the Linux driver prints "Disabling L0S for this one ... */
+ IWM_SETBITS(sc, IWM_CSR_GIO_REG,
+ IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
+ } else {
+ /* ... and "Enabling" here */
+ IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
+ IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
+ }
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int
+iwm_apm_init(struct iwm_softc *sc)
+{
+ int error = 0;
+
+ DPRINTF(("iwm apm start\n"));
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
+ IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
+ IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
+ IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ iwm_apm_config(sc);
+
+#if 0 /* not for 7k */
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (trans->cfg->base_params->pll_cfg_val)
+ IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
+ trans->cfg->base_params->pll_cfg_val);
+#endif
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwm_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
+ printf("%s: Failed to init the card\n", DEVNAME(sc));
+ goto out;
+ }
+
+ /*
+ * This is a bit of an abuse - This is needed for 7260 / 3160
+ * only check host_interrupt_operation_mode even if this is
+ * not related to host_interrupt_operation_mode.
+ *
+ * Enable the oscillator to count wake up time for L1 exit. This
+ * consumes slightly more power (100uA) - but allows to be sure
+ * that we wake up from L1 on time.
+ *
+ * This looks weird: read twice the same register, discard the
+ * value, set a bit, and yet again, read that same register
+ * just to discard the value. But that's the way the hardware
+ * seems to like it.
+ */
+ iwm_read_prph(sc, IWM_OSC_CLK);
+ iwm_read_prph(sc, IWM_OSC_CLK);
+ iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
+ iwm_read_prph(sc, IWM_OSC_CLK);
+ iwm_read_prph(sc, IWM_OSC_CLK);
+
+ /*
+ * Enable DMA clock and wait for it to stabilize.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
+ //kpause("iwmapm", 0, mstohz(20), NULL);
+ DELAY(20);
+
+ /* Disable L1-Active */
+ iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
+ IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
+ IWM_APMG_RTC_INT_STT_RFKILL);
+
+ out:
+ if (error)
+ printf("%s: apm init error %d\n", DEVNAME(sc), error);
+ return error;
+}
+
+/* iwlwifi/pcie/trans.c */
+void
+iwm_apm_stop(struct iwm_softc *sc)
+{
+ /* stop device's busmaster DMA activity */
+ IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ if (!iwm_poll_bit(sc, IWM_CSR_RESET,
+ IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
+ printf("%s: Master Disable Timed Out, 100 usec\n", DEVNAME(sc));
+ DPRINTF(("iwm apm stop\n"));
+}
+
+/* iwlwifi pcie/trans.c */
+int
+iwm_start_hw(struct iwm_softc *sc)
+{
+ int error;
+
+ if ((error = iwm_prepare_card_hw(sc)) != 0)
+ return error;
+
+ /* Reset the entire device */
+ IWM_WRITE(sc, IWM_CSR_RESET,
+ IWM_CSR_RESET_REG_FLAG_SW_RESET |
+ IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
+ DELAY(10);
+
+ if ((error = iwm_apm_init(sc)) != 0)
+ return error;
+
+ iwm_enable_rfkill_int(sc);
+ iwm_check_rfkill(sc);
+
+ return 0;
+}
+
+/* iwlwifi pcie/trans.c */
+
+void
+iwm_stop_device(struct iwm_softc *sc)
+{
+ int chnl, ntries;
+ int qid;
+
+ /* tell the device to stop sending interrupts */
+ iwm_disable_interrupts(sc);
+
+ /* device going down, Stop using ICT table */
+ sc->sc_flags &= ~IWM_FLAG_USE_ICT;
+
+ /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
+
+ iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
+
+ /* Stop all DMA channels. */
+ if (iwm_nic_lock(sc)) {
+ for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
+ IWM_WRITE(sc,
+ IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
+ for (ntries = 0; ntries < 200; ntries++) {
+ uint32_t r;
+
+ r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
+ if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
+ chnl))
+ break;
+ DELAY(20);
+ }
+ if (ntries == 200) {
+ printf("%s: unable to detect idle tx "
+ "chan after reset\n", DEVNAME(sc));
+ }
+ }
+ iwm_nic_unlock(sc);
+ }
+
+ /* Stop RX ring. */
+ iwm_reset_rx_ring(sc, &sc->rxq);
+
+ /* Reset all TX rings. */
+ for (qid = 0; qid < nitems(sc->txq); qid++)
+ iwm_reset_tx_ring(sc, &sc->txq[qid]);
+
+ /*
+ * Power-down device's busmaster DMA clocks
+ */
+ iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
+ DELAY(5);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwm_apm_stop(sc);
+
+ /* Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * Clean again the interrupt here
+ */
+ iwm_disable_interrupts(sc);
+ /* stop and reset the on-board processor */
+ IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwm_enable_rfkill_int(sc);
+ iwm_check_rfkill(sc);
+}
+
+/* iwlwifi pcie/trans.c (always main power) */
+void
+iwm_set_pwr(struct iwm_softc *sc)
+{
+ iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
+ IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+/* iwlwifi: mvm/ops.c */
+void
+iwm_mvm_nic_config(struct iwm_softc *sc)
+{
+ uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+ uint32_t reg_val = 0;
+
+ radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
+ IWM_FW_PHY_CFG_RADIO_TYPE_POS;
+ radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
+ IWM_FW_PHY_CFG_RADIO_STEP_POS;
+ radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
+ IWM_FW_PHY_CFG_RADIO_DASH_POS;
+
+ /* SKU control */
+ reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
+ IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+ reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
+ IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+
+ /* radio configuration */
+ reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+ reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+ reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+ IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
+
+ DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
+ radio_cfg_step, radio_cfg_dash));
+
+ /*
+ * W/A : NIC is stuck in a reset state after Early PCIe power off
+ * (PCIe power is lost before PERST# is asserted), causing ME FW
+ * to lose ownership and not being able to obtain it back.
+ */
+ iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
+ IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+}
+
+int
+iwm_nic_rx_init(struct iwm_softc *sc)
+{
+ if (!iwm_nic_lock(sc))
+ return EBUSY;
+
+ /*
+ * Initialize RX ring. This is from the iwn driver.
+ */
+ memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
+
+ /* stop DMA */
+ IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
+ IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Set physical address of RX ring (256-byte aligned). */
+ IWM_WRITE(sc,
+ IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
+
+ /* Set physical address of RX status (16-byte aligned). */
+ IWM_WRITE(sc,
+ IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
+
+ /* Enable RX. */
+ /*
+ * Note: Linux driver also sets this:
+ * (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ *
+ * It causes weird behavior. YMMV.
+ */
+ IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
+ IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+ IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
+
+ IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
+ IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
+
+ /*
+ * Thus sayeth el jefe (iwlwifi) via a comment:
+ *
+ * This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example)
+ */
+ IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
+
+ iwm_nic_unlock(sc);
+
+ return 0;
+}
+
+int
+iwm_nic_tx_init(struct iwm_softc *sc)
+{
+ int qid;
+
+ if (!iwm_nic_lock(sc))
+ return EBUSY;
+
+ /* Deactivate TX scheduler. */
+ iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
+
+ /* Set physical address of "keep warm" page (16-byte aligned). */
+ IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
+
+ /* Initialize TX rings. */
+ for (qid = 0; qid < nitems(sc->txq); qid++) {
+ struct iwm_tx_ring *txq = &sc->txq[qid];
+
+ /* Set physical address of TX ring (256-byte aligned). */
+ IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
+ txq->desc_dma.paddr >> 8);
+ DPRINTF(("loading ring %d descriptors (%p) at %lx\n",
+ qid, txq->desc, txq->desc_dma.paddr >> 8));
+ }
+ iwm_nic_unlock(sc);
+
+ return 0;
+}
+
+int
+iwm_nic_init(struct iwm_softc *sc)
+{
+ int error;
+
+ iwm_apm_init(sc);
+ iwm_set_pwr(sc);
+
+ iwm_mvm_nic_config(sc);
+
+ if ((error = iwm_nic_rx_init(sc)) != 0)
+ return error;
+
+ /*
+ * Ditto for TX, from iwn
+ */
+ if ((error = iwm_nic_tx_init(sc)) != 0)
+ return error;
+
+ DPRINTF(("shadow registers enabled\n"));
+ IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
+
+ return 0;
+}
+
+enum iwm_mvm_tx_fifo {
+ IWM_MVM_TX_FIFO_BK = 0,
+ IWM_MVM_TX_FIFO_BE,
+ IWM_MVM_TX_FIFO_VI,
+ IWM_MVM_TX_FIFO_VO,
+ IWM_MVM_TX_FIFO_MCAST = 5,
+};
+
+const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
+ IWM_MVM_TX_FIFO_VO,
+ IWM_MVM_TX_FIFO_VI,
+ IWM_MVM_TX_FIFO_BE,
+ IWM_MVM_TX_FIFO_BK,
+};
+
+void
+iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
+{
+ if (!iwm_nic_lock(sc)) {
+ printf("%s: cannot enable txq %d\n", DEVNAME(sc), qid);
+ return;
+ }
+
+ /* unactivate before configuration */
+ iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
+ (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
+ | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+
+ if (qid != IWM_MVM_CMD_QUEUE) {
+ iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
+ }
+
+ iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
+
+ IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
+ iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
+
+ iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
+ /* Set scheduler window size and frame limit. */
+ iwm_write_mem32(sc,
+ sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
+ sizeof(uint32_t),
+ ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
+ (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
+ IWM_SCD_QUEUE_STTS_REG_MSK);
+
+ iwm_nic_unlock(sc);
+
+ DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
+}
+
+int
+iwm_post_alive(struct iwm_softc *sc)
+{
+ int nwords;
+ int error, chnl;
+
+ if (!iwm_nic_lock(sc))
+ return EBUSY;
+
+ if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
+ printf("%s: sched addr mismatch", DEVNAME(sc));
+ error = EINVAL;
+ goto out;
+ }
+
+ iwm_ict_reset(sc);
+
+ /* Clear TX scheduler state in SRAM. */
+ nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
+ IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
+ / sizeof(uint32_t);
+ error = iwm_write_mem(sc,
+ sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, nwords);
+ if (error)
+ goto out;
+
+ /* Set physical address of TX scheduler rings (1KB aligned). */
+ iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
+
+ iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
+
+ /* enable command channel */
+ iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
+
+ iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
+
+ /* Enable DMA channels. */
+ for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
+ IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+ }
+
+ IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
+ IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Enable L1-Active */
+ iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
+ IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+ out:
+ iwm_nic_unlock(sc);
+ return error;
+}
+
+/*
+ * PHY db
+ * iwlwifi/iwl-phy-db.c
+ */
+
+/*
+ * BEGIN iwl-phy-db.c
+ */
+
+enum iwm_phy_db_section_type {
+ IWM_PHY_DB_CFG = 1,
+ IWM_PHY_DB_CALIB_NCH,
+ IWM_PHY_DB_UNUSED,
+ IWM_PHY_DB_CALIB_CHG_PAPD,
+ IWM_PHY_DB_CALIB_CHG_TXP,
+ IWM_PHY_DB_MAX
+};
+
+#define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
+
+/*
+ * phy db - configure operational ucode
+ */
+struct iwm_phy_db_cmd {
+ uint16_t type;
+ uint16_t length;
+ uint8_t data[];
+} __packed;
+
+/* for parsing of tx power channel group data that comes from the firmware*/
+struct iwm_phy_db_chg_txp {
+ uint32_t space;
+ uint16_t max_channel_idx;
+} __packed;
+
+/*
+ * phy db - Receive phy db chunk after calibrations
+ */
+struct iwm_calib_res_notif_phy_db {
+ uint16_t type;
+ uint16_t length;
+ uint8_t data[];
+} __packed;
+
+/*
+ * get phy db section: returns a pointer to a phy db section specified by
+ * type and channel group id.
+ */
+struct iwm_phy_db_entry *
+iwm_phy_db_get_section(struct iwm_softc *sc,
+ enum iwm_phy_db_section_type type, uint16_t chg_id)
+{
+ struct iwm_phy_db *phy_db = &sc->sc_phy_db;
+
+ if (type >= IWM_PHY_DB_MAX)
+ return NULL;
+
+ switch (type) {
+ case IWM_PHY_DB_CFG:
+ return &phy_db->cfg;
+ case IWM_PHY_DB_CALIB_NCH:
+ return &phy_db->calib_nch;
+ case IWM_PHY_DB_CALIB_CHG_PAPD:
+ if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
+ return NULL;
+ return &phy_db->calib_ch_group_papd[chg_id];
+ case IWM_PHY_DB_CALIB_CHG_TXP:
+ if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
+ return NULL;
+ return &phy_db->calib_ch_group_txp[chg_id];
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+int
+iwm_phy_db_set_section(struct iwm_softc *sc,
+ struct iwm_calib_res_notif_phy_db *phy_db_notif)
+{
+ enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
+ uint16_t size = le16toh(phy_db_notif->length);
+ struct iwm_phy_db_entry *entry;
+ uint16_t chg_id = 0;
+
+ if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
+ type == IWM_PHY_DB_CALIB_CHG_TXP)
+ chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
+
+ entry = iwm_phy_db_get_section(sc, type, chg_id);
+ if (!entry)
+ return EINVAL;
+
+ if (entry->data)
+ free(entry->data, M_DEVBUF, entry->size);
+ entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
+ if (!entry->data) {
+ entry->size = 0;
+ return ENOMEM;
+ }
+ memcpy(entry->data, phy_db_notif->data, size);
+ entry->size = size;
+
+ DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d , Size: %d, data: %p\n",
+ __func__, __LINE__, type, size, entry->data));
+
+ return 0;
+}
+
+int
+iwm_is_valid_channel(uint16_t ch_id)
+{
+ if (ch_id <= 14 ||
+ (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
+ (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
+ (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
+ return 1;
+ return 0;
+}
+
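+/*
+ * Map an 802.11 channel number to its index in the PHY DB tables:
+ * channels 1-14 map to indices 0-13, 36-64 to 14-21, 100-140 to 22-32,
+ * and 145-165 to 33-38. Invalid channels map to 0xff.
+ */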
+uint8_t
+iwm_ch_id_to_ch_index(uint16_t ch_id)
+{
+ if (!iwm_is_valid_channel(ch_id))
+ return 0xff;
+
+ if (ch_id <= 14)
+ return ch_id - 1;
+ if (ch_id <= 64)
+ return (ch_id + 20) / 4;
+ if (ch_id <= 140)
+ return (ch_id - 12) / 4;
+ return (ch_id - 13) / 4;
+}
+
+
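+/*
+ * Map an 802.11 channel number to its PAPD calibration channel group:
+ * group 0 covers the 2GHz channels, groups 1-3 the 5GHz sub-bands.
+ * Invalid channels map to 0xff.
+ */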
+uint16_t
+iwm_channel_id_to_papd(uint16_t ch_id)
+{
+ if (!iwm_is_valid_channel(ch_id))
+ return 0xff;
+
+ if (1 <= ch_id && ch_id <= 14)
+ return 0;
+ if (36 <= ch_id && ch_id <= 64)
+ return 1;
+ if (100 <= ch_id && ch_id <= 140)
+ return 2;
+ return 3;
+}
+
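+/*
+ * Find the TX power channel group covering the given channel by scanning
+ * the TXP groups stored in the PHY DB. Returns 0xff if the channel is
+ * invalid or no group data is available.
+ */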
+uint16_t
+iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
+{
+ struct iwm_phy_db *phy_db = &sc->sc_phy_db;
+ struct iwm_phy_db_chg_txp *txp_chg;
+ int i;
+ uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
+
+ if (ch_index == 0xff)
+ return 0xff;
+
+ for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
+ txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
+ if (!txp_chg)
+ return 0xff;
+ /*
+ * Look for the first channel group whose max channel is
+ * higher than the wanted channel.
+ */
+ if (le16toh(txp_chg->max_channel_idx) >= ch_index)
+ return i;
+ }
+ return 0xff;
+}
+
+int
+iwm_phy_db_get_section_data(struct iwm_softc *sc,
+ uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
+{
+ struct iwm_phy_db_entry *entry;
+ uint16_t ch_group_id = 0;
+
+ /* find wanted channel group */
+ if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
+ ch_group_id = iwm_channel_id_to_papd(ch_id);
+ else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
+ ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
+
+ entry = iwm_phy_db_get_section(sc, type, ch_group_id);
+ if (!entry)
+ return EINVAL;
+
+ *data = entry->data;
+ *size = entry->size;
+
+ DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
+ __func__, __LINE__, type, *size));
+
+ return 0;
+}
+
+int
+iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
+ uint16_t length, void *data)
+{
+ struct iwm_phy_db_cmd phy_db_cmd;
+ struct iwm_host_cmd cmd = {
+ .id = IWM_PHY_DB_CMD,
+ .flags = IWM_CMD_SYNC,
+ };
+
+ DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n", type, length));
+
+ /* Set phy db cmd variables */
+ phy_db_cmd.type = htole16(type);
+ phy_db_cmd.length = htole16(length);
+
+ /* Set hcmd variables */
+ cmd.data[0] = &phy_db_cmd;
+ cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
+ cmd.data[1] = data;
+ cmd.len[1] = length;
+ cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
+
+ return iwm_send_cmd(sc, &cmd);
+}
+
+int
+iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
+ enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
+{
+ uint16_t i;
+ int err;
+ struct iwm_phy_db_entry *entry;
+
+ /* Send all the channel-specific groups to operational fw */
+ for (i = 0; i < max_ch_groups; i++) {
+ entry = iwm_phy_db_get_section(sc, type, i);
+ if (!entry)
+ return EINVAL;
+
+ if (!entry->size)
+ continue;
+
+ /* Send the requested PHY DB section */
+ err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
+ if (err) {
+ printf("%s: Can't SEND phy_db section %d (%d), err %d",
+ DEVNAME(sc), type, i, err);
+ return err;
+ }
+
+ DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
+ }
+
+ return 0;
+}
+
+int
+iwm_send_phy_db_data(struct iwm_softc *sc)
+{
+ uint8_t *data = NULL;
+ uint16_t size = 0;
+ int err;
+
+ DPRINTF(("Sending phy db data and configuration to runtime image\n"));
+
+ /* Send PHY DB CFG section */
+ err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
+ if (err) {
+ printf("%s: Cannot get Phy DB cfg section\n", DEVNAME(sc));
+ return err;
+ }
+
+ err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
+ if (err) {
+ printf("%s: Cannot send HCMD of Phy DB cfg section\n",
+ DEVNAME(sc));
+ return err;
+ }
+
+ err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
+ &data, &size, 0);
+ if (err) {
+ printf("%s: Cannot get Phy DB non specific channel section\n",
+ DEVNAME(sc));
+ return err;
+ }
+
+ err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
+ if (err) {
+ printf("%s: Cannot send HCMD of Phy DB non specific channel "
+ "sect, %d\n", DEVNAME(sc), err);
+ return err;
+ }
+
+ /* Send all the PAPD channel specific data */
+ err = iwm_phy_db_send_all_channel_groups(sc,
+ IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
+ if (err) {
+ printf("%s: Cannot send channel specific PAPD groups",
+ DEVNAME(sc));
+ return err;
+ }
+
+ /* Send all the TXP channel specific data */
+ err = iwm_phy_db_send_all_channel_groups(sc,
+ IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
+ if (err) {
+ printf("%s: Cannot send channel specific TX power groups",
+ DEVNAME(sc));
+ return err;
+ }
+
+ DPRINTF(("Finished sending phy db non channel data\n"));
+ return 0;
+}
+
+/*
+ * END iwl-phy-db.c
+ */
+
+/*
+ * BEGIN iwlwifi/mvm/time-event.c
+ */
+
+/*
+ * For the high priority TE use a time event type that has similar priority to
+ * the FW's action scan priority.
+ */
+#define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
+#define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
+
+/* used to convert from time event API v2 to v1 */
+#define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
+ IWM_TE_V2_EVENT_SOCIOPATHIC)
+static inline uint16_t
+iwm_te_v2_get_notify(uint16_t policy)
+{
+ return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
+}
+
+static inline uint16_t
+iwm_te_v2_get_dep_policy(uint16_t policy)
+{
+ return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
+ IWM_TE_V2_PLACEMENT_POS;
+}
+
+static inline uint16_t
+iwm_te_v2_get_absence(uint16_t policy)
+{
+ return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
+}
+
+void
+iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
+ struct iwm_time_event_cmd_v1 *cmd_v1)
+{
+ cmd_v1->id_and_color = cmd_v2->id_and_color;
+ cmd_v1->action = cmd_v2->action;
+ cmd_v1->id = cmd_v2->id;
+ cmd_v1->apply_time = cmd_v2->apply_time;
+ cmd_v1->max_delay = cmd_v2->max_delay;
+ cmd_v1->depends_on = cmd_v2->depends_on;
+ cmd_v1->interval = cmd_v2->interval;
+ cmd_v1->duration = cmd_v2->duration;
+ if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
+ cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
+ else
+ cmd_v1->repeat = htole32(cmd_v2->repeat);
+ cmd_v1->max_frags = htole32(cmd_v2->max_frags);
+ cmd_v1->interval_reciprocal = 0; /* unused */
+
+ cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
+ cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
+ cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
+}
+
+int
+iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
+ const struct iwm_time_event_cmd_v2 *cmd)
+{
+ struct iwm_time_event_cmd_v1 cmd_v1;
+
+ if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
+ return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
+ IWM_CMD_SYNC, sizeof(*cmd), cmd);
+
+ iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
+ return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
+ sizeof(cmd_v1), &cmd_v1);
+}
+
+int
+iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
+ void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
+{
+ int ret;
+
+ DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
+
+ ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
+ if (ret) {
+ printf("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
+ DEVNAME(sc), ret);
+ }
+
+ return ret;
+}
+
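+/*
+ * Schedule a session-protection time event: this asks the firmware to
+ * stay on the current channel for the given duration, which we use
+ * while associating with an AP.
+ */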
+void
+iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
+ uint32_t duration, uint32_t min_duration, uint32_t max_delay)
+{
+ struct iwm_time_event_cmd_v2 time_cmd;
+
+ memset(&time_cmd, 0, sizeof(time_cmd));
+
+ time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+ time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+ time_cmd.apply_time = htole32(iwm_read_prph(sc,
+ IWM_DEVICE_SYSTEM_TIME_REG));
+
+ time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
+ time_cmd.max_delay = htole32(max_delay);
+ /* TODO: why do we need to set interval = beacon interval if it is not periodic? */
+ time_cmd.interval = htole32(1);
+ time_cmd.duration = htole32(duration);
+ time_cmd.repeat = 1;
+ time_cmd.policy
+ = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
+ IWM_TE_V2_NOTIF_HOST_EVENT_END);
+
+ iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
+}
+
+/*
+ * END iwlwifi/mvm/time-event.c
+ */
+
+/*
+ * NVM read access and content parsing. We do not support
+ * external NVM or writing NVM.
+ * iwlwifi/mvm/nvm.c
+ */
+
+/* list of NVM sections we are allowed/need to read */
+const int nvm_to_read[] = {
+ IWM_NVM_SECTION_TYPE_HW,
+ IWM_NVM_SECTION_TYPE_SW,
+ IWM_NVM_SECTION_TYPE_CALIBRATION,
+ IWM_NVM_SECTION_TYPE_PRODUCTION,
+};
+
+/* Default NVM size to read */
+#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
+#define IWM_MAX_NVM_SECTION_SIZE 7000
+
+#define IWM_NVM_WRITE_OPCODE 1
+#define IWM_NVM_READ_OPCODE 0
+
+int
+iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
+ uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
+{
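+ /* XXX Only offset 0 is ever read; the offset argument is ignored. */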
+ offset = 0;
+ struct iwm_nvm_access_cmd nvm_access_cmd = {
+ .offset = htole16(offset),
+ .length = htole16(length),
+ .type = htole16(section),
+ .op_code = IWM_NVM_READ_OPCODE,
+ };
+ struct iwm_nvm_access_resp *nvm_resp;
+ struct iwm_rx_packet *pkt;
+ struct iwm_host_cmd cmd = {
+ .id = IWM_NVM_ACCESS_CMD,
+ .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
+ IWM_CMD_SEND_IN_RFKILL,
+ .data = { &nvm_access_cmd, },
+ };
+ int ret, bytes_read, offset_read;
+ uint8_t *resp_data;
+
+ cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
+
+ ret = iwm_send_cmd(sc, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
+ printf("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
+ DEVNAME(sc), pkt->hdr.flags);
+ ret = EIO;
+ goto exit;
+ }
+
+ /* Extract NVM response */
+ nvm_resp = (void *)pkt->data;
+
+ ret = le16toh(nvm_resp->status);
+ bytes_read = le16toh(nvm_resp->length);
+ offset_read = le16toh(nvm_resp->offset);
+ resp_data = nvm_resp->data;
+ if (ret) {
+ printf("%s: NVM access command failed with status %d\n",
+ DEVNAME(sc), ret);
+ ret = EINVAL;
+ goto exit;
+ }
+
+ if (offset_read != offset) {
+ printf("%s: NVM ACCESS response with invalid offset %d\n",
+ DEVNAME(sc), offset_read);
+ ret = EINVAL;
+ goto exit;
+ }
+
+ memcpy(data + offset, resp_data, bytes_read);
+ *len = bytes_read;
+
+ exit:
+ iwm_free_resp(sc, &cmd);
+ return ret;
+}
+
+/*
+ * Reads an NVM section completely.
+ * NICs prior to the 7000 family don't have a real NVM, but just read
+ * section 0, which is the EEPROM. Because EEPROM reads are not bounded
+ * by the uCode, we need to manually check in this case that we don't
+ * overflow and try to read more than the EEPROM size.
+ * For 7000 family NICs, we supply the maximal size we can read, and
+ * the uCode fills the response with as much data as fits,
+ * without overflowing, so no check is needed.
+ */
+int
+iwm_nvm_read_section(struct iwm_softc *sc,
+ uint16_t section, uint8_t *data, uint16_t *len)
+{
+ uint16_t length, seglen;
+ int error;
+
+ /* Set nvm section read length */
+ length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
+ *len = 0;
+
+ /* Read the NVM until exhausted (reading less than requested) */
+ while (seglen == length) {
+ error = iwm_nvm_read_chunk(sc,
+ section, *len, length, data, &seglen);
+ if (error) {
+ printf("%s: Cannot read NVM from section "
+ "%d offset %d, length %d\n",
+ DEVNAME(sc), section, *len, length);
+ return error;
+ }
+ *len += seglen;
+ }
+
+ DPRINTFN(4, ("NVM section %d read completed\n", section));
+ return 0;
+}
+
+/*
+ * BEGIN IWM_NVM_PARSE
+ */
+
+/* iwlwifi/iwl-nvm-parse.c */
+
+/* NVM offsets (in words) definitions */
+enum wkp_nvm_offsets {
+ /* NVM HW-Section offset (in words) definitions */
+ IWM_HW_ADDR = 0x15,
+
+/* NVM SW-Section offset (in words) definitions */
+ IWM_NVM_SW_SECTION = 0x1C0,
+ IWM_NVM_VERSION = 0,
+ IWM_RADIO_CFG = 1,
+ IWM_SKU = 2,
+ IWM_N_HW_ADDRS = 3,
+ IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
+
+/* NVM calibration section offset (in words) definitions */
+ IWM_NVM_CALIB_SECTION = 0x2B8,
+ IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
+};
+
+/* SKU Capabilities (actual values from NVM definition) */
+enum nvm_sku_bits {
+ IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
+ IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
+ IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
+ IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
+};
+
+/* radio config bits (actual values from NVM definition) */
+#define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
+#define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+#define DEFAULT_MAX_TX_POWER 16
+
+/**
+ * enum iwm_nvm_channel_flags - channel flags in NVM
+ * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
+ * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @IWM_NVM_CHANNEL_RADAR: radar detection required
+ * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
+ * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
+ * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
+ * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
+ */
+enum iwm_nvm_channel_flags {
+ IWM_NVM_CHANNEL_VALID = (1 << 0),
+ IWM_NVM_CHANNEL_IBSS = (1 << 1),
+ IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
+ IWM_NVM_CHANNEL_RADAR = (1 << 4),
+ IWM_NVM_CHANNEL_DFS = (1 << 7),
+ IWM_NVM_CHANNEL_WIDE = (1 << 8),
+ IWM_NVM_CHANNEL_40MHZ = (1 << 9),
+ IWM_NVM_CHANNEL_80MHZ = (1 << 10),
+ IWM_NVM_CHANNEL_160MHZ = (1 << 11),
+};
+
+void
+iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwm_nvm_data *data = &sc->sc_nvm;
+ int ch_idx;
+ struct ieee80211_channel *channel;
+ uint16_t ch_flags;
+ int is_5ghz;
+ int flags, hw_value;
+
+ for (ch_idx = 0; ch_idx < nitems(iwm_nvm_channels); ch_idx++) {
+ ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
+
+ if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
+ !data->sku_cap_band_52GHz_enable)
+ ch_flags &= ~IWM_NVM_CHANNEL_VALID;
+
+ if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
+ DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
+ iwm_nvm_channels[ch_idx],
+ ch_flags,
+ (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
+ "5.2" : "2.4"));
+ continue;
+ }
+
+ hw_value = iwm_nvm_channels[ch_idx];
+ channel = &ic->ic_channels[hw_value];
+
+ is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
+ if (!is_5ghz) {
+ flags = IEEE80211_CHAN_2GHZ;
+ channel->ic_flags
+ = IEEE80211_CHAN_CCK
+ | IEEE80211_CHAN_OFDM
+ | IEEE80211_CHAN_DYN
+ | IEEE80211_CHAN_2GHZ;
+ } else {
+ flags = IEEE80211_CHAN_5GHZ;
+ channel->ic_flags =
+ IEEE80211_CHAN_A;
+ }
+ channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
+
+ if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
+ channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
+ }
+}
+
+int
+iwm_parse_nvm_data(struct iwm_softc *sc,
+ const uint16_t *nvm_hw, const uint16_t *nvm_sw,
+ const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
+{
+ struct iwm_nvm_data *data = &sc->sc_nvm;
+ uint8_t hw_addr[ETHER_ADDR_LEN];
+ uint16_t radio_cfg, sku;
+
+ data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
+
+ radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
+ data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
+ data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+
+ sku = le16_to_cpup(nvm_sw + IWM_SKU);
+ data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
+ data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
+ data->sku_cap_11n_enable = 0;
+
+ if (!data->valid_tx_ant || !data->valid_rx_ant) {
+ printf("%s: invalid antennas (0x%x, 0x%x)\n",
+ DEVNAME(sc), data->valid_tx_ant,
+ data->valid_rx_ant);
+ return EINVAL;
+ }
+
+ data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
+
+ data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
+ data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
+
+ /* The byte order is little endian 16 bit, meaning 214365 */
+ memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+
+ iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
+ data->calib_version = 255; /* TODO:
+ this value will prevent some checks from
+ failing; we need to check whether this
+ field is still needed, and if so,
+ where it lives in the NVM */
+
+ return 0;
+}
+
+/*
+ * END NVM PARSE
+ */
+
+struct iwm_nvm_section {
+ uint16_t length;
+ const uint8_t *data;
+};
+
+#define IWM_FW_VALID_TX_ANT(sc) \
+ ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
+ >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
+#define IWM_FW_VALID_RX_ANT(sc) \
+ ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
+ >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
+
+int
+iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
+{
+ const uint16_t *hw, *sw, *calib;
+
+ /* Checking for required sections */
+ if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
+ !sections[IWM_NVM_SECTION_TYPE_HW].data) {
+ printf("%s: Can't parse empty NVM sections\n", DEVNAME(sc));
+ return ENOENT;
+ }
+
+ hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
+ sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
+ calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
+ return iwm_parse_nvm_data(sc, hw, sw, calib,
+ IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
+}
+
+int
+iwm_nvm_init(struct iwm_softc *sc)
+{
+ struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
+ int i, section, error;
+ uint16_t len;
+ uint8_t *nvm_buffer, *temp;
+
+ /* Read From FW NVM */
+ DPRINTF(("Read NVM\n"));
+
+ /* TODO: find correct NVM max size for a section */
+ nvm_buffer = malloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_WAIT);
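+ /* Read each required section into a private copy for parsing. */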
+ for (i = 0; i < nitems(nvm_to_read); i++) {
+ section = nvm_to_read[i];
+ KASSERT(section < nitems(nvm_sections));
+
+ error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
+ if (error)
+ break;
+
+ temp = malloc(len, M_DEVBUF, M_WAIT);
+ memcpy(temp, nvm_buffer, len);
+ nvm_sections[section].data = temp;
+ nvm_sections[section].length = len;
+ }
+ free(nvm_buffer, M_DEVBUF, IWM_OTP_LOW_IMAGE_SIZE);
+ if (error)
+ return error;
+
+ return iwm_parse_nvm_sections(sc, nvm_sections);
+}
+
+/*
+ * Firmware loading gunk. This is kind of a weird hybrid between the
+ * iwn driver and the Linux iwlwifi driver.
+ */
+
+int
+iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
+ const uint8_t *section, uint32_t byte_cnt)
+{
+ struct iwm_dma_info *dma = &sc->fw_dma;
+ int error;
+
+ /* Copy firmware section into pre-allocated DMA-safe memory. */
+ memcpy(dma->vaddr, section, byte_cnt);
+ bus_dmamap_sync(sc->sc_dmat,
+ dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
+
+ if (!iwm_nic_lock(sc))
+ return EBUSY;
+
+ sc->sc_fw_chunk_done = 0;
+
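+ /*
+ * Program the service DMA channel to copy this chunk from our DMA
+ * buffer to device memory at dst_addr; sc_fw_chunk_done is set from
+ * interrupt context once the transfer completes.
+ */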
+ IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
+ IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+ IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
+ dst_addr);
+ IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
+ dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+ IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
+ (iwm_get_dma_hi_addr(dma->paddr)
+ << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+ IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
+ 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+ IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
+ IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ iwm_nic_unlock(sc);
+
+ /* wait 1s for this segment to load */
+ while (!sc->sc_fw_chunk_done)
+ if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
+ break;
+
+ return error;
+}
+
+int
+iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
+{
+ struct iwm_fw_sects *fws;
+ int error, i, w;
+ void *data;
+ uint32_t dlen;
+ uint32_t offset;
+
+ sc->sc_uc.uc_intr = 0;
+
+ fws = &sc->sc_fw.fw_sects[ucode_type];
+ for (i = 0; i < fws->fw_count; i++) {
+ data = fws->fw_sect[i].fws_data;
+ dlen = fws->fw_sect[i].fws_len;
+ offset = fws->fw_sect[i].fws_devoff;
+ DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
+ ucode_type, offset, dlen));
+ error = iwm_firmware_load_chunk(sc, offset, data, dlen);
+ if (error) {
+ DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u returned error %02d\n", i, fws->fw_count, error));
+ return error;
+ }
+ }
+
+ /* wait for the firmware to load */
+ IWM_WRITE(sc, IWM_CSR_RESET, 0);
+
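+ /* Wait at most one second for the firmware alive notification. */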
+ for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
+ error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
+ }
+
+ return error;
+}
+
+/* iwlwifi: pcie/trans.c */
+int
+iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
+{
+ int error;
+
+ IWM_WRITE(sc, IWM_CSR_INT, ~0);
+
+ if ((error = iwm_nic_init(sc)) != 0) {
+ printf("%s: Unable to init nic\n", DEVNAME(sc));
+ return error;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
+ IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
+ IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ IWM_WRITE(sc, IWM_CSR_INT, ~0);
+ iwm_enable_interrupts(sc);
+
+ /* really make sure rfkill handshake bits are cleared */
+ /* maybe we should write a few times more? just to make sure */
+ IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
+ IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Load the given image to the HW */
+ return iwm_load_firmware(sc, ucode_type);
+}
+
+int
+iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
+{
+ return iwm_post_alive(sc);
+}
+
+int
+iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
+{
+ struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
+ .valid = htole32(valid_tx_ant),
+ };
+
+ return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
+ IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+/* iwlwifi: mvm/fw.c */
+int
+iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
+{
+ struct iwm_phy_cfg_cmd phy_cfg_cmd;
+ enum iwm_ucode_type ucode_type = sc->sc_uc_current;
+
+ /* Set parameters */
+ phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
+ phy_cfg_cmd.calib_control.event_trigger =
+ sc->sc_default_calib[ucode_type].event_trigger;
+ phy_cfg_cmd.calib_control.flow_trigger =
+ sc->sc_default_calib[ucode_type].flow_trigger;
+
+ DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
+ return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
+ sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
+int
+iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
+ enum iwm_ucode_type ucode_type)
+{
+ enum iwm_ucode_type old_type = sc->sc_uc_current;
+ int error;
+
+ if ((error = iwm_read_firmware(sc)) != 0)
+ return error;
+
+ sc->sc_uc_current = ucode_type;
+ error = iwm_start_fw(sc, ucode_type);
+ if (error) {
+ sc->sc_uc_current = old_type;
+ return error;
+ }
+
+ return iwm_fw_alive(sc, sc->sched_base);
+}
+
+/*
+ * mvm misc bits
+ */
+
+/*
+ * follows iwlwifi/fw.c
+ */
+int
+iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
+{
+ int error;
+
+ /* do not operate with rfkill switch turned on */
+ if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
+ printf("%s: rfkill active, no go\n", DEVNAME(sc));
+ return EPERM;
+ }
+
+ sc->sc_init_complete = 0;
+ if ((error = iwm_mvm_load_ucode_wait_alive(sc,
+ IWM_UCODE_TYPE_INIT)) != 0)
+ return error;
+
+ if (justnvm) {
+ if ((error = iwm_nvm_init(sc)) != 0) {
+ printf("%s: failed to read nvm\n", DEVNAME(sc));
+ return error;
+ }
+ memcpy(&sc->sc_ic.ic_myaddr,
+ &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);
+
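+ /*
+ * Allocate a scan command buffer large enough for the maximum
+ * probe request plus all scan channels.
+ */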
+ sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
+ + sc->sc_capa_max_probe_len
+ + IWM_MAX_NUM_SCAN_CHANNELS
+ * sizeof(struct iwm_scan_channel);
+ sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF, M_WAIT);
+
+ return 0;
+ }
+
+ /* Send TX valid antennas before triggering calibrations */
+ if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
+ return error;
+
+ /*
+ * Send phy configurations command to init uCode
+ * to start the 16.0 uCode init image internal calibrations.
+ */
+ if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
+ printf("%s: Failed to run INIT "
+ "calibrations: %d\n", DEVNAME(sc), error);
+ return error;
+ }
+
+ /*
+ * Nothing to do but wait for the init complete notification
+ * from the firmware
+ */
+ while (!sc->sc_init_complete)
+ if ((error = tsleep(&sc->sc_init_complete,
+ 0, "iwminit", 2*hz)) != 0)
+ break;
+
+ return error;
+}
+
+/*
+ * receive side
+ */
+
+/* (re)stock rx ring, called at init-time and at runtime */
+int
+iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
+{
+ struct iwm_rx_ring *ring = &sc->rxq;
+ struct iwm_rx_data *data = &ring->data[idx];
+ struct mbuf *m;
+ int error;
+ int fatal = 0;
+
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return ENOBUFS;
+
+ if (size <= MCLBYTES) {
+ MCLGET(m, M_DONTWAIT);
+ } else {
+ MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
+ }
+ if ((m->m_flags & M_EXT) == 0) {
+ m_freem(m);
+ return ENOBUFS;
+ }
+
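+ /*
+ * If this slot already had an mbuf loaded, unload it first; should
+ * the new mbuf then fail to load we cannot recover, so that case
+ * is treated as fatal below.
+ */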
+ if (data->m != NULL) {
+ bus_dmamap_unload(sc->sc_dmat, data->map);
+ fatal = 1;
+ }
+
+ m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+ if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
+ BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
+ /* XXX */
+ if (fatal)
+ panic("iwm: could not load RX mbuf");
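+ /*
+ * Channel-group sections carry their group id in the first
+ * 16 bits of the payload.
+ */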
+ m_freem(m);
+ return error;
+ }
+ data->m = m;
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
+
+ /* Update RX descriptor. */
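+ /* The hardware wants the receive buffer address in 256-byte units. */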
+ ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
+ bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
+ idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
+
+ return 0;
+}
+
+/* iwlwifi: mvm/rx.c */
+#define IWM_RSSI_OFFSET 50
+int
+iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
+{
+ int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
+ uint32_t agc_a, agc_b;
+ uint32_t val;
+
+ val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
+ agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
+ agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
+
+ val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
+ rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
+ rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
+
+ /*
+ * dBm = rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal.
+ */
+ rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
+ rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
+ max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
+
+ DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
+ rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
+
+ return max_rssi_dbm;
+}
+
+/* iwlwifi: mvm/rx.c */
+/*
+ * iwm_mvm_get_signal_strength - use new rx PHY INFO API
+ * Values are reported by the fw as positive values - need to negate
+ * to obtain their dBm. Account for missing antennas by replacing 0
+ * values by -256 dBm: practically 0 power and a non-feasible 8 bit value.
+ */
+int
+iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
+{
+ int energy_a, energy_b, energy_c, max_energy;
+ uint32_t val;
+
+ val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
+ energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
+ IWM_RX_INFO_ENERGY_ANT_A_POS;
+ energy_a = energy_a ? -energy_a : -256;
+ energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
+ IWM_RX_INFO_ENERGY_ANT_B_POS;
+ energy_b = energy_b ? -energy_b : -256;
+ energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
+ IWM_RX_INFO_ENERGY_ANT_C_POS;
+ energy_c = energy_c ? -energy_c : -256;
+ max_energy = MAX(energy_a, energy_b);
+ max_energy = MAX(max_energy, energy_c);
+
+ DPRINTFN(12, ("energy In A %d B %d C %d , and max %d\n",
+ energy_a, energy_b, energy_c, max_energy));
+
+ return max_energy;
+}
+
+void
+iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
+ struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
+{
+ struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
+
+ DPRINTFN(20, ("received PHY stats\n"));
+ bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
+ sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
+
+ memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
+}
+
+/*
+ * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
+ *
+ * Handles the actual data of the Rx packet from the fw
+ */
+void
+iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
+ struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_frame *wh;
+ struct ieee80211_node *ni;
+ struct ieee80211_channel *c = NULL;
+ struct ieee80211_rxinfo rxi;
+ struct mbuf *m;
+ struct iwm_rx_phy_info *phy_info;
+ struct iwm_rx_mpdu_res_start *rx_res;
+ int device_timestamp;
+ uint32_t len;
+ uint32_t rx_pkt_status;
+ int rssi;
+
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
+ BUS_DMASYNC_POSTREAD);
+
+ phy_info = &sc->sc_last_phy_info;
+ rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
+ wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
+ len = le16toh(rx_res->byte_count);
+ rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
+
+ m = data->m;
+ m->m_data = pkt->data + sizeof(*rx_res);
+ m->m_pkthdr.len = m->m_len = len;
+
+ if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
+ DPRINTF(("dsp size out of range [0,20]: %d\n",
+ phy_info->cfg_phy_cnt));
+ return;
+ }
+
+ if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
+ !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
+ DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
+ return; /* drop */
+ }
+
+ device_timestamp = le32toh(phy_info->system_timestamp);
+
+ if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
+ rssi = iwm_mvm_get_signal_strength(sc, phy_info);
+ } else {
+ rssi = iwm_mvm_calc_rssi(sc, phy_info);
+ }
+ rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
+ rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
+
+ /* replenish ring for the buffer we're going to feed to the sharks */
+ if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
+ return;
+
+ m->m_pkthdr.rcvif = IC2IFP(ic);
+
+ if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
+ if (le32toh(phy_info->channel) < nitems(ic->ic_channels))
+ c = &ic->ic_channels[le32toh(phy_info->channel)];
+ }
+
+ memset(&rxi, 0, sizeof(rxi));
+ rxi.rxi_rssi = rssi;
+ rxi.rxi_tstamp = device_timestamp;
+ ni = ieee80211_find_rxnode(ic, wh);
+ if (c)
+ ni->ni_chan = c;
+ ieee80211_input(IC2IFP(ic), m, ni, &rxi);
+ ieee80211_release_node(ic, ni);
+}
+
+void
+iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
+ struct iwm_node *in)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
+ struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
+ int failack = tx_resp->failure_frame;
+
+ KASSERT(tx_resp->frame_count == 1);
+
+ /* Update rate control statistics. */
+ in->in_amn.amn_txcnt++;
+ if (failack > 0) {
+ in->in_amn.amn_retrycnt++;
+ }
+
+ if (status != IWM_TX_STATUS_SUCCESS &&
+ status != IWM_TX_STATUS_DIRECT_DONE)
+ ifp->if_oerrors++;
+ else
+ ifp->if_opackets++;
+}
+
+void
+iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
+ struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
+ struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
+ int idx = cmd_hdr->idx;
+ int qid = cmd_hdr->qid;
+ struct iwm_tx_ring *ring = &sc->txq[qid];
+ struct iwm_tx_data *txd = &ring->data[idx];
+ struct iwm_node *in = txd->in;
+
+ if (txd->done) {
+ printf("%s: got tx interrupt that's already been handled!\n",
+ DEVNAME(sc));
+ return;
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
+ BUS_DMASYNC_POSTREAD);
+
+ sc->sc_tx_timer = 0;
+
+ iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
+
+ /* Unmap and free mbuf. */
+ bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, txd->map);
+ m_freem(txd->m);
+
+ DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
+ KASSERT(txd->done == 0);
+ txd->done = 1;
+ KASSERT(txd->in);
+
+ txd->m = NULL;
+ txd->in = NULL;
+ ieee80211_release_node(ic, &in->in_ni);
+
+ if (--ring->queued < IWM_TX_RING_LOMARK) {
+ sc->qfullmsk &= ~(1 << ring->qid);
+ if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
+ ifp->if_flags &= ~IFF_OACTIVE;
+ /*
+ * Well, we're in interrupt context, but then again
+ * I guess net80211 does all sorts of stunts in
+ * interrupt context, so maybe this is no biggie.
+ */
+ (*ifp->if_start)(ifp);
+ }
+ }
+}
+
+/*
+ * BEGIN iwlwifi/mvm/binding.c
+ */
+
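+/*
+ * Bind our MAC context to a PHY context (i.e. to a channel configuration),
+ * telling the firmware which channel this interface operates on.
+ */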
+int
+iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
+{
+ struct iwm_binding_cmd cmd;
+ struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
+ int i, ret;
+ uint32_t status;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color
+ = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
+ cmd.action = htole32(action);
+ cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
+
+ cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+ for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
+ cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
+
+ status = 0;
+ ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
+ sizeof(cmd), &cmd, &status);
+ if (ret) {
+ printf("%s: Failed to send binding (action:%d): %d\n",
+ DEVNAME(sc), action, ret);
+ return ret;
+ }
+
+ if (status) {
+ printf("%s: Binding command failed: %u\n", DEVNAME(sc), status);
+ ret = EIO;
+ }
+
+ return ret;
+}
+
+int
+iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
+{
+ return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
+}
+
+int
+iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
+{
+ return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
+}
+
+/*
+ * END iwlwifi/mvm/binding.c
+ */
+
+/*
+ * BEGIN iwlwifi/mvm/phy-ctxt.c
+ */
+
+/*
+ * Construct the generic fields of the PHY context command
+ */
+void
+iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
+ struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
+{
+ memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
+
+ cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
+ ctxt->color));
+ cmd->action = htole32(action);
+ cmd->apply_time = htole32(apply_time);
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+void
+iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
+ struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
+ uint8_t chains_static, uint8_t chains_dynamic)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint8_t active_cnt, idle_cnt;
+
+ cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
+ IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
+
+ cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
+ cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
+ cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
+
+ /* Set the RX chains. */
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+
+ cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
+ IWM_PHY_RX_CHAIN_VALID_POS);
+ cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
+ cmd->rxchain_info |= htole32(active_cnt <<
+ IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
+
+ cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
+}
+
+/*
+ * Send a PHY context command: either when the phy configuration is
+ * applied for the first time, or when it has changed since the
+ * previous apply.
+ */
+int
+iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
+ struct iwm_mvm_phy_ctxt *ctxt,
+ uint8_t chains_static, uint8_t chains_dynamic,
+ uint32_t action, uint32_t apply_time)
+{
+ struct iwm_phy_context_cmd cmd;
+ int ret;
+
+ /* Set the command header fields */
+ iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
+
+ /* Set the command data */
+ iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
+ chains_static, chains_dynamic);
+
+ ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
+ sizeof(struct iwm_phy_context_cmd), &cmd);
+ if (ret) {
+ DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
+ }
+ return ret;
+}
+
+/*
+ * Send a command to add a PHY context based on the current HW configuration.
+ */
+int
+iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
+ struct ieee80211_channel *chan,
+ uint8_t chains_static, uint8_t chains_dynamic)
+{
+ ctxt->channel = chan;
+ return iwm_mvm_phy_ctxt_apply(sc, ctxt,
+ chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
+}
+
+/*
+ * Send a command to modify the PHY context based on the current HW
+ * configuration. Note that the function does not check that the configuration
+ * changed.
+ */
+int
+iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
+ struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
+ uint8_t chains_static, uint8_t chains_dynamic)
+{
+ ctxt->channel = chan;
+ return iwm_mvm_phy_ctxt_apply(sc, ctxt,
+ chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
+}
+
+/*
+ * END iwlwifi/mvm/phy-ctxt.c
+ */
+
+/*
+ * transmit side
+ */
+
+/*
+ * Send a command to the firmware. We try to implement the Linux
+ * driver interface for the routine.
+ * Mostly from if_iwn (iwn_cmd()).
+ *
+ * For now, we always copy the first part and map the second one (if it exists).
+ */
+int
+iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
+{
+ struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
+ struct iwm_tfd *desc;
+ struct iwm_tx_data *data;
+ struct iwm_device_cmd *cmd;
+ struct mbuf *m;
+ bus_addr_t paddr;
+ uint32_t addr_lo;
+ int error, i, paylen, off, s;
+ int code;
+ int async, wantresp;
+
+ code = hcmd->id;
+ async = hcmd->flags & IWM_CMD_ASYNC;
+ wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
+
+ for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
+ paylen += hcmd->len[i];
+ }
+
+ /* if the command wants an answer, busy sc_cmd_resp */
+ if (wantresp) {
+ KASSERT(!async);
+ while (sc->sc_wantresp != -1)
+ tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
+ sc->sc_wantresp = ring->qid << 16 | ring->cur;
+ DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
+ }
+
+ /*
+ * Is the hardware still available? (after e.g. above wait).
+ */
+ s = splnet();
+ if (sc->sc_flags & IWM_FLAG_STOPPED) {
+ error = ENXIO;
+ goto out;
+ }
+
+ desc = &ring->desc[ring->cur];
+ data = &ring->data[ring->cur];
+
+ if (paylen > sizeof(cmd->data)) {
+ /* Command is too large */
+ if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
+ error = EINVAL;
+ goto out;
+ }
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ error = ENOMEM;
+ goto out;
+ }
+ MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
+ if (!(m->m_flags & M_EXT)) {
+ m_freem(m);
+ error = ENOMEM;
+ goto out;
+ }
+ cmd = mtod(m, struct iwm_device_cmd *);
+ error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
+ hcmd->len[0], NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+ if (error != 0) {
+ m_freem(m);
+ goto out;
+ }
+ data->m = m;
+ paddr = data->map->dm_segs[0].ds_addr;
+ } else {
+ cmd = &ring->cmd[ring->cur];
+ paddr = data->cmd_paddr;
+ }
+
+ cmd->hdr.code = code;
+ cmd->hdr.flags = 0;
+ cmd->hdr.qid = ring->qid;
+ cmd->hdr.idx = ring->cur;
+
+ for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
+ if (hcmd->len[i] == 0)
+ continue;
+ memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
+ off += hcmd->len[i];
+ }
+ KASSERT(off == paylen);
+
+ /* lo field is not aligned */
+ addr_lo = htole32((uint32_t)paddr);
+ memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
+ desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
+ | ((sizeof(cmd->hdr) + paylen) << 4));
+ desc->num_tbs = 1;
+
+ DPRINTFN(8, ("iwm_send_cmd 0x%x size=%lu %s\n",
+ code, hcmd->len[0] + hcmd->len[1] + sizeof(cmd->hdr),
+ async ? " (async)" : ""));
+
+ if (hcmd->len[0] > sizeof(cmd->data)) {
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0, hcmd->len[0],
+ BUS_DMASYNC_PREWRITE);
+ } else {
+ bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
+ (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
+ hcmd->len[0] + 4, BUS_DMASYNC_PREWRITE);
+ }
+ bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
+ (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
+ sizeof (*desc), BUS_DMASYNC_PREWRITE);
+
+ IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
+ printf("%s: acquiring device failed\n", DEVNAME(sc));
+ error = EBUSY;
+ goto out;
+ }
+
+#if 0
+ iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
+#endif
+ DPRINTF(("sending command 0x%x qid %d, idx %d\n",
+ code, ring->qid, ring->cur));
+
+ /* Kick command ring. */
+ ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
+ IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
+
+ if (!async) {
+ /* m..m-mmyy-mmyyyy-mym-ym m-my generation */
+ int generation = sc->sc_generation;
+ error = tsleep(desc, PCATCH, "iwmcmd", hz);
+ if (error == 0) {
+ /* if hardware is no longer up, return error */
+ if (generation != sc->sc_generation) {
+ error = ENXIO;
+ } else {
+ hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
+ }
+ }
+ }
+ out:
+ if (wantresp && error != 0) {
+ iwm_free_resp(sc, hcmd);
+ }
+ splx(s);
+
+ return error;
+}
+
+/* iwlwifi: mvm/utils.c */
+int
+iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
+ uint32_t flags, uint16_t len, const void *data)
+{
+ struct iwm_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwm_send_cmd(sc, &cmd);
+}
+
+/* iwlwifi: mvm/utils.c */
+int
+iwm_mvm_send_cmd_status(struct iwm_softc *sc,
+ struct iwm_host_cmd *cmd, uint32_t *status)
+{
+ struct iwm_rx_packet *pkt;
+ struct iwm_cmd_response *resp;
+ int error, resp_len;
+
+ /* lockdep_assert_held(&mvm->mutex); */
+
+ KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
+ cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;
+
+ if ((error = iwm_send_cmd(sc, cmd)) != 0)
+ return error;
+ pkt = cmd->resp_pkt;
+
+ /* Can happen if RFKILL is asserted */
+ if (!pkt) {
+ error = 0;
+ goto out_free_resp;
+ }
+
+ if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
+ error = EIO;
+ goto out_free_resp;
+ }
+
+ resp_len = iwm_rx_packet_payload_len(pkt);
+ if (resp_len != sizeof(*resp)) {
+ error = EIO;
+ goto out_free_resp;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32toh(resp->status);
+ out_free_resp:
+ iwm_free_resp(sc, cmd);
+ return error;
+}
+
+/* iwlwifi/mvm/utils.c */
+int
+iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
+ uint16_t len, const void *data, uint32_t *status)
+{
+ struct iwm_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ };
+
+ return iwm_mvm_send_cmd_status(sc, &cmd, status);
+}
+
+void
+iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
+{
+ KASSERT(sc->sc_wantresp != -1);
+ KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
+ == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
+ sc->sc_wantresp = -1;
+ wakeup(&sc->sc_wantresp);
+}
+
+/*
+ * Process a "command done" firmware notification. This is where we wakeup
+ * processes waiting for a synchronous command completion.
+ * from if_iwn
+ */
+void
+iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
+{
+ struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
+ struct iwm_tx_data *data;
+
+ if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
+ return; /* Not a command ack. */
+ }
+
+ data = &ring->data[pkt->hdr.idx];
+
+ /* If the command was mapped in an mbuf, free it. */
+ if (data->m != NULL) {
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0,
+ data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ wakeup(&ring->desc[pkt->hdr.idx]);
+}
+
+#if 0
+/*
+ * necessary only for block ack mode
+ */
+void
+iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
+ uint16_t len)
+{
+ struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
+ uint16_t w_val;
+
+ scd_bc_tbl = sc->sched_dma.vaddr;
+
+ len += 8; /* magic numbers came naturally from paris */
+ if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
+ len = roundup(len, 4) / 4;
+
+ w_val = htole16(sta_id << 12 | len);
+
+ /* Update TX scheduler. */
+ scd_bc_tbl[qid].tfd_offset[idx] = w_val;
+ bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
+ (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
+ (char *)(void *)sc->sched_dma.vaddr,
+ sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
+
+ /* I really wonder what this is ?!? */
+ if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
+ scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
+ bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
+ (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
+ (char *)(void *)sc->sched_dma.vaddr,
+ sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
+ }
+}
+#endif
+
+/*
+ * Fill in various bits for management frames, and leave them
+ * unfilled for data frames (firmware takes care of that).
+ */
+void
+iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
+ struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
+{
+ const struct iwm_rate *rinfo;
+ int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ int ridx, rate_flags;
+ int nrates = in->in_ni.ni_rates.rs_nrates;
+
+ tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
+ tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
+
+ /* for data frames, use RS table */
+ if (type == IEEE80211_FC0_TYPE_DATA) {
+ if (sc->sc_fixed_ridx != -1) {
+ tx->initial_rate_index = sc->sc_fixed_ridx;
+ } else {
+ tx->initial_rate_index = (nrates-1) - in->in_ni.ni_txrate;
+ }
+ tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
+ DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
+ return;
+ }
+
+ /* for non-data, use the lowest supported rate */
+ ridx = in->in_ridx[0];
+ rinfo = &iwm_rates[ridx];
+
+ rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
+ if (IWM_RIDX_IS_CCK(ridx))
+ rate_flags |= IWM_RATE_MCS_CCK_MSK;
+ tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
+}
+
+#define TB0_SIZE 16
+int
+iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwm_node *in = (void *)ni;
+ struct iwm_tx_ring *ring;
+ struct iwm_tx_data *data;
+ struct iwm_tfd *desc;
+ struct iwm_device_cmd *cmd;
+ struct iwm_tx_cmd *tx;
+ struct ieee80211_frame *wh;
+ struct ieee80211_key *k = NULL;
+ struct mbuf *m1;
+ uint32_t flags;
+ u_int hdrlen;
+ bus_dma_segment_t *seg;
+ uint8_t tid, type;
+ int i, totlen, error, pad;
+ int hdrlen2;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ hdrlen = ieee80211_get_hdrlen(wh);
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+
+ hdrlen2 = (ieee80211_has_qos(wh)) ?
+ sizeof (struct ieee80211_qosframe) :
+ sizeof (struct ieee80211_frame);
+
+ if (hdrlen != hdrlen2)
+ printf("%s: hdrlen error (%d != %d)\n",
+ DEVNAME(sc), hdrlen, hdrlen2);
+
+ tid = 0;
+
+ ring = &sc->txq[ac];
+ desc = &ring->desc[ring->cur];
+ memset(desc, 0, sizeof(*desc));
+ data = &ring->data[ring->cur];
+
+ /* Encrypt the frame if need be. */
+ if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
+ /* Retrieve key for TX && do software encryption. */
+ k = ieee80211_get_txkey(ic, wh, ni);
+ if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
+ return ENOBUFS;
+ /* 802.11 header may have moved. */
+ wh = mtod(m, struct ieee80211_frame *);
+ }
+ totlen = m->m_pkthdr.len;
+
+ /* Fill out iwm_tx_cmd to send to the firmware */
+ cmd = &ring->cmd[ring->cur];
+ cmd->hdr.code = IWM_TX_CMD;
+ cmd->hdr.flags = 0;
+ cmd->hdr.qid = ring->qid;
+ cmd->hdr.idx = ring->cur;
+
+ tx = (void *)cmd->data;
+ memset(tx, 0, sizeof(*tx));
+
+ flags = 0;
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ flags |= IWM_TX_CMD_FLG_ACK;
+ }
+
+ if (type != IEEE80211_FC0_TYPE_DATA
+ && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
+ && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
+ }
+
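+ /*
+ * Multicast and non-data frames go to the auxiliary station;
+ * unicast data frames use the BSS station (IWM_STATION_ID).
+ */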
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+ type != IEEE80211_FC0_TYPE_DATA)
+ tx->sta_id = sc->sc_aux_sta.sta_id;
+ else
+ tx->sta_id = IWM_STATION_ID;
+
+ if (type == IEEE80211_FC0_TYPE_MGT) {
+ uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+
+ if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
+ subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
+ tx->pm_frame_timeout = htole16(3);
+ else
+ tx->pm_frame_timeout = htole16(2);
+ } else {
+ tx->pm_frame_timeout = htole16(0);
+ }
+
+ if (hdrlen & 3) {
+ /* First segment length must be a multiple of 4. */
+ flags |= IWM_TX_CMD_FLG_MH_PAD;
+ pad = 4 - (hdrlen & 3);
+ } else
+ pad = 0;
+
+ tx->driver_txop = 0;
+ tx->next_frame_len = 0;
+
+ tx->len = htole16(totlen);
+ tx->tid_tspec = tid;
+ tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
+
+ /* Set physical address of "scratch area". */
+ tx->dram_lsb_ptr = htole32(data->scratch_paddr);
+ tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
+
+ /* Copy 802.11 header in TX command. */
+ memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
+
+ flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
+
+ tx->sec_ctl = 0;
+ tx->tx_flags |= htole32(flags);
+
+ iwm_tx_fill_cmd(sc, in, wh, tx);
+
+ /* Trim 802.11 header. */
+ m_adj(m, hdrlen);
+
+ error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
+ BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+ if (error != 0) {
+ if (error != EFBIG) {
+ printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
+ error);
+ m_freem(m);
+ return error;
+ }
+ /* Too many DMA segments, linearize mbuf. */
+ MGETHDR(m1, M_DONTWAIT, MT_DATA);
+ if (m1 == NULL) {
+ m_freem(m);
+ return ENOBUFS;
+ }
+ if (m->m_pkthdr.len > MHLEN) {
+ MCLGET(m1, M_DONTWAIT);
+ if (!(m1->m_flags & M_EXT)) {
+ m_freem(m);
+ m_freem(m1);
+ return ENOBUFS;
+ }
+ }
+ m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
+ m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
+ m_freem(m);
+ m = m1;
+
+ error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
+ BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+ if (error != 0) {
+ printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
+ error);
+ m_freem(m);
+ return error;
+ }
+ }
+ data->m = m;
+ data->in = in;
+ data->done = 0;
+
+ DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
+ KASSERT(data->in != NULL);
+
+ DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
+ ring->qid, ring->cur, totlen, data->map->dm_nsegs));
+
+ /* Fill TX descriptor. */
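+ /*
+ * The first TB points at the initial TB0_SIZE bytes of the command,
+ * the second TB covers the rest of the TX command plus the 802.11
+ * header (and padding), and the remaining TBs map the frame payload.
+ */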
+ desc->num_tbs = 2 + data->map->dm_nsegs;
+
+ desc->tbs[0].lo = htole32(data->cmd_paddr);
+ desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
+ (TB0_SIZE << 4);
+ desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
+ desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
+ ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
+ + hdrlen + pad - TB0_SIZE) << 4);
+
+ /* Other DMA segments are for data payload. */
+ seg = data->map->dm_segs;
+ for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
+ desc->tbs[i+2].lo = htole32(seg->ds_addr);
+ desc->tbs[i+2].hi_n_len =
+ htole16(iwm_get_dma_hi_addr(seg->ds_addr))
+ | ((seg->ds_len) << 4);
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
+ (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
+ sizeof (*cmd), BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
+ (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
+ sizeof (*desc), BUS_DMASYNC_PREWRITE);
+
+#if 0
+ iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
+#endif
+
+ /* Kick TX ring. */
+ ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
+ IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
+
+ /* Mark TX ring as full if we reach a certain threshold. */
+ if (++ring->queued > IWM_TX_RING_HIMARK) {
+ sc->qfullmsk |= 1 << ring->qid;
+ }
+
+ return 0;
+}
+
+#if 0
+/* not necessary? */
+int
+iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
+{
+ struct iwm_tx_path_flush_cmd flush_cmd = {
+ .queues_ctl = htole32(tfd_msk),
+ .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
+ };
+ int ret;
+
+ ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
+ sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
+ sizeof(flush_cmd), &flush_cmd);
+ if (ret)
+ printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), ret);
+ return ret;
+}
+#endif
+
+
+/*
+ * BEGIN mvm/power.c
+ */
+
+#define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+int
+iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
+ struct iwm_beacon_filter_cmd *cmd)
+{
+ int ret;
+
+ ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
+ IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
+
+ if (!ret) {
+ DPRINTF(("ba_enable_beacon_abort is: %d\n",
+ le32toh(cmd->ba_enable_beacon_abort)));
+ DPRINTF(("ba_escape_timer is: %d\n",
+ le32toh(cmd->ba_escape_timer)));
+ DPRINTF(("bf_debug_flag is: %d\n",
+ le32toh(cmd->bf_debug_flag)));
+ DPRINTF(("bf_enable_beacon_filter is: %d\n",
+ le32toh(cmd->bf_enable_beacon_filter)));
+ DPRINTF(("bf_energy_delta is: %d\n",
+ le32toh(cmd->bf_energy_delta)));
+ DPRINTF(("bf_escape_timer is: %d\n",
+ le32toh(cmd->bf_escape_timer)));
+ DPRINTF(("bf_roaming_energy_delta is: %d\n",
+ le32toh(cmd->bf_roaming_energy_delta)));
+ DPRINTF(("bf_roaming_state is: %d\n",
+ le32toh(cmd->bf_roaming_state)));
+ DPRINTF(("bf_temp_threshold is: %d\n",
+ le32toh(cmd->bf_temp_threshold)));
+ DPRINTF(("bf_temp_fast_filter is: %d\n",
+ le32toh(cmd->bf_temp_fast_filter)));
+ DPRINTF(("bf_temp_slow_filter is: %d\n",
+ le32toh(cmd->bf_temp_slow_filter)));
+ }
+ return ret;
+}
+
+void
+iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
+ struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
+{
+ cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
+}
+
+int
+iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
+ int enable)
+{
+ struct iwm_beacon_filter_cmd cmd = {
+ IWM_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = htole32(1),
+ .ba_enable_beacon_abort = htole32(enable),
+ };
+
+ if (!sc->sc_bf.bf_enabled)
+ return 0;
+
+ sc->sc_bf.ba_enabled = enable;
+ iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
+ return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
+}
+
+void
+iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
+{
+ DPRINTF(("Sending power table command on mac id 0x%X for "
+ "power level %d, flags = 0x%X\n",
+ cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
+ DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));
+
+ if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+ DPRINTF(("Disable power management\n"));
+ return;
+ }
+ KASSERT(0);
+
+#if 0
+ DPRINTF(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ DPRINTF(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+ DPRINTF(mvm, "DTIM periods to skip = %u\n",
+ cmd->skip_dtim_periods);
+ if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
+ DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
+ cmd->lprx_rssi_threshold);
+ if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ DPRINTF(mvm, "uAPSD enabled\n");
+ DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout_uapsd));
+ DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout_uapsd));
+ DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+ DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+ DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
+ }
+#endif
+}
+
+void
+iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
+ struct iwm_mac_power_cmd *cmd)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = &in->in_ni;
+ int dtimper, dtimper_msec;
+ int keep_alive;
+
+ cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color));
+ dtimper = ic->ic_dtim_period ?: 1;
+
+ /*
+ * Regardless of power management state the driver must set
+ * keep alive period. FW will use it for sending keep alive NDPs
+ * immediately after association. Check that keep alive period
+ * is at least 3 * DTIM
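+	 * (illustrative example: a DTIM period of 3 with a 100 TU beacon
+	 * interval gives dtimper_msec ~300 below, so keep_alive becomes
+	 * MAX(900, 25000) = 25000 and rounds to 25 seconds).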
+ */
+ dtimper_msec = dtimper * ni->ni_intval;
+ keep_alive
+ = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
+ keep_alive = roundup(keep_alive, 1000) / 1000;
+ cmd->keep_alive_seconds = htole16(keep_alive);
+}
+
+int
+iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
+{
+ int ret;
+ int ba_enable;
+ struct iwm_mac_power_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwm_mvm_power_build_cmd(sc, in, &cmd);
+ iwm_mvm_power_log(sc, &cmd);
+
+ if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
+ IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
+ return ret;
+
+ ba_enable = !!(cmd.flags &
+ htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+ return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
+}
+
+int
+iwm_mvm_power_update_device(struct iwm_softc *sc)
+{
+ struct iwm_device_power_cmd cmd = {
+ .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+ };
+
+ if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+ return 0;
+
+ cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
+ DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
+
+ return iwm_mvm_send_cmd_pdu(sc,
+ IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
+}
+
+int
+iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
+{
+ struct iwm_beacon_filter_cmd cmd = {
+ IWM_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = htole32(1),
+ };
+ int ret;
+
+ iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
+ ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
+
+ if (ret == 0)
+ sc->sc_bf.bf_enabled = 1;
+
+ return ret;
+}
+
+int
+iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
+{
+ struct iwm_beacon_filter_cmd cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
+ return 0;
+
+ ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
+ if (ret == 0)
+ sc->sc_bf.bf_enabled = 0;
+
+ return ret;
+}
+
+#if 0
+int
+iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
+{
+ if (!sc->sc_bf.bf_enabled)
+ return 0;
+
+ return iwm_mvm_enable_beacon_filter(sc, in);
+}
+#endif
+
+/*
+ * END mvm/power.c
+ */
+
+/*
+ * BEGIN mvm/sta.c
+ */
+
+void
+iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
+ struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
+{
+ memset(cmd_v5, 0, sizeof(*cmd_v5));
+
+ cmd_v5->add_modify = cmd_v6->add_modify;
+ cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
+ cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
+ memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
+ cmd_v5->sta_id = cmd_v6->sta_id;
+ cmd_v5->modify_mask = cmd_v6->modify_mask;
+ cmd_v5->station_flags = cmd_v6->station_flags;
+ cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
+ cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
+ cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
+ cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
+ cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
+ cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
+ cmd_v5->assoc_id = cmd_v6->assoc_id;
+ cmd_v5->beamform_flags = cmd_v6->beamform_flags;
+ cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+}
+
+int
+iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
+ struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
+{
+ struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
+
+ if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
+ return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
+ sizeof(*cmd), cmd, status);
+ }
+
+ iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+ return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
+ &cmd_v5, status);
+}
+
+/* send station add/update command to firmware */
+int
+iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
+{
+ struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
+ int ret;
+ uint32_t status;
+
+ memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
+
+ add_sta_cmd.sta_id = IWM_STATION_ID;
+ add_sta_cmd.mac_id_n_color
+ = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+ if (!update) {
+ add_sta_cmd.tfd_queue_msk = htole32(0xf);
+ IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
+ }
+ add_sta_cmd.add_modify = update ? 1 : 0;
+ add_sta_cmd.station_flags_msk
+ |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
+
+ status = IWM_ADD_STA_SUCCESS;
+ ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case IWM_ADD_STA_SUCCESS:
+ break;
+ default:
+ ret = EIO;
+ DPRINTF(("IWM_ADD_STA failed\n"));
+ break;
+ }
+
+ return ret;
+}
+
+int
+iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
+{
+ int ret;
+
+ ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
+{
+ return iwm_mvm_sta_send_to_fw(sc, in, 1);
+}
+
+int
+iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
+ const uint8_t *addr, uint16_t mac_id, uint16_t color)
+{
+ struct iwm_mvm_add_sta_cmd_v6 cmd;
+ int ret;
+ uint32_t status;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.sta_id = sta->sta_id;
+ cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
+
+ cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
+
+ if (addr)
+ memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
+
+ ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case IWM_ADD_STA_SUCCESS:
+ DPRINTF(("Internal station added.\n"));
+ return 0;
+ default:
+ printf("%s: Add internal station failed, status=0x%x\n",
+ DEVNAME(sc), status);
+ ret = EIO;
+ break;
+ }
+ return ret;
+}
+
+int
+iwm_mvm_add_aux_sta(struct iwm_softc *sc)
+{
+ int ret;
+
+ sc->sc_aux_sta.sta_id = 3;
+ sc->sc_aux_sta.tfd_queue_msk = 0;
+
+ ret = iwm_mvm_add_int_sta_common(sc,
+ &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
+
+ if (ret)
+ memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
+ return ret;
+}
+
+/*
+ * END mvm/sta.c
+ */
+
+/*
+ * BEGIN mvm/scan.c
+ */
+
+#define IWM_PLCP_QUIET_THRESH 1
+#define IWM_ACTIVE_QUIET_TIME 10
+#define LONG_OUT_TIME_PERIOD 600
+#define SHORT_OUT_TIME_PERIOD 200
+#define SUSPEND_TIME_PERIOD 100
+
+uint16_t
+iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
+{
+ uint16_t rx_chain;
+ uint8_t rx_ant;
+
+ rx_ant = IWM_FW_VALID_RX_ANT(sc);
+ rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
+ return htole16(rx_chain);
+}
+
+#define ieee80211_tu_to_usec(a) (1024*(a))
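+/* A TU is 1024 usec; e.g. SHORT_OUT_TIME_PERIOD (200 TU) is 204800 usec. */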
+
+uint32_t
+iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
+{
+ if (!is_assoc)
+ return 0;
+ if (flags & 0x1)
+ return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
+ return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
+}
+
+uint32_t
+iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
+{
+ if (!is_assoc)
+ return 0;
+ return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
+}
+
+uint32_t
+iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
+{
+ if (flags & IEEE80211_CHAN_2GHZ)
+ return htole32(IWM_PHY_BAND_24);
+ else
+ return htole32(IWM_PHY_BAND_5);
+}
+
+uint32_t
+iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
+{
+ uint32_t tx_ant;
+ int i, ind;
+
+ for (i = 0, ind = sc->sc_scan_last_antenna;
+ i < IWM_RATE_MCS_ANT_NUM; i++) {
+ ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
+ if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
+ sc->sc_scan_last_antenna = ind;
+ break;
+ }
+ }
+ tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
+
+ if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
+ return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
+ tx_ant);
+ else
+ return htole32(IWM_RATE_6M_PLCP | tx_ant);
+}
+
+/*
+ * If req->n_ssids > 0, it means we should do an active scan.
+ * In case of active scan w/o directed scan, we receive a zero-length SSID
+ * just to notify that this scan is active and not passive.
+ * In order to notify the FW of the number of SSIDs we wish to scan (including
+ * the zero-length one), we need to set the corresponding bits in chan->type,
+ * one for each SSID, and set the active bit (first). Since the first SSID is
+ * already included in the probe template, we need to set only
+ * req->n_ssids - 1 bits in addition to the first bit.
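+ * As a purely illustrative example, a directed scan for two SSIDs with the
+ * basic SSID already in the template yields type = 0x3 below; without a
+ * basic SSID an extra bit is added, giving 0x7.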
+ */
+uint16_t
+iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
+{
+ if (flags & IEEE80211_CHAN_2GHZ)
+ return 30 + 3 * (n_ssids + 1);
+ return 20 + 2 * (n_ssids + 1);
+}
+
+uint16_t
+iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
+{
+ return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
+}
+
+int
+iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
+ int flags, int n_ssids, int basic_ssid)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
+ uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
+ struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
+ (cmd->data + le16toh(cmd->tx_cmd.len));
+ int type = (1 << n_ssids) - 1;
+ struct ieee80211_channel *c;
+ int nchan;
+
+ if (!basic_ssid)
+ type |= (1 << n_ssids);
+
+ for (nchan = 0, c = &ic->ic_channels[1];
+ c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
+ c++) {
+ if ((c->ic_flags & flags) != flags)
+ continue;
+
+ chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
+ chan->type = htole32(type);
+ if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
+ chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
+ chan->active_dwell = htole16(active_dwell);
+ chan->passive_dwell = htole16(passive_dwell);
+ chan->iteration_count = htole16(1);
+ chan++;
+ nchan++;
+ }
+ if (nchan == 0)
+ printf("%s: NO CHANNEL!\n", DEVNAME(sc));
+ return nchan;
+}
+
+/*
+ * Fill in probe request with the following parameters:
+ * TA is our vif HW address, which mac80211 ensures we have.
+ * Packet is broadcasted, so this is both SA and DA.
+ * The probe request IE is made out of two: first comes the most prioritized
+ * SSID if a directed scan is requested. Second comes whatever extra
+ * information was given to us as the scan request IE.
+ */
+uint16_t
+iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
+ const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
+ const uint8_t *ie, int ie_len, int left)
+{
+ int len = 0;
+ uint8_t *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= sizeof(*frame);
+ if (left < 0)
+ return 0;
+
+ frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
+ IEEE80211_FC0_SUBTYPE_PROBE_REQ;
+ frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
+ memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
+ IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);
+
+ len += sizeof(*frame);
+ CTASSERT(sizeof(*frame) == 24);
+
+ /* for passive scans, no need to fill anything */
+ if (n_ssids == 0)
+ return (uint16_t)len;
+
+ /* points to the payload of the request */
+ pos = (uint8_t *)frame + sizeof(*frame);
+
+ /* fill in our SSID IE */
+ left -= ssid_len + 2;
+ if (left < 0)
+ return 0;
+ *pos++ = IEEE80211_ELEMID_SSID;
+ *pos++ = ssid_len;
+ if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
+ memcpy(pos, ssid, ssid_len);
+ pos += ssid_len;
+ }
+
+ len += ssid_len + 2;
+
+ if (left < ie_len)
+ return len;
+
+ if (ie && ie_len) {
+ memcpy(pos, ie, ie_len);
+ len += ie_len;
+ }
+
+ return (uint16_t)len;
+}
+
+int
+iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
+ int n_ssids, uint8_t *ssid, int ssid_len)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwm_host_cmd hcmd = {
+ .id = IWM_SCAN_REQUEST_CMD,
+ .len = { 0, },
+ .data = { sc->sc_scan_cmd, },
+ .flags = IWM_CMD_SYNC,
+ .dataflags = { IWM_HCMD_DFL_NOCOPY, },
+ };
+ struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
+ int is_assoc = 0;
+ int ret;
+ uint32_t status;
+ int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);
+
+ //lockdep_assert_held(&mvm->mutex);
+
+ sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
+
+ DPRINTF(("Handling ieee80211 scan request\n"));
+ memset(cmd, 0, sc->sc_scan_cmd_len);
+
+ cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
+ cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
+ cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
+ cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
+ cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
+ cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
+ cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
+ IWM_MAC_FILTER_IN_BEACON);
+
+ cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
+ cmd->repeats = htole32(1);
+
+ /*
+ * If the user asked for passive scan, don't change to active scan if
+ * you see any activity on the channel - remain passive.
+ */
+ if (n_ssids > 0) {
+ cmd->passive2active = htole16(1);
+ cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
+#if 0
+ if (basic_ssid) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
+ }
+#endif
+ } else {
+ cmd->passive2active = 0;
+ cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
+ }
+
+ cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
+ IWM_TX_CMD_FLG_BT_DIS);
+ cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
+ cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
+ cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);
+
+ cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
+ (struct ieee80211_frame *)cmd->data,
+ ic->ic_myaddr, n_ssids, ssid, ssid_len,
+ NULL, 0, sc->sc_capa_max_probe_len));
+
+ cmd->channel_count
+ = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);
+
+ cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
+ le16toh(cmd->tx_cmd.len) +
+ (cmd->channel_count * sizeof(struct iwm_scan_channel)));
+ hcmd.len[0] = le16toh(cmd->len);
+
+ status = IWM_SCAN_RESPONSE_OK;
+ ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
+ if (!ret && status == IWM_SCAN_RESPONSE_OK) {
+ DPRINTF(("Scan request was sent successfully\n"));
+ } else {
+ /*
+ * If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ printf("%s: Scan failed! status 0x%x ret %d\n", DEVNAME(sc),
+ status, ret);
+ sc->sc_scanband = 0;
+ ret = EIO;
+ }
+ return ret;
+}
+
+/*
+ * END mvm/scan.c
+ */
+
+/*
+ * BEGIN mvm/mac-ctxt.c
+ */
+
+void
+iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
+ int *cck_rates, int *ofdm_rates)
+{
+ int lowest_present_ofdm = 100;
+ int lowest_present_cck = 100;
+ uint8_t cck = 0;
+ uint8_t ofdm = 0;
+ int i;
+
+ for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
+ cck |= (1 << i);
+ if (lowest_present_cck > i)
+ lowest_present_cck = i;
+ }
+ for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
+ int adj = i - IWM_FIRST_OFDM_RATE;
+ ofdm |= (1 << adj);
+		if (lowest_present_ofdm > i)
+			lowest_present_ofdm = i;
+ }
+
+ /*
+ * Now we've got the basic rates as bitmaps in the ofdm and cck
+ * variables. This isn't sufficient though, as there might not
+ * be all the right rates in the bitmap. E.g. if the only basic
+ * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+ * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+ *
+ * [...] a STA responding to a received frame shall transmit
+ * its Control Response frame [...] at the highest rate in the
+ * BSSBasicRateSet parameter that is less than or equal to the
+ * rate of the immediately previous frame in the frame exchange
+ * sequence ([...]) and that is of the same modulation class
+ * ([...]) as the received frame. If no rate contained in the
+ * BSSBasicRateSet parameter meets these conditions, then the
+ * control frame sent in response to a received frame shall be
+ * transmitted at the highest mandatory rate of the PHY that is
+ * less than or equal to the rate of the received frame, and
+ * that is of the same modulation class as the received frame.
+ *
+ * As a consequence, we need to add all mandatory rates that are
+ * lower than all of the basic rates to these bitmaps.
+ */
+
+ if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
+ ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
+ if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
+ ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
+ /* 6M already there or needed so always add */
+ ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
+
+ /*
+ * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+ * Note, however:
+ * - if no CCK rates are basic, it must be ERP since there must
+ * be some basic rates at all, so they're OFDM => ERP PHY
+ * (or we're in 5 GHz, and the cck bitmap will never be used)
+ * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+ * - if 5.5M is basic, 1M and 2M are mandatory
+ * - if 2M is basic, 1M is mandatory
+ * - if 1M is basic, that's the only valid ACK rate.
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+ if (IWM_RATE_11M_INDEX < lowest_present_cck)
+ cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
+ if (IWM_RATE_5M_INDEX < lowest_present_cck)
+ cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
+ if (IWM_RATE_2M_INDEX < lowest_present_cck)
+ cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
+
+ *cck_rates = cck;
+ *ofdm_rates = ofdm;
+}
+
+void
+iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
+ struct iwm_mac_ctx_cmd *cmd, uint32_t action)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = ic->ic_bss;
+ int cck_ack_rates, ofdm_ack_rates;
+ int i;
+
+ cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color));
+ cmd->action = htole32(action);
+
+ cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
+ cmd->tsf_id = htole32(in->in_tsfid);
+
+ IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
+ if (in->in_assoc) {
+ IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
+ } else {
+ memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
+ }
+ iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
+ cmd->cck_rates = htole32(cck_ack_rates);
+ cmd->ofdm_rates = htole32(ofdm_ack_rates);
+
+ cmd->cck_short_preamble
+ = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
+ ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
+ cmd->short_slot
+ = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
+ ? IWM_MAC_FLG_SHORT_SLOT : 0);
+
+ for (i = 0; i < IWM_AC_NUM+1; i++) {
+ int txf = i;
+
+ cmd->ac[txf].cw_min = htole16(0x0f);
+ cmd->ac[txf].cw_max = htole16(0x3f);
+ cmd->ac[txf].aifsn = 1;
+ cmd->ac[txf].fifos_mask = (1 << txf);
+ cmd->ac[txf].edca_txop = 0;
+ }
+
+ cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
+ cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
+
+ cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
+}
+
+int
+iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
+{
+ int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
+ sizeof(*cmd), cmd);
+ if (ret)
+ printf("%s: Failed to send MAC context (action:%d): %d\n",
+ DEVNAME(sc), le32toh(cmd->action), ret);
+ return ret;
+}
+
+/*
+ * Fill the specific data for mac context of type station or p2p client
+ */
+void
+iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
+ struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
+{
+ struct ieee80211_node *ni = &in->in_ni;
+ unsigned dtim_period, dtim_count;
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ /* will this work? */
+ dtim_period = ic->ic_dtim_period;
+ dtim_count = ic->ic_dtim_count;
+ DPRINTF(("dtim %d %d\n", dtim_period, dtim_count));
+
+ /* We need the dtim_period to set the MAC as associated */
+ if (in->in_assoc && dtim_period && !force_assoc_off) {
+ uint64_t tsf;
+ uint32_t dtim_offs;
+
+ /*
+ * The DTIM count counts down, so when it is N that means N
+ * more beacon intervals happen until the DTIM TBTT. Therefore
+ * add this to the current time. If that ends up being in the
+ * future, the firmware will handle it.
+ *
+ * Also note that the system_timestamp (which we get here as
+ * "sync_device_ts") and TSF timestamp aren't at exactly the
+ * same offset in the frame -- the TSF is at the first symbol
+ * of the TSF, the system timestamp is at signal acquisition
+ * time. This means there's an offset between them of at most
+ * a few hundred microseconds (24 * 8 bits + PLCP time gives
+ * 384us in the longest case), this is currently not relevant
+ * as the firmware wakes up around 2ms before the TBTT.
+ */
+ dtim_offs = dtim_count * ni->ni_intval;
+ /* convert TU to usecs */
+ dtim_offs *= 1024;
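+		/*
+		 * Illustrative numbers only: dtim_count 2 with a 100 TU
+		 * beacon interval gives dtim_offs of 200 TU, i.e. 204800
+		 * usec added to the TSF below.
+		 */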
+
+ /* XXX: byte order? */
+ memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
+
+ ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
+ ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);
+
+ DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
+ (long long)le64toh(ctxt_sta->dtim_tsf),
+ le32toh(ctxt_sta->dtim_time), dtim_offs));
+
+ ctxt_sta->is_assoc = htole32(1);
+ } else {
+ ctxt_sta->is_assoc = htole32(0);
+ }
+
+ ctxt_sta->bi = htole32(ni->ni_intval);
+ ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
+ ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
+ ctxt_sta->dtim_reciprocal =
+ htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));
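+	/*
+	 * Note: iwm_mvm_reciprocal() is assumed to return 0xffffffff / v
+	 * (0 for v == 0), i.e. a fixed-point 1/interval which saves the
+	 * firmware a division.
+	 */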
+
+ /* 10 = CONN_MAX_LISTEN_INTERVAL */
+ ctxt_sta->listen_interval = htole32(10);
+ ctxt_sta->assoc_id = htole32(ni->ni_associd);
+}
+
+int
+iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
+ uint32_t action)
+{
+ struct iwm_mac_ctx_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ /* Fill the common data for all mac context types */
+ iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
+
+ if (in->in_assoc)
+ cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
+ else
+ cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
+
+ /* Fill the data specific for station mode */
+ iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
+ &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
+
+ return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
+}
+
+int
+iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
+{
+ return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
+}
+
+int
+iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
+{
+ int ret;
+
+ ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
+{
+ return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
+}
+
+#if 0
+int
+iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
+{
+ struct iwm_mac_ctx_cmd cmd;
+ int ret;
+
+ if (!in->in_uploaded) {
+		printf("%s: attempt to remove !uploaded node %p\n", DEVNAME(sc), in);
+ return EIO;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color));
+ cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
+
+ ret = iwm_mvm_send_cmd_pdu(sc,
+ IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
+ if (ret) {
+ printf("%s: Failed to remove MAC context: %d\n", DEVNAME(sc), ret);
+ return ret;
+ }
+ in->in_uploaded = 0;
+
+ return 0;
+}
+#endif
+
+/*
+ * END mvm/mac-ctxt.c
+ */
+
+/*
+ * BEGIN mvm/quota.c
+ */
+
+int
+iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
+{
+ struct iwm_time_quota_cmd cmd;
+ int i, idx, ret, num_active_macs, quota, quota_rem;
+ int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
+ int n_ifs[IWM_MAX_BINDINGS] = {0, };
+ uint16_t id;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ /* currently, PHY ID == binding ID */
+ if (in) {
+ id = in->in_phyctxt->id;
+ KASSERT(id < IWM_MAX_BINDINGS);
+ colors[id] = in->in_phyctxt->color;
+
+ if (1)
+ n_ifs[id] = 1;
+ }
+
+ /*
+ * The FW's scheduling session consists of
+ * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
+ * equally between all the bindings that require quota
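+	 * (e.g. if IWM_MVM_MAX_QUOTA were 128 and two bindings were active,
+	 * each binding would get 64 fragments and any remainder would go to
+	 * the first binding below; the 128 is only an illustration)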
+ */
+ num_active_macs = 0;
+ for (i = 0; i < IWM_MAX_BINDINGS; i++) {
+ cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
+ num_active_macs += n_ifs[i];
+ }
+
+ quota = 0;
+ quota_rem = 0;
+ if (num_active_macs) {
+ quota = IWM_MVM_MAX_QUOTA / num_active_macs;
+ quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
+ }
+
+ for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
+ if (colors[i] < 0)
+ continue;
+
+ cmd.quotas[idx].id_and_color =
+ htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
+
+ if (n_ifs[i] <= 0) {
+ cmd.quotas[idx].quota = htole32(0);
+ cmd.quotas[idx].max_duration = htole32(0);
+ } else {
+ cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
+ cmd.quotas[idx].max_duration = htole32(0);
+ }
+ idx++;
+ }
+
+ /* Give the remainder of the session to the first binding */
+ cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
+
+ ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret)
+ printf("%s: Failed to send quota: %d\n", DEVNAME(sc), ret);
+ return ret;
+}
+
+/*
+ * END mvm/quota.c
+ */
+
+/*
+ * ieee80211 routines
+ */
+
+/*
+ * Change to AUTH state in 80211 state machine. Roughly matches what
+ * Linux does in bss_info_changed().
+ */
+int
+iwm_auth(struct iwm_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwm_node *in = (void *)ic->ic_bss;
+ uint32_t duration;
+ uint32_t min_duration;
+ int error;
+
+ in->in_assoc = 0;
+ if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
+ printf("%s: failed to add MAC\n", DEVNAME(sc));
+ return error;
+ }
+
+ if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
+ in->in_ni.ni_chan, 1, 1)) != 0) {
+		printf("%s: failed to add phy ctxt\n", DEVNAME(sc));
+ return error;
+ }
+ in->in_phyctxt = &sc->sc_phyctxt[0];
+
+ if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
+ printf("%s: binding cmd\n", DEVNAME(sc));
+ return error;
+ }
+
+ if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
+		printf("%s: failed to add sta\n", DEVNAME(sc));
+ return error;
+ }
+
+ /* a bit superfluous? */
+ while (sc->sc_auth_prot)
+ tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
+ sc->sc_auth_prot = 1;
+
+ duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
+ 200 + in->in_ni.ni_intval);
+ min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
+ 100 + in->in_ni.ni_intval);
+ iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
+
+ while (sc->sc_auth_prot != 2) {
+ /*
+ * well, meh, but if the kernel is sleeping for half a
+ * second, we have bigger problems
+ */
+ if (sc->sc_auth_prot == 0) {
+ printf("%s: missed auth window!\n", DEVNAME(sc));
+ return ETIMEDOUT;
+ } else if (sc->sc_auth_prot == -1) {
+ printf("%s: no time event, denied!\n", DEVNAME(sc));
+ sc->sc_auth_prot = 0;
+ return EAUTH;
+ }
+ tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
+ }
+
+ return 0;
+}
+
+int
+iwm_assoc(struct iwm_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwm_node *in = (void *)ic->ic_bss;
+ int error;
+
+ if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
+ printf("%s: failed to update STA\n", DEVNAME(sc));
+ return error;
+ }
+
+ in->in_assoc = 1;
+ if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
+ printf("%s: failed to update MAC\n", DEVNAME(sc));
+ return error;
+ }
+
+ return 0;
+}
+
+int
+iwm_release(struct iwm_softc *sc, struct iwm_node *in)
+{
+ /*
+ * Ok, so *technically* the proper set of calls for going
+ * from RUN back to SCAN is:
+ *
+ * iwm_mvm_power_mac_disable(sc, in);
+ * iwm_mvm_mac_ctxt_changed(sc, in);
+ * iwm_mvm_rm_sta(sc, in);
+ * iwm_mvm_update_quotas(sc, NULL);
+ * iwm_mvm_mac_ctxt_changed(sc, in);
+ * iwm_mvm_binding_remove_vif(sc, in);
+ * iwm_mvm_mac_ctxt_remove(sc, in);
+ *
+	 * However, that freezes the device no matter which permutations
+ * and modifications are attempted. Obviously, this driver is missing
+ * something since it works in the Linux driver, but figuring out what
+ * is missing is a little more complicated. Now, since we're going
+ * back to nothing anyway, we'll just do a complete device reset.
+	 * Up yours, device!
+ */
+ //iwm_mvm_flush_tx_path(sc, 0xf, 1);
+ iwm_stop_device(sc);
+ iwm_init_hw(sc);
+ if (in)
+ in->in_assoc = 0;
+ return 0;
+
+#if 0
+ int error;
+
+ iwm_mvm_power_mac_disable(sc, in);
+
+ if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
+ printf("%s: mac ctxt change fail 1 %d\n", DEVNAME(sc), error);
+ return error;
+ }
+
+ if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
+ printf("%s: sta remove fail %d\n", DEVNAME(sc), error);
+ return error;
+ }
+ error = iwm_mvm_rm_sta(sc, in);
+ in->in_assoc = 0;
+ iwm_mvm_update_quotas(sc, NULL);
+ if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
+ printf("%s: mac ctxt change fail 2 %d\n", DEVNAME(sc), error);
+ return error;
+ }
+ iwm_mvm_binding_remove_vif(sc, in);
+
+ iwm_mvm_mac_ctxt_remove(sc, in);
+
+ return error;
+#endif
+}
+
+struct ieee80211_node *
+iwm_node_alloc(struct ieee80211com *ic)
+{
+ return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
+}
+
+void
+iwm_calib_timeout(void *arg)
+{
+ struct iwm_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ int s;
+
+ s = splnet();
+ if (ic->ic_fixed_rate == -1
+ && ic->ic_opmode == IEEE80211_M_STA
+ && ic->ic_bss) {
+ struct iwm_node *in = (void *)ic->ic_bss;
+ ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
+ }
+ splx(s);
+
+ timeout_add(&sc->sc_calib_to, hz/2);
+}
+
+void
+iwm_setrates(struct iwm_node *in)
+{
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct iwm_softc *sc = IC2IFP(ic)->if_softc;
+ struct iwm_lq_cmd *lq = &in->in_lq;
+ int nrates = ni->ni_rates.rs_nrates;
+ int i, ridx, tab = 0;
+ int txant = 0;
+
+ if (nrates > nitems(lq->rs_table)) {
+ printf("%s: node supports %d rates, driver handles only %zu\n",
+ DEVNAME(sc), nrates, nitems(lq->rs_table));
+ return;
+ }
+
+ /* first figure out which rates we should support */
+ memset(&in->in_ridx, -1, sizeof(in->in_ridx));
+ for (i = 0; i < nrates; i++) {
+ int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
+
+ /* Map 802.11 rate to HW rate index. */
+ for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
+ if (iwm_rates[ridx].rate == rate)
+ break;
+ if (ridx > IWM_RIDX_MAX)
+ printf("%s: WARNING: device rate for %d not found!\n", DEVNAME(sc), rate);
+ else
+ in->in_ridx[i] = ridx;
+ }
+
+ /* then construct a lq_cmd based on those */
+ memset(lq, 0, sizeof(*lq));
+ lq->sta_id = IWM_STATION_ID;
+
+ /*
+ * are these used? (we don't do SISO or MIMO)
+ * need to set them to non-zero, though, or we get an error.
+ */
+ lq->single_stream_ant_msk = 1;
+ lq->dual_stream_ant_msk = 1;
+
+ /*
+ * Build the actual rate selection table.
+ * The lowest bits are the rates. Additionally,
+ * CCK needs bit 9 to be set. The rest of the bits
+ * we add to the table select the tx antenna
+	 * Note that we add the rates with the highest rate first
+	 * (the opposite order of ni_rates).
+ */
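+	/*
+	 * Illustrative note: txant is consumed one bit at a time via ffs()
+	 * and refilled from IWM_FW_VALID_TX_ANT() when empty, so successive
+	 * entries alternate between the usable antennas.
+	 */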
+ for (i = 0; i < nrates; i++) {
+ int nextant;
+
+ if (txant == 0)
+ txant = IWM_FW_VALID_TX_ANT(sc);
+ nextant = 1<<(ffs(txant)-1);
+ txant &= ~nextant;
+
+ ridx = in->in_ridx[(nrates-1)-i];
+ tab = iwm_rates[ridx].plcp;
+ tab |= nextant << IWM_RATE_MCS_ANT_POS;
+ if (IWM_RIDX_IS_CCK(ridx))
+ tab |= IWM_RATE_MCS_CCK_MSK;
+ DPRINTFN(2, ("station rate %d %x\n", i, tab));
+ lq->rs_table[i] = htole32(tab);
+ }
+ /* then fill the rest with the lowest possible rate */
+ for (i = nrates; i < nitems(lq->rs_table); i++) {
+ KASSERT(tab != 0);
+ lq->rs_table[i] = htole32(tab);
+ }
+
+ /* init amrr */
+ ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
+ ni->ni_txrate = nrates-1;
+}
+
+int
+iwm_media_change(struct ifnet *ifp)
+{
+ struct iwm_softc *sc = ifp->if_softc;
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint8_t rate, ridx;
+ int error;
+
+ error = ieee80211_media_change(ifp);
+ if (error != ENETRESET)
+ return error;
+
+ if (ic->ic_fixed_rate != -1) {
+ rate = ic->ic_sup_rates[ic->ic_curmode].
+ rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
+ /* Map 802.11 rate to HW rate index. */
+ for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
+ if (iwm_rates[ridx].rate == rate)
+ break;
+ sc->sc_fixed_ridx = ridx;
+ }
+
+ if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
+ (IFF_UP | IFF_RUNNING)) {
+ iwm_stop(ifp, 0);
+ error = iwm_init(ifp);
+ }
+ return error;
+}
+
+void
+iwm_newstate_cb(void *wk)
+{
+ struct iwm_newstate_state *iwmns = (void *)wk;
+ struct ieee80211com *ic = iwmns->ns_ic;
+ enum ieee80211_state nstate = iwmns->ns_nstate;
+ int generation = iwmns->ns_generation;
+ struct iwm_node *in;
+ int arg = iwmns->ns_arg;
+ struct ifnet *ifp = IC2IFP(ic);
+ struct iwm_softc *sc = ifp->if_softc;
+ int error;
+
+ free(iwmns, M_DEVBUF, sizeof(*iwmns));
+
+ DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
+ if (sc->sc_generation != generation) {
+ DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
+ if (nstate == IEEE80211_S_INIT) {
+ DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
+ sc->sc_newstate(ic, nstate, arg);
+ }
+ return;
+ }
+
+ DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));
+
+ /* disable beacon filtering if we're hopping out of RUN */
+ if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
+ iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);
+
+ if (((in = (void *)ic->ic_bss) != NULL))
+ in->in_assoc = 0;
+ iwm_release(sc, NULL);
+
+ /*
+ * It's impossible to directly go RUN->SCAN. If we iwm_release()
+ * above then the card will be completely reinitialized,
+ * so the driver must do everything necessary to bring the card
+ * from INIT to SCAN.
+ *
+ * Additionally, upon receiving deauth frame from AP,
+ * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
+ * state. This will also fail with this driver, so bring the FSM
+ * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
+ */
+ if (nstate == IEEE80211_S_SCAN ||
+ nstate == IEEE80211_S_AUTH ||
+ nstate == IEEE80211_S_ASSOC) {
+ DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
+ sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
+ DPRINTF(("Going INIT->SCAN\n"));
+ nstate = IEEE80211_S_SCAN;
+ }
+ }
+
+ switch (nstate) {
+ case IEEE80211_S_INIT:
+ sc->sc_scanband = 0;
+ break;
+
+ case IEEE80211_S_SCAN:
+ if (sc->sc_scanband) {
+ /* how does this print match the clause? */
+ if (ic->ic_state != nstate)
+ printf("%s: scan request(%d) "
+ "while scanning(%d) ignored\n",
+ DEVNAME(sc), nstate, ic->ic_state);
+ break;
+ }
+
+ if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
+ ic->ic_des_esslen != 0,
+ ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
+ printf("%s: could not initiate scan\n", DEVNAME(sc));
+ return;
+ }
+ ic->ic_state = nstate;
+ return;
+
+ case IEEE80211_S_AUTH:
+ if ((error = iwm_auth(sc)) != 0) {
+ printf("%s: could not move to auth state: %d\n",
+ DEVNAME(sc), error);
+ return;
+ }
+
+ break;
+
+ case IEEE80211_S_ASSOC:
+ if ((error = iwm_assoc(sc)) != 0) {
+ printf("%s: failed to associate: %d\n", DEVNAME(sc),
+ error);
+ return;
+ }
+ break;
+
+ case IEEE80211_S_RUN: {
+ struct iwm_host_cmd cmd = {
+ .id = IWM_LQ_CMD,
+ .len = { sizeof(in->in_lq), },
+ .flags = IWM_CMD_SYNC,
+ };
+
+ in = (struct iwm_node *)ic->ic_bss;
+ iwm_mvm_power_mac_update_mode(sc, in);
+ iwm_mvm_enable_beacon_filter(sc, in);
+ iwm_mvm_update_quotas(sc, in);
+ iwm_setrates(in);
+
+ cmd.data[0] = &in->in_lq;
+ if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
+ printf("%s: IWM_LQ_CMD failed\n", DEVNAME(sc));
+ }
+
+ timeout_add(&sc->sc_calib_to, hz/2);
+
+ break; }
+
+ default:
+ panic("unsupported state %d\n", nstate);
+ }
+
+ sc->sc_newstate(ic, nstate, arg);
+}
+
+int
+iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
+{
+ struct iwm_newstate_state *iwmns;
+ struct ifnet *ifp = IC2IFP(ic);
+ struct iwm_softc *sc = ifp->if_softc;
+
+ timeout_del(&sc->sc_calib_to);
+
+ iwmns = malloc(sizeof(*iwmns), M_DEVBUF, M_NOWAIT);
+ if (!iwmns) {
+ printf("%s: allocating state cb mem failed\n", DEVNAME(sc));
+ return ENOMEM;
+ }
+
+ iwmns->ns_ic = ic;
+ iwmns->ns_nstate = nstate;
+ iwmns->ns_arg = arg;
+ iwmns->ns_generation = sc->sc_generation;
+
+ task_set(&iwmns->ns_wk, iwm_newstate_cb, iwmns);
+ task_add(sc->sc_nswq, &iwmns->ns_wk);
+
+ return 0;
+}
+
+void
+iwm_endscan_cb(void *arg)
+{
+ struct iwm_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ int done;
+
+ DPRINTF(("scan ended\n"));
+
+ if (sc->sc_scanband == IEEE80211_CHAN_2GHZ) {
+#ifndef IWM_NO_5GHZ /* for quick testing, makes scan few sec faster */
+ int error;
+ done = 0;
+ if ((error = iwm_mvm_scan_request(sc,
+ IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
+ ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
+ printf("%s: could not initiate 5ghz scan\n",
+ DEVNAME(sc));
+ done = 1;
+ }
+#else
+ done = 1;
+#endif
+ } else {
+ done = 1;
+ }
+
+ if (done) {
+ if (!sc->sc_scanband) {
+ ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
+ } else {
+ ieee80211_end_scan(&ic->ic_if);
+ }
+ sc->sc_scanband = 0;
+ }
+}
+
+int
+iwm_init_hw(struct iwm_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ int error, i, qid;
+
+ if ((error = iwm_prepare_card_hw(sc)) != 0)
+ return error;
+
+ if ((error = iwm_start_hw(sc)) != 0)
+ return error;
+
+ if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
+ return error;
+ }
+
+ /*
+	 * We have to stop and restart the HW since the INIT
+	 * image has just been loaded.
+ */
+ iwm_stop_device(sc);
+ if ((error = iwm_start_hw(sc)) != 0)
+ return error;
+
+	/* restart, this time with the regular firmware */
+ error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
+ if (error) {
+ printf("%s: Failed to start RT ucode: %d\n", DEVNAME(sc),
+ error);
+ goto error;
+ }
+
+ if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
+ goto error;
+
+	/* Send phy db control command and then phy db calibration */
+ if ((error = iwm_send_phy_db_data(sc)) != 0)
+ goto error;
+
+ if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
+ goto error;
+
+ /* Add auxiliary station for scanning */
+ if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
+ goto error;
+
+ for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
+ /*
+ * The channel used here isn't relevant as it's
+ * going to be overwritten in the other flows.
+ * For now use the first channel we have.
+ */
+ if ((error = iwm_mvm_phy_ctxt_add(sc,
+ &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
+ goto error;
+ }
+
+ error = iwm_mvm_power_update_device(sc);
+ if (error)
+ goto error;
+
+ /* Mark TX rings as active. */
+ for (qid = 0; qid < 4; qid++) {
+ iwm_enable_txq(sc, qid, qid);
+ }
+
+ return 0;
+
+ error:
+ iwm_stop_device(sc);
+ return error;
+}
+
+/*
+ * ifnet interfaces
+ */
+
+int
+iwm_init(struct ifnet *ifp)
+{
+ struct iwm_softc *sc = ifp->if_softc;
+ int error;
+
+ if (sc->sc_flags & IWM_FLAG_HW_INITED) {
+ return 0;
+ }
+ sc->sc_generation++;
+ sc->sc_flags &= ~IWM_FLAG_STOPPED;
+
+ if ((error = iwm_init_hw(sc)) != 0) {
+ iwm_stop(ifp, 1);
+ return error;
+ }
+
+ /*
+ * Ok, firmware loaded and we are jogging
+ */
+
+ ifp->if_flags &= ~IFF_OACTIVE;
+ ifp->if_flags |= IFF_RUNNING;
+
+ ieee80211_begin_scan(ifp);
+ sc->sc_flags |= IWM_FLAG_HW_INITED;
+
+ return 0;
+}
+
+/*
+ * Dequeue packets from sendq and call send.
+ * Mostly taken from iwn(4).
+ */
+void
+iwm_start(struct ifnet *ifp)
+{
+ struct iwm_softc *sc = ifp->if_softc;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni;
+ struct ether_header *eh;
+ struct mbuf *m;
+ int ac;
+
+ if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
+ return;
+
+ for (;;) {
+ /* why isn't this done per-queue? */
+ if (sc->qfullmsk != 0) {
+ ifp->if_flags |= IFF_OACTIVE;
+ break;
+ }
+
+ /* need to send management frames even if we're not RUNning */
+ IF_DEQUEUE(&ic->ic_mgtq, m);
+ if (m) {
+ ni = m->m_pkthdr.ph_cookie;
+ ac = 0;
+ goto sendit;
+ }
+ if (ic->ic_state != IEEE80211_S_RUN) {
+ break;
+ }
+
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (!m)
+ break;
+ if (m->m_len < sizeof (*eh) &&
+ (m = m_pullup(m, sizeof (*eh))) == NULL) {
+ ifp->if_oerrors++;
+ continue;
+ }
+ if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
+ ifp->if_oerrors++;
+ continue;
+ }
+
+ sendit:
+ if (iwm_tx(sc, m, ni, ac) != 0) {
+ ieee80211_release_node(ic, ni);
+ ifp->if_oerrors++;
+ continue;
+ }
+
+ if (ifp->if_flags & IFF_UP) {
+ sc->sc_tx_timer = 15;
+ ifp->if_timer = 1;
+ }
+ }
+
+ return;
+}
+
+void
+iwm_stop(struct ifnet *ifp, int disable)
+{
+ struct iwm_softc *sc = ifp->if_softc;
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ sc->sc_flags &= ~IWM_FLAG_HW_INITED;
+ sc->sc_flags |= IWM_FLAG_STOPPED;
+ sc->sc_generation++;
+ sc->sc_scanband = 0;
+ sc->sc_auth_prot = 0;
+ ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
+ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+
+ if (ic->ic_state != IEEE80211_S_INIT)
+ ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
+
+ ifp->if_timer = sc->sc_tx_timer = 0;
+ iwm_stop_device(sc);
+}
+
+void
+iwm_watchdog(struct ifnet *ifp)
+{
+ struct iwm_softc *sc = ifp->if_softc;
+
+ ifp->if_timer = 0;
+ if (sc->sc_tx_timer > 0) {
+ if (--sc->sc_tx_timer == 0) {
+ printf("%s: device timeout\n", DEVNAME(sc));
+ iwm_nic_error(sc);
+ ifp->if_flags &= ~IFF_UP;
+ iwm_stop(ifp, 1);
+ ifp->if_oerrors++;
+ return;
+ }
+ ifp->if_timer = 1;
+ }
+
+ ieee80211_watchdog(ifp);
+}
+
+int
+iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data)
+{
+ struct iwm_softc *sc = ifp->if_softc;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifaddr *ifa;
+ struct ifreq *ifr;
+ int s, error = 0;
+
+ s = splnet();
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ ifa = (struct ifaddr *)data;
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ arp_ifinit(&ic->ic_ac, ifa);
+ /* FALLTHROUGH */
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_UP) {
+ if (!(ifp->if_flags & IFF_RUNNING)) {
+ if ((error = iwm_init(ifp)) != 0)
+ ifp->if_flags &= ~IFF_UP;
+ }
+ } else {
+ if (ifp->if_flags & IFF_RUNNING)
+ iwm_stop(ifp, 1);
+ }
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ ifr = (struct ifreq *)data;
+ error = (cmd == SIOCADDMULTI) ?
+ ether_addmulti(ifr, &ic->ic_ac) :
+ ether_delmulti(ifr, &ic->ic_ac);
+ if (error == ENETRESET)
+ error = 0;
+ break;
+
+ default:
+ error = ieee80211_ioctl(ifp, cmd, data);
+ }
+
+ if (error == ENETRESET) {
+ error = 0;
+ if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
+ (IFF_UP | IFF_RUNNING)) {
+ iwm_stop(ifp, 0);
+ error = iwm_init(ifp);
+ }
+ }
+
+ splx(s);
+ return error;
+}
+
+/*
+ * The interrupt side of things
+ */
+
+/*
+ * error dumping routines are from iwlwifi/mvm/utils.c
+ */
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with uint32_t-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwm_error_event_table {
+ uint32_t valid; /* (nonzero) valid, (0) log is empty */
+ uint32_t error_id; /* type of error */
+ uint32_t pc; /* program counter */
+ uint32_t blink1; /* branch link */
+ uint32_t blink2; /* branch link */
+ uint32_t ilink1; /* interrupt link */
+ uint32_t ilink2; /* interrupt link */
+ uint32_t data1; /* error-specific data */
+ uint32_t data2; /* error-specific data */
+ uint32_t data3; /* error-specific data */
+ uint32_t bcon_time; /* beacon timer */
+ uint32_t tsf_low; /* network timestamp function timer */
+ uint32_t tsf_hi; /* network timestamp function timer */
+ uint32_t gp1; /* GP1 timer register */
+ uint32_t gp2; /* GP2 timer register */
+ uint32_t gp3; /* GP3 timer register */
+ uint32_t ucode_ver; /* uCode version */
+ uint32_t hw_ver; /* HW Silicon version */
+ uint32_t brd_ver; /* HW board version */
+ uint32_t log_pc; /* log program counter */
+ uint32_t frame_ptr; /* frame pointer */
+ uint32_t stack_ptr; /* stack pointer */
+ uint32_t hcmd; /* last host command header */
+ uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ uint32_t isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
+ uint32_t wait_event; /* wait event() caller address */
+ uint32_t l2p_control; /* L2pControlField */
+ uint32_t l2p_duration; /* L2pDurationField */
+ uint32_t l2p_mhvalid; /* L2pMhValidBits */
+ uint32_t l2p_addr_match; /* L2pAddrMatchStat */
+ uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+	uint32_t u_timestamp;	/* date and time of the
+				 * compilation */
+ uint32_t flow_handler; /* FH read/write pointers, RX credit */
+} __packed;
+
+#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
+#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
+
+struct {
+ const char *name;
+ uint8_t num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+const char *
+iwm_desc_lookup(uint32_t num)
+{
+ int i;
+
+ for (i = 0; i < nitems(advanced_lookup) - 1; i++)
+ if (advanced_lookup[i].num == num)
+ return advanced_lookup[i].name;
+
+ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+ return advanced_lookup[i].name;
+}
+
+/*
+ * Support for dumping the error log seemed like a good idea ...
+ * but it's mostly hex junk and the only sensible thing is the
+ * hw/ucode revision (which we know anyway). Since it's here,
+ * I'll just leave it in, just in case e.g. the Intel guys want to
+ * help us decipher some "ADVANCED_SYSASSERT" later.
+ */
+void
+iwm_nic_error(struct iwm_softc *sc)
+{
+ struct iwm_error_event_table table;
+ uint32_t base;
+
+ printf("%s: dumping device error log\n", DEVNAME(sc));
+ base = sc->sc_uc.uc_error_event_table;
+ if (base < 0x800000 || base >= 0x80C000) {
+ printf("%s: Not valid error log pointer 0x%08x\n",
+ DEVNAME(sc), base);
+ return;
+ }
+
+ if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
+ printf("%s: reading errlog failed\n", DEVNAME(sc));
+ return;
+ }
+
+ if (!table.valid) {
+ printf("%s: errlog not found, skipping\n", DEVNAME(sc));
+ return;
+ }
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ printf("%s: Start IWL Error Log Dump:\n", DEVNAME(sc));
+ printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
+ sc->sc_flags, table.valid);
+ }
+
+ printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
+ iwm_desc_lookup(table.error_id));
+ printf("%s: %08X | uPc\n", DEVNAME(sc), table.pc);
+ printf("%s: %08X | branchlink1\n", DEVNAME(sc), table.blink1);
+ printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
+ printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
+ printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
+ printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
+ printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
+ printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
+ printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
+ printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
+ printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
+ printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
+ printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
+ printf("%s: %08X | time gp3\n", DEVNAME(sc), table.gp3);
+ printf("%s: %08X | uCode version\n", DEVNAME(sc), table.ucode_ver);
+ printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
+ printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
+ printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
+ printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
+ printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
+ printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
+ printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
+ printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
+ printf("%s: %08X | isr_pref\n", DEVNAME(sc), table.isr_pref);
+ printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
+ printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
+ printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
+ printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
+ printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
+ printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
+ printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
+ printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
+}
+
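+/*
+ * In the macros below, the response payload immediately follows the
+ * iwm_rx_packet header in the DMA buffer, so (_pkt_)+1 points at it; the
+ * bus_dmamap_sync() makes that region past the header visible to the CPU
+ * before it is read.
+ */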
+#define SYNC_RESP_STRUCT(_var_, _pkt_) \
+do { \
+ bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
+ sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
+ _var_ = (void *)((_pkt_)+1); \
+} while (/*CONSTCOND*/0)
+
+#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
+do { \
+ bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
+ sizeof(len), BUS_DMASYNC_POSTREAD); \
+ _ptr_ = (void *)((_pkt_)+1); \
+} while (/*CONSTCOND*/0)
+
+#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
+
+/*
+ * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
+ * Basic structure from if_iwn
+ */
+void
+iwm_notif_intr(struct iwm_softc *sc)
+{
+ uint16_t hw;
+
+ bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
+ 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
+
+ hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
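+	/*
+	 * closed_rb_num reports (modulo 4096) the most recently closed,
+	 * i.e. firmware-filled, RX buffer; process entries until our
+	 * cursor catches up with it.
+	 */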
+ while (sc->rxq.cur != hw) {
+ struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
+ struct iwm_rx_packet *pkt;
+ struct iwm_cmd_response *cresp;
+ int qid, idx;
+
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
+ BUS_DMASYNC_POSTREAD);
+ pkt = mtod(data->m, struct iwm_rx_packet *);
+
+ qid = pkt->hdr.qid & ~0x80;
+ idx = pkt->hdr.idx;
+
+ DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
+ pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
+ pkt->hdr.code, sc->rxq.cur, hw));
+
+ /*
+ * randomly get these from the firmware, no idea why.
+ * they at least seem harmless, so just ignore them for now
+ */
+ if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
+ || pkt->len_n_flags == htole32(0x55550000))) {
+ ADVANCE_RXQ(sc);
+ continue;
+ }
+
+ switch (pkt->hdr.code) {
+ case IWM_REPLY_RX_PHY_CMD:
+ iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
+ break;
+
+ case IWM_REPLY_RX_MPDU_CMD:
+ iwm_mvm_rx_rx_mpdu(sc, pkt, data);
+ break;
+
+ case IWM_TX_CMD:
+ iwm_mvm_rx_tx_cmd(sc, pkt, data);
+ break;
+
+ case IWM_MISSED_BEACONS_NOTIFICATION:
+ /* OpenBSD does not provide ieee80211_beacon_miss() */
+ break;
+
+ case IWM_MVM_ALIVE: {
+ struct iwm_mvm_alive_resp *resp;
+ SYNC_RESP_STRUCT(resp, pkt);
+
+ sc->sc_uc.uc_error_event_table
+ = le32toh(resp->error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table
+ = le32toh(resp->log_event_table_ptr);
+ sc->sched_base = le32toh(resp->scd_base_ptr);
+ sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
+
+ sc->sc_uc.uc_intr = 1;
+ wakeup(&sc->sc_uc);
+ break; }
+
+ case IWM_CALIB_RES_NOTIF_PHY_DB: {
+ struct iwm_calib_res_notif_phy_db *phy_db_notif;
+ SYNC_RESP_STRUCT(phy_db_notif, pkt);
+
+ iwm_phy_db_set_section(sc, phy_db_notif);
+
+ break; }
+
+ case IWM_STATISTICS_NOTIFICATION: {
+ struct iwm_notif_statistics *stats;
+ SYNC_RESP_STRUCT(stats, pkt);
+ memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
+ break; }
+
+ case IWM_NVM_ACCESS_CMD:
+ if (sc->sc_wantresp == ((qid << 16) | idx)) {
+ bus_dmamap_sync(sc->sc_dmat, data->map, 0,
+ sizeof(sc->sc_cmd_resp),
+ BUS_DMASYNC_POSTREAD);
+ memcpy(sc->sc_cmd_resp,
+ pkt, sizeof(sc->sc_cmd_resp));
+ }
+ break;
+
+ case IWM_PHY_CONFIGURATION_CMD:
+ case IWM_TX_ANT_CONFIGURATION_CMD:
+ case IWM_ADD_STA:
+ case IWM_MAC_CONTEXT_CMD:
+ case IWM_REPLY_SF_CFG_CMD:
+ case IWM_POWER_TABLE_CMD:
+ case IWM_PHY_CONTEXT_CMD:
+ case IWM_BINDING_CONTEXT_CMD:
+ case IWM_TIME_EVENT_CMD:
+ case IWM_SCAN_REQUEST_CMD:
+ case IWM_REPLY_BEACON_FILTERING_CMD:
+ case IWM_MAC_PM_POWER_TABLE:
+ case IWM_TIME_QUOTA_CMD:
+ case IWM_REMOVE_STA:
+ case IWM_TXPATH_FLUSH:
+ case IWM_LQ_CMD:
+ SYNC_RESP_STRUCT(cresp, pkt);
+ if (sc->sc_wantresp == ((qid << 16) | idx)) {
+ memcpy(sc->sc_cmd_resp,
+ pkt, sizeof(*pkt)+sizeof(*cresp));
+ }
+ break;
+
+ /* ignore */
+ case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
+ break;
+
+ case IWM_INIT_COMPLETE_NOTIF:
+ sc->sc_init_complete = 1;
+ wakeup(&sc->sc_init_complete);
+ break;
+
+ case IWM_SCAN_COMPLETE_NOTIFICATION: {
+ struct iwm_scan_complete_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ task_add(sc->sc_eswq, &sc->sc_eswk);
+ break; }
+
+ case IWM_REPLY_ERROR: {
+ struct iwm_error_resp *resp;
+ SYNC_RESP_STRUCT(resp, pkt);
+
+ printf("%s: Firmware error 0x%x, cmd 0x%x\n",
+ DEVNAME(sc), le32toh(resp->error_type),
+ resp->cmd_id);
+ break; }
+
+ case IWM_TIME_EVENT_NOTIFICATION: {
+ struct iwm_time_event_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ if (notif->status) {
+ if (le32toh(notif->action) &
+ IWM_TE_V2_NOTIF_HOST_EVENT_START)
+ sc->sc_auth_prot = 2;
+ else
+ sc->sc_auth_prot = 0;
+ } else {
+ sc->sc_auth_prot = -1;
+ }
+ wakeup(&sc->sc_auth_prot);
+ break; }
+
+ default:
+ printf("%s: frame %d/%d %x UNHANDLED (this should "
+ "not happen)\n", DEVNAME(sc), qid, idx,
+ pkt->len_n_flags);
+ break;
+ }
+
+ /*
+ * Why test bit 0x80? The Linux driver:
+ *
+ * There is one exception: uCode sets bit 15 when it
+ * originates the response/notification, i.e. when the
+ * response/notification is not a direct response to a
+ * command sent by the driver. For example, uCode issues
+ * IWM_REPLY_RX when it sends a received frame to the driver;
+ * it is not a direct response to any driver command.
+ *
+ * Ok, so since when is 7 == 15? Well, the Linux driver
+ * uses a slightly different format for pkt->hdr, and "qid"
+ * is actually the upper byte of a two-byte field.
+ */
+ if (!(pkt->hdr.qid & (1 << 7))) {
+ iwm_cmd_done(sc, pkt);
+ }
+
+ ADVANCE_RXQ(sc);
+ }
+
+ IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * Tell the firmware what we have processed.
+ * Seems like the hardware gets upset unless we align
+ * the write by 8??
+ */
+ hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
+ IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
+}
+
+int
+iwm_intr(void *arg)
+{
+ struct iwm_softc *sc = arg;
+ struct ifnet *ifp = IC2IFP(&sc->sc_ic);
+ int handled = 0;
+ int r1, r2, rv = 0;
+ int isperiodic = 0;
+
+ IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
+
+ if (sc->sc_flags & IWM_FLAG_USE_ICT) {
+ uint32_t *ict = sc->ict_dma.vaddr;
+ int tmp;
+
+ tmp = htole32(ict[sc->ict_cur]);
+ if (!tmp)
+ goto out_ena;
+
+ /*
+ * ok, there was something. keep plowing until we have all.
+ */
+ r1 = r2 = 0;
+ while (tmp) {
+ r1 |= tmp;
+ ict[sc->ict_cur] = 0;
+ sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
+ tmp = htole32(ict[sc->ict_cur]);
+ }
+
+ /* this is where the fun begins. don't ask */
+ if (r1 == 0xffffffff)
+ r1 = 0;
+
+ /* i am not expected to understand this */
+ if (r1 & 0xc0000)
+ r1 |= 0x8000;
+ r1 = (0xff & r1) | ((0xff00 & r1) << 16);
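+		/*
+		 * This appears to map the ICT's compressed layout back onto
+		 * the CSR_INT bit positions: the low byte stays put and the
+		 * second byte is moved up to bits 24-31.
+		 */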
+ } else {
+ r1 = IWM_READ(sc, IWM_CSR_INT);
+ /* "hardware gone" (where, fishing?) */
+ if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
+ goto out;
+ r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
+ }
+ if (r1 == 0 && r2 == 0) {
+ goto out_ena;
+ }
+
+ IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
+
+ /* ignored */
+ handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
+
+ if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
+#ifdef IWM_DEBUG
+ int i;
+
+ iwm_nic_error(sc);
+
+ /* Dump driver status (TX and RX rings) while we're here. */
+ DPRINTF(("driver status:\n"));
+ for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
+ struct iwm_tx_ring *ring = &sc->txq[i];
+ DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
+ "queued=%-3d\n",
+ i, ring->qid, ring->cur, ring->queued));
+ }
+ DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
+ DPRINTF((" 802.11 state %d\n", sc->sc_ic.ic_state));
+#endif
+
+ printf("%s: firmware error, stopping device\n", DEVNAME(sc));
+ ifp->if_flags &= ~IFF_UP;
+ iwm_stop(ifp, 1);
+ rv = 1;
+ goto out;
+
+ }
+
+ if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
+ handled |= IWM_CSR_INT_BIT_HW_ERR;
+		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
+ ifp->if_flags &= ~IFF_UP;
+ iwm_stop(ifp, 1);
+ rv = 1;
+ goto out;
+ }
+
+ /* firmware chunk loaded */
+ if (r1 & IWM_CSR_INT_BIT_FH_TX) {
+ IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
+ handled |= IWM_CSR_INT_BIT_FH_TX;
+
+ sc->sc_fw_chunk_done = 1;
+ wakeup(&sc->sc_fw);
+ }
+
+ if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
+ handled |= IWM_CSR_INT_BIT_RF_KILL;
+ if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
+ printf("%s: rfkill switch, disabling interface\n",
+ DEVNAME(sc));
+ ifp->if_flags &= ~IFF_UP;
+ iwm_stop(ifp, 1);
+ }
+ }
+
+ /*
+ * The Linux driver uses periodic interrupts to avoid races.
+ * We cargo-cult like it's going out of fashion.
+ */
+ if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
+ handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
+ IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
+ if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
+ IWM_WRITE_1(sc,
+ IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
+ isperiodic = 1;
+ }
+
+ if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
+ handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
+ IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
+
+ iwm_notif_intr(sc);
+
+ /* enable periodic interrupt, see above */
+ if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
+ IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
+ IWM_CSR_INT_PERIODIC_ENA);
+ }
+
+ if (__predict_false(r1 & ~handled))
+ printf("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1);
+ rv = 1;
+
+ out_ena:
+ iwm_restore_interrupts(sc);
+ out:
+ return rv;
+}
+
+/*
+ * Autoconf glue-sniffing
+ */
+
+typedef void *iwm_match_t;
+
+static const struct pci_matchid iwm_devices[] = {
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
+};
+
+int
+iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
+{
+ return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
+ nitems(iwm_devices));
+}
+
+int
+iwm_preinit(struct iwm_softc *sc)
+{
+ int error;
+
+ if ((error = iwm_prepare_card_hw(sc)) != 0)
+ return error;
+
+ if ((error = iwm_start_hw(sc)) != 0)
+ return error;
+
+ if ((error = iwm_run_init_mvm_ucode(sc, 1)) != 0) {
+ return error;
+ }
+
+ iwm_stop_device(sc);
+ return 0;
+}
+
+void
+iwm_attach_hook(iwm_hookarg_t arg)
+{
+ struct iwm_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = &ic->ic_if;
+ int error;
+ int txq_i, i;
+
+ KASSERT(!cold);
+
+ sc->sc_wantresp = -1;
+
+ /* only one firmware possibility for now */
+ sc->sc_fwname = IWM_FWNAME;
+ sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
+
+ /*
+ * We now start fiddling with the hardware
+ */
+
+ sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
+ if (iwm_prepare_card_hw(sc) != 0) {
+ printf("%s: failed to init hw\n", DEVNAME(sc));
+ return;
+ }
+
+ /* Allocate DMA memory for firmware transfers. */
+ if ((error = iwm_alloc_fwmem(sc)) != 0) {
+ printf("%s: could not allocate memory for firmware\n",
+ DEVNAME(sc));
+ return;
+ }
+
+ /* Allocate "Keep Warm" page. */
+ if ((error = iwm_alloc_kw(sc)) != 0) {
+ printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
+ goto fail1;
+ }
+
+ /* We use ICT interrupts */
+ if ((error = iwm_alloc_ict(sc)) != 0) {
+ printf("%s: could not allocate ICT table\n", DEVNAME(sc));
+ goto fail2;
+ }
+
+ /* Allocate TX scheduler "rings". */
+ if ((error = iwm_alloc_sched(sc)) != 0) {
+ printf("%s: could not allocate TX scheduler rings\n",
+ DEVNAME(sc));
+ goto fail3;
+ }
+
+ /* Allocate TX rings */
+ for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
+ if ((error = iwm_alloc_tx_ring(sc,
+ &sc->txq[txq_i], txq_i)) != 0) {
+ printf("%s: could not allocate TX ring %d\n",
+ DEVNAME(sc), txq_i);
+ goto fail4;
+ }
+ }
+
+ /* Allocate RX ring. */
+ if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
+ printf("%s: could not allocate RX ring\n", DEVNAME(sc));
+ goto fail4;
+ }
+
+ sc->sc_eswq = taskq_create("iwmes", 1, IPL_NET);
+ if (sc->sc_eswq == NULL)
+ goto fail4;
+ sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET);
+ if (sc->sc_nswq == NULL)
+ goto fail4;
+
+ /* Clear pending interrupts. */
+ IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
+
+ if ((error = iwm_preinit(sc)) != 0) {
+ goto fail4;
+ }
+
+ printf("%s: hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
+ DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
+ IWM_UCODE_MAJOR(sc->sc_fwver),
+ IWM_UCODE_MINOR(sc->sc_fwver),
+ IWM_UCODE_API(sc->sc_fwver),
+ ether_sprintf(sc->sc_nvm.hw_addr));
+
+ ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
+ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
+ ic->ic_state = IEEE80211_S_INIT;
+
+ /* Set device capabilities. */
+ ic->ic_caps =
+ IEEE80211_C_RSN | /* WPA/RSN */
+ IEEE80211_C_SCANALL | /* device scans all channels at once */
+ IEEE80211_C_SHSLOT | /* short slot time supported */
+ IEEE80211_C_SHPREAMBLE; /* short preamble supported */
+
+ ic->ic_sup_rates[IEEE80211_MODE_11A] = iwm_rateset_11a;
+ ic->ic_sup_rates[IEEE80211_MODE_11B] = iwm_rateset_11b;
+ ic->ic_sup_rates[IEEE80211_MODE_11G] = iwm_rateset_11g;
+
+ for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
+ sc->sc_phyctxt[i].id = i;
+ }
+
+ sc->sc_amrr.amrr_min_success_threshold = 1;
+ sc->sc_amrr.amrr_max_success_threshold = 15;
+
+ /* IBSS channel undefined for now. */
+ ic->ic_ibss_chan = &ic->ic_channels[1];
+
+ /* Max RSSI */
+ ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
+
+ ifp->if_softc = sc;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = iwm_ioctl;
+ ifp->if_start = iwm_start;
+ ifp->if_watchdog = iwm_watchdog;
+ IFQ_SET_READY(&ifp->if_snd);
+ memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
+
+ if_attach(ifp);
+ ieee80211_ifattach(ifp);
+
+ ic->ic_node_alloc = iwm_node_alloc;
+
+ /* Override 802.11 state transition machine. */
+ sc->sc_newstate = ic->ic_newstate;
+ ic->ic_newstate = iwm_newstate;
+ ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
+
+ timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
+
+ return;
+
+ /* Free allocated memory if something failed during attachment. */
+fail4: while (--txq_i >= 0)
+ iwm_free_tx_ring(sc, &sc->txq[txq_i]);
+ iwm_free_sched(sc);
+fail3: if (sc->ict_dma.vaddr != NULL)
+ iwm_free_ict(sc);
+fail2: iwm_free_kw(sc);
+fail1: iwm_free_fwmem(sc);
+ return;
+}
+
+void
+iwm_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct iwm_softc *sc = (void *)self;
+ struct pci_attach_args *pa = aux;
+ pci_intr_handle_t ih;
+ pcireg_t reg, memtype;
+ const char *intrstr;
+ int error;
+
+ sc->sc_pct = pa->pa_pc;
+ sc->sc_pcitag = pa->pa_tag;
+ sc->sc_dmat = pa->pa_dmat;
+
+ task_set(&sc->sc_eswk, iwm_endscan_cb, sc);
+
+ /*
+ * Get the offset of the PCI Express Capability Structure in PCI
+ * Configuration Space.
+ */
+ error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
+ PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
+ if (error == 0) {
+ printf("%s: PCIe capability structure not found!\n",
+ DEVNAME(sc));
+ return;
+ }
+
+ /* Clear device-specific "PCI retry timeout" register (41h). */
+ reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
+ pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
+
+ /* Enable bus-mastering and hardware bug workaround. */
+ reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
+ reg |= PCI_COMMAND_MASTER_ENABLE;
+ /* if !MSI */
+ if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
+ reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
+ }
+ pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
+
+ memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
+ error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
+ &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
+ if (error != 0) {
+ printf("%s: can't map mem space\n", DEVNAME(sc));
+ return;
+ }
+
+ /* Install interrupt handler. */
+ if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
+ printf("%s: can't map interrupt\n", DEVNAME(sc));
+ return;
+ }
+
+ intrstr = pci_intr_string(sc->sc_pct, ih);
+ sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc,
+ DEVNAME(sc));
+
+ if (sc->sc_ih == NULL) {
+ printf("\n");
+ printf("%s: can't establish interrupt", DEVNAME(sc));
+ if (intrstr != NULL)
+ printf(" at %s", intrstr);
+ printf("\n");
+ return;
+ }
+ printf(", %s\n", intrstr);
+
+ /*
+ * We can't do normal attach before the file system is mounted
+ * because we cannot read the MAC address without loading the
+ * firmware from disk. So we postpone until mountroot is done.
+ * Notably, this will require a full driver unload/load cycle
+ * (or reboot) in case the firmware is not present when the
+ * hook runs.
+ */
+ if (rootvp == NULL)
+ mountroothook_establish(iwm_attach_hook, sc);
+ else
+ iwm_attach_hook(sc);
+}
+
+struct cfdriver iwm_cd = {
+ NULL, "iwm", DV_IFNET
+};
+
+struct cfattach iwm_ca = {
+ sizeof(struct iwm_softc), iwm_match, iwm_attach,
+ NULL, NULL
+};
diff --git a/sys/dev/pci/if_iwmreg.h b/sys/dev/pci/if_iwmreg.h
new file mode 100644
index 00000000000..ac3acab9c47
--- /dev/null
+++ b/sys/dev/pci/if_iwmreg.h
@@ -0,0 +1,5304 @@
+/* $OpenBSD: if_iwmreg.h,v 1.1 2015/02/06 19:49:29 stsp Exp $ */
+
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+/*
+ * BEGIN iwl-csr.h
+ */
+
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. IWM_CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_write_direct32() family for these registers;
+ * no need to "grab nic access" via IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE: Device does need to be awake in order to read this memory
+ * via IWM_CSR_EEPROM and IWM_CSR_OTP registers
+ */
+#define IWM_CSR_HW_IF_CONFIG_REG (0x000) /* hardware interface config */
+#define IWM_CSR_INT_COALESCING (0x004) /* accum ints, 32-usec units */
+#define IWM_CSR_INT (0x008) /* host interrupt status/ack */
+#define IWM_CSR_INT_MASK (0x00c) /* host interrupt enable */
+#define IWM_CSR_FH_INT_STATUS (0x010) /* busmaster int status/ack*/
+#define IWM_CSR_GPIO_IN (0x018) /* read external chip pins */
+#define IWM_CSR_RESET (0x020) /* busmaster enable, NMI, etc*/
+#define IWM_CSR_GP_CNTRL (0x024)
+
+/* 2nd byte of IWM_CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define IWM_CSR_INT_PERIODIC_REG (0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-16: Reserved
+ * 15-4: Type of device: see IWM_CSR_HW_REV_TYPE_xxx definitions
+ * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
+ * 1-0: "Dash" (-) value, as in A-1, etc.
+ */
+#define IWM_CSR_HW_REV (0x028)
+
+/*
+ * EEPROM and OTP (one-time-programmable) memory reads
+ *
+ * NOTE: Device must be awake, initialized via apm_ops.init(),
+ * in order to read.
+ */
+#define IWM_CSR_EEPROM_REG (0x02c)
+#define IWM_CSR_EEPROM_GP (0x030)
+#define IWM_CSR_OTP_GP_REG (0x034)
+
+#define IWM_CSR_GIO_REG (0x03C)
+#define IWM_CSR_GP_UCODE_REG (0x048)
+#define IWM_CSR_GP_DRIVER_REG (0x050)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define IWM_CSR_UCODE_DRV_GP1 (0x054)
+#define IWM_CSR_UCODE_DRV_GP1_SET (0x058)
+#define IWM_CSR_UCODE_DRV_GP1_CLR (0x05c)
+#define IWM_CSR_UCODE_DRV_GP2 (0x060)
+
+#define IWM_CSR_LED_REG (0x094)
+#define IWM_CSR_DRAM_INT_TBL_REG (0x0A0)
+#define IWM_CSR_MAC_SHADOW_REG_CTRL (0x0A8) /* 6000 and up */
+
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define IWM_CSR_GIO_CHICKEN_BITS (0x100)
+
+/* Analog phase-lock-loop configuration */
+#define IWM_CSR_ANA_PLL_CFG (0x20c)
+
+/*
+ * CSR Hardware Revision Workaround Register. Indicates hardware rev;
+ * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
+ * See also IWM_CSR_HW_REV register.
+ * Bit fields:
+ * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
+ * 1-0: "Dash" (-) value, as in C-1, etc.
+ */
+#define IWM_CSR_HW_REV_WA_REG (0x22C)
+
+#define IWM_CSR_DBG_HPET_MEM_REG (0x240)
+#define IWM_CSR_DBG_LINK_PWR_MGMT_REG (0x250)
+
+/* Bits for IWM_CSR_HW_IF_CONFIG_REG */
+#define IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
+#define IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
+#define IWM_CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
+#define IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
+#define IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+#define IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
+#define IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
+#define IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
+
+#define IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
+#define IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
+#define IWM_CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
+#define IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
+#define IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
+#define IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
+
+#define IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
+#define IWM_CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
+#define IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define IWM_CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+
+#define IWM_CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
+#define IWM_CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define IWM_CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define IWM_CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define IWM_CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define IWM_CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define IWM_CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define IWM_CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define IWM_CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define IWM_CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define IWM_CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
+#define IWM_CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define IWM_CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+
+#define IWM_CSR_INI_SET_MASK (IWM_CSR_INT_BIT_FH_RX | \
+ IWM_CSR_INT_BIT_HW_ERR | \
+ IWM_CSR_INT_BIT_FH_TX | \
+ IWM_CSR_INT_BIT_SW_ERR | \
+ IWM_CSR_INT_BIT_RF_KILL | \
+ IWM_CSR_INT_BIT_SW_RX | \
+ IWM_CSR_INT_BIT_WAKEUP | \
+ IWM_CSR_INT_BIT_ALIVE | \
+ IWM_CSR_INT_BIT_RX_PERIODIC)
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define IWM_CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
+#define IWM_CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define IWM_CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define IWM_CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define IWM_CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define IWM_CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+
+#define IWM_CSR_FH_INT_RX_MASK (IWM_CSR_FH_INT_BIT_HI_PRIOR | \
+ IWM_CSR_FH_INT_BIT_RX_CHNL1 | \
+ IWM_CSR_FH_INT_BIT_RX_CHNL0)
+
+#define IWM_CSR_FH_INT_TX_MASK (IWM_CSR_FH_INT_BIT_TX_CHNL1 | \
+ IWM_CSR_FH_INT_BIT_TX_CHNL0)
+
+/* GPIO */
+#define IWM_CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
+#define IWM_CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
+#define IWM_CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
+
+/* RESET */
+#define IWM_CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
+#define IWM_CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define IWM_CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
+#define IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
+#define IWM_CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
+#define IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24: POWER_SAVE_TYPE
+ * Indicates current power-saving mode:
+ * 000 -- No power saving
+ * 001 -- MAC power-down
+ * 010 -- PHY (radio) power-down
+ * 011 -- Error
+ * 9-6: SYS_CONFIG
+ * Indicates current system configuration, reflecting pins on chip
+ * as forced high/low by device circuit board.
+ * 4: GOING_TO_SLEEP
+ * Indicates MAC is entering a power-saving sleep power-down.
+ * Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that device has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use IWM_CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
+ */
+#define IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
+#define IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
+#define IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+
+#define IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
+#define IWM_CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
+#define IWM_CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
+#define IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
+
+
+/* HW REV */
+#define IWM_CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
+#define IWM_CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+
+#define IWM_CSR_HW_REV_TYPE_MSK (0x000FFF0)
+#define IWM_CSR_HW_REV_TYPE_5300 (0x0000020)
+#define IWM_CSR_HW_REV_TYPE_5350 (0x0000030)
+#define IWM_CSR_HW_REV_TYPE_5100 (0x0000050)
+#define IWM_CSR_HW_REV_TYPE_5150 (0x0000040)
+#define IWM_CSR_HW_REV_TYPE_1000 (0x0000060)
+#define IWM_CSR_HW_REV_TYPE_6x00 (0x0000070)
+#define IWM_CSR_HW_REV_TYPE_6x50 (0x0000080)
+#define IWM_CSR_HW_REV_TYPE_6150 (0x0000084)
+#define IWM_CSR_HW_REV_TYPE_6x05 (0x00000B0)
+#define IWM_CSR_HW_REV_TYPE_6x30 IWM_CSR_HW_REV_TYPE_6x05
+#define IWM_CSR_HW_REV_TYPE_6x35 IWM_CSR_HW_REV_TYPE_6x05
+#define IWM_CSR_HW_REV_TYPE_2x30 (0x00000C0)
+#define IWM_CSR_HW_REV_TYPE_2x00 (0x0000100)
+#define IWM_CSR_HW_REV_TYPE_105 (0x0000110)
+#define IWM_CSR_HW_REV_TYPE_135 (0x0000120)
+#define IWM_CSR_HW_REV_TYPE_NONE (0x00001F0)
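+/*
+ * Example (illustrative value only): a hw_rev of 0x144 would decode via
+ * the macros above to type 0x140, step 1 (B step) and dash 0.
+ */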
+
+/* EEPROM REG */
+#define IWM_CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
+#define IWM_CSR_EEPROM_REG_BIT_CMD (0x00000002)
+#define IWM_CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
+#define IWM_CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
+
+/* EEPROM GP */
+#define IWM_CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
+#define IWM_CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
+#define IWM_CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000)
+#define IWM_CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001)
+#define IWM_CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
+#define IWM_CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
+
+/* One-time-programmable memory general purpose reg */
+#define IWM_CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */
+#define IWM_CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */
+#define IWM_CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */
+#define IWM_CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */
+
+/* GP REG */
+#define IWM_CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
+#define IWM_CSR_GP_REG_NO_POWER_SAVE (0x00000000)
+#define IWM_CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
+#define IWM_CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
+#define IWM_CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
+
+
+/* CSR GIO */
+#define IWM_CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ * 4: UCODE_DISABLE
+ * Host sets this to request permanent halt of uCode, same as
+ * sending CARD_STATE command with "halt" bit set.
+ * 3: CT_KILL_EXIT
+ * Host sets this to request exit from CT_KILL state, i.e. host thinks
+ * device temperature is low enough to continue normal operation.
+ * 2: CMD_BLOCKED
+ * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ * to release uCode to clear all Tx and command queues, enter
+ * unassociated mode, and power down.
+ * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
+ * 1: SW_BIT_RFKILL
+ * Host sets this when issuing CARD_STATE command to request
+ * device sleep.
+ * 0: MAC_SLEEP
+ * uCode sets this when preparing a power-saving power-down.
+ * uCode resets this when power-up is complete and SRAM is sane.
+ * NOTE: device saves internal SRAM data to host when powering down,
+ * and must restore this data after powering back up.
+ * MAC_SLEEP is the best indication that restore is complete.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ */
+#define IWM_CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
+#define IWM_CSR_UCODE_SW_BIT_RFKILL (0x00000002)
+#define IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
+#define IWM_CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
+#define IWM_CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020)
+
+/* GP Driver */
+#define IWM_CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003)
+#define IWM_CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000)
+#define IWM_CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001)
+#define IWM_CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002)
+#define IWM_CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
+#define IWM_CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008)
+
+#define IWM_CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
+#define IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
+
+/* LED */
+#define IWM_CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
+#define IWM_CSR_LED_REG_TURN_ON (0x60)
+#define IWM_CSR_LED_REG_TURN_OFF (0x20)
+
+/* ANA_PLL */
+#define IWM_CSR50_ANA_PLL_CFG_VAL (0x00880300)
+
+/* HPET MEM debug */
+#define IWM_CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define IWM_CSR_DRAM_INT_TBL_ENABLE (1 << 31)
+#define IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+
+/* SECURE boot registers */
+#define IWM_CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
+enum iwm_secure_boot_config_reg {
+ IWM_CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+ IWM_CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
+};
+
+#define IWM_CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
+#define IWM_CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
+enum iwm_secure_boot_status_reg {
+ IWM_CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
+ IWM_CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
+ IWM_CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
+ IWM_CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
+ IWM_CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
+};
+
+#define IWM_CSR_UCODE_LOAD_STATUS_ADDR (0x100)
+enum iwm_secure_load_status_reg {
+ IWM_CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
+ IWM_CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
+ IWM_CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ IWM_CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+};
+
+#define IWM_CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
+#define IWM_CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
+
+#define IWM_CSR_SECURE_TIME_OUT (100)
+
+#define IWM_FH_TCSR_0_REG0 (0x1D00)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_write_direct32()/iwl_read_direct32() family for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define IWM_HBUS_BASE (0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job. Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ * 0-31: memory address within device
+ */
+#define IWM_HBUS_TARG_MEM_RADDR (IWM_HBUS_BASE+0x00c)
+#define IWM_HBUS_TARG_MEM_WADDR (IWM_HBUS_BASE+0x010)
+#define IWM_HBUS_TARG_MEM_WDAT (IWM_HBUS_BASE+0x018)
+#define IWM_HBUS_TARG_MEM_RDAT (IWM_HBUS_BASE+0x01c)
+
+/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
+#define IWM_HBUS_TARG_MBX_C (IWM_HBUS_BASE+0x030)
+#define IWM_HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.). First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ * 0-15: register address (offset) within device
+ * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define IWM_HBUS_TARG_PRPH_WADDR (IWM_HBUS_BASE+0x044)
+#define IWM_HBUS_TARG_PRPH_RADDR (IWM_HBUS_BASE+0x048)
+#define IWM_HBUS_TARG_PRPH_WDAT (IWM_HBUS_BASE+0x04c)
+#define IWM_HBUS_TARG_PRPH_RDAT (IWM_HBUS_BASE+0x050)
+
+/* Used to enable DBGM */
+#define IWM_HBUS_TARG_TEST_REG (IWM_HBUS_BASE+0x05c)
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ * 0-7: queue write index
+ * 11-8: queue selector
+ */
+#define IWM_HBUS_TARG_WRPTR (IWM_HBUS_BASE+0x060)
+
+/**********************************************************
+ * CSR values
+ **********************************************************/
+ /*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ */
+#define IWM_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWM_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWM_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWM_HOST_INT_OPER_MODE (1 << 31)
+
+/*****************************************************************************
+ * 7000/3000 series SHR DTS addresses *
+ *****************************************************************************/
+
+/* Diode Results Register Structure: */
+enum iwm_dtd_diode_reg {
+ IWM_DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */
+ IWM_DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */
+ IWM_DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */
+ IWM_DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */
+ IWM_DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */
+ IWM_DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */
+/* Those are the masks INSIDE the flags bit-field: */
+ IWM_DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0,
+ IWM_DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */
+ IWM_DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7,
+ IWM_DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */
+};
+
+/*
+ * END iwl-csr.h
+ */
+
+/*
+ * BEGIN iwl-fw.h
+ */
+
+/**
+ * enum iwl_ucode_tlv_flag - ucode API flags
+ * @IWM_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ * was a separate TLV but moved here to save space.
+ * @IWM_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * treats good CRC threshold as a boolean
+ * @IWM_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWM_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
+ * @IWM_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWM_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWM_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ * offload profile config command.
+ * @IWM_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
+ * @IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
+ * @IWM_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWM_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
+ * @IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWM_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
+ * connection when going back to D0
+ * @IWM_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWM_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWM_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
+ * @IWM_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
+ * @IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
+ * containing CAM (Continuous Active Mode) indication.
+ * @IWM_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
+ * single bound interface).
+ * @IWM_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ */
+enum iwm_ucode_tlv_flag {
+ IWM_UCODE_TLV_FLAGS_PAN = (1 << 0),
+ IWM_UCODE_TLV_FLAGS_NEWSCAN = (1 << 1),
+ IWM_UCODE_TLV_FLAGS_MFP = (1 << 2),
+ IWM_UCODE_TLV_FLAGS_P2P = (1 << 3),
+ IWM_UCODE_TLV_FLAGS_DW_BC_TABLE = (1 << 4),
+ IWM_UCODE_TLV_FLAGS_NEWBT_COEX = (1 << 5),
+ IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = (1 << 6),
+ IWM_UCODE_TLV_FLAGS_SHORT_BL = (1 << 7),
+ IWM_UCODE_TLV_FLAGS_RX_ENERGY_API = (1 << 8),
+ IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = (1 << 9),
+ IWM_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = (1 << 10),
+ IWM_UCODE_TLV_FLAGS_BF_UPDATED = (1 << 11),
+ IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID = (1 << 12),
+ IWM_UCODE_TLV_FLAGS_D3_CONTINUITY_API = (1 << 14),
+ IWM_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = (1 << 15),
+ IWM_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = (1 << 16),
+ IWM_UCODE_TLV_FLAGS_SCHED_SCAN = (1 << 17),
+ IWM_UCODE_TLV_FLAGS_STA_KEY_CMD = (1 << 19),
+ IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD = (1 << 20),
+ IWM_UCODE_TLV_FLAGS_P2P_PS = (1 << 21),
+ IWM_UCODE_TLV_FLAGS_UAPSD_SUPPORT = (1 << 24),
+ IWM_UCODE_TLV_FLAGS_P2P_PS_UAPSD = (1 << 26),
+};
+
+/* The default calibrate table size if not specified by firmware file */
+#define IWM_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IWM_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
+#define IWM_MAX_PHY_CALIBRATE_TBL_SIZE 253
+
+/* The default max probe length if not specified by the firmware file */
+#define IWM_DEFAULT_MAX_PROBE_LENGTH 200
+
+/*
+ * enumeration of ucode section.
+ * This enumeration is used directly for older firmware (before 16.0).
+ * For new firmware, there can be up to 4 sections (see below) but the
+ * first one packaged into the firmware file is the DATA section and
+ * some debugging code accesses that.
+ */
+enum iwm_ucode_sec {
+ IWM_UCODE_SECTION_DATA,
+ IWM_UCODE_SECTION_INST,
+};
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define IWM_UCODE_SECTION_MAX 6
+#define IWM_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWM_UCODE_SECTION_MAX/2)
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWM_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWM_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWM_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWM_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
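+/*
+ * Example (illustrative value only): a version word of 0x09011001 yields
+ * major 9, minor 1, API 16, serial 1 when run through the macros above.
+ */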
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwm_tlv_calib_ctrl {
+ uint32_t flow_trigger;
+ uint32_t event_trigger;
+} __packed;
+
+enum iwm_fw_phy_cfg {
+ IWM_FW_PHY_CFG_RADIO_TYPE_POS = 0,
+ IWM_FW_PHY_CFG_RADIO_TYPE = 0x3 << IWM_FW_PHY_CFG_RADIO_TYPE_POS,
+ IWM_FW_PHY_CFG_RADIO_STEP_POS = 2,
+ IWM_FW_PHY_CFG_RADIO_STEP = 0x3 << IWM_FW_PHY_CFG_RADIO_STEP_POS,
+ IWM_FW_PHY_CFG_RADIO_DASH_POS = 4,
+ IWM_FW_PHY_CFG_RADIO_DASH = 0x3 << IWM_FW_PHY_CFG_RADIO_DASH_POS,
+ IWM_FW_PHY_CFG_TX_CHAIN_POS = 16,
+ IWM_FW_PHY_CFG_TX_CHAIN = 0xf << IWM_FW_PHY_CFG_TX_CHAIN_POS,
+ IWM_FW_PHY_CFG_RX_CHAIN_POS = 20,
+ IWM_FW_PHY_CFG_RX_CHAIN = 0xf << IWM_FW_PHY_CFG_RX_CHAIN_POS,
+};
+
+#define IWM_UCODE_MAX_CS 1
+
+/**
+ * struct iwm_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for a future use)
+ * @hdr_len: a size of MPDU security header
+ * @pn_len: a size of PN
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: mic length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+ */
+struct iwm_fw_cipher_scheme {
+ uint32_t cipher;
+ uint8_t flags;
+ uint8_t hdr_len;
+ uint8_t pn_len;
+ uint8_t pn_off;
+ uint8_t key_idx_off;
+ uint8_t key_idx_mask;
+ uint8_t key_idx_shift;
+ uint8_t mic_len;
+ uint8_t hw_cipher;
+} __packed;
+
+/**
+ * struct iwm_fw_cscheme_list - a cipher scheme list
+ * @size: a number of entries
+ * @cs: cipher scheme entries
+ */
+struct iwm_fw_cscheme_list {
+ uint8_t size;
+ struct iwm_fw_cipher_scheme cs[];
+} __packed;
+
+/*
+ * END iwl-fw.h
+ */
+
+/*
+ * BEGIN iwl-fw-file.h
+ */
+
+/* v1/v2 uCode file layout */
+struct iwm_ucode_header {
+ uint32_t ver; /* major/minor/API/serial */
+ union {
+ struct {
+ uint32_t inst_size; /* bytes of runtime code */
+ uint32_t data_size; /* bytes of runtime data */
+ uint32_t init_size; /* bytes of init code */
+ uint32_t init_data_size; /* bytes of init data */
+ uint32_t boot_size; /* bytes of bootstrap code */
+ uint8_t data[0]; /* in same order as sizes */
+ } v1;
+ struct {
+ uint32_t build; /* build number */
+ uint32_t inst_size; /* bytes of runtime code */
+ uint32_t data_size; /* bytes of runtime data */
+ uint32_t init_size; /* bytes of init code */
+ uint32_t init_data_size; /* bytes of init data */
+ uint32_t boot_size; /* bytes of bootstrap code */
+ uint8_t data[0]; /* in same order as sizes */
+ } v2;
+ } u;
+};
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, each of which specifies
+ * some piece of data.
+ */
+
+enum iwm_ucode_tlv_type {
+ IWM_UCODE_TLV_INVALID = 0, /* unused */
+ IWM_UCODE_TLV_INST = 1,
+ IWM_UCODE_TLV_DATA = 2,
+ IWM_UCODE_TLV_INIT = 3,
+ IWM_UCODE_TLV_INIT_DATA = 4,
+ IWM_UCODE_TLV_BOOT = 5,
+ IWM_UCODE_TLV_PROBE_MAX_LEN = 6, /* a uint32_t value */
+ IWM_UCODE_TLV_PAN = 7,
+ IWM_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
+ IWM_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
+ IWM_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
+ IWM_UCODE_TLV_INIT_EVTLOG_PTR = 11,
+ IWM_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
+ IWM_UCODE_TLV_INIT_ERRLOG_PTR = 13,
+ IWM_UCODE_TLV_ENHANCE_SENS_TBL = 14,
+ IWM_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
+ IWM_UCODE_TLV_WOWLAN_INST = 16,
+ IWM_UCODE_TLV_WOWLAN_DATA = 17,
+ IWM_UCODE_TLV_FLAGS = 18,
+ IWM_UCODE_TLV_SEC_RT = 19,
+ IWM_UCODE_TLV_SEC_INIT = 20,
+ IWM_UCODE_TLV_SEC_WOWLAN = 21,
+ IWM_UCODE_TLV_DEF_CALIB = 22,
+ IWM_UCODE_TLV_PHY_SKU = 23,
+ IWM_UCODE_TLV_SECURE_SEC_RT = 24,
+ IWM_UCODE_TLV_SECURE_SEC_INIT = 25,
+ IWM_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
+ IWM_UCODE_TLV_NUM_OF_CPU = 27,
+ IWM_UCODE_TLV_CSCHEME = 28,
+
+ /*
+ * Following two are not in our base tag, but allow
+ * handling ucode version 9.
+ */
+ IWM_UCODE_TLV_API_CHANGES_SET = 29,
+ IWM_UCODE_TLV_ENABLED_CAPABILITIES = 30
+};
+
+struct iwm_ucode_tlv {
+ uint32_t type; /* see above */
+ uint32_t length; /* not including type/length fields */
+ uint8_t data[0];
+};
+
+#define IWM_TLV_UCODE_MAGIC 0x0a4c5749
+
+struct iwm_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by first four bytes being
+ * zero, as such is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ uint32_t zero;
+ uint32_t magic;
+ uint8_t human_readable[64];
+ uint32_t ver; /* major/minor/API/serial */
+ uint32_t build;
+ uint64_t ignore;
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ uint8_t data[0];
+};
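+/*
+ * Sketch of a TLV walk (illustrative only, not the driver's actual parse
+ * routine): starting at data[], read a struct iwm_ucode_tlv, handle its
+ * payload according to type, then advance by sizeof(*tlv) plus length
+ * rounded up to a multiple of 4, until the end of the firmware image.
+ */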
+
+/*
+ * END iwl-fw-file.h
+ */
+
+/*
+ * BEGIN iwl-prph.h
+ */
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via IWM_HBUS_TARG_PRPH_* registers.
+ */
+#define IWM_PRPH_BASE (0x00000)
+#define IWM_PRPH_END (0xFFFFF)
+
+/* APMG (power management) constants */
+#define IWM_APMG_BASE (IWM_PRPH_BASE + 0x3000)
+#define IWM_APMG_CLK_CTRL_REG (IWM_APMG_BASE + 0x0000)
+#define IWM_APMG_CLK_EN_REG (IWM_APMG_BASE + 0x0004)
+#define IWM_APMG_CLK_DIS_REG (IWM_APMG_BASE + 0x0008)
+#define IWM_APMG_PS_CTRL_REG (IWM_APMG_BASE + 0x000c)
+#define IWM_APMG_PCIDEV_STT_REG (IWM_APMG_BASE + 0x0010)
+#define IWM_APMG_RFKILL_REG (IWM_APMG_BASE + 0x0014)
+#define IWM_APMG_RTC_INT_STT_REG (IWM_APMG_BASE + 0x001c)
+#define IWM_APMG_RTC_INT_MSK_REG (IWM_APMG_BASE + 0x0020)
+#define IWM_APMG_DIGITAL_SVR_REG (IWM_APMG_BASE + 0x0058)
+#define IWM_APMG_ANALOG_SVR_REG (IWM_APMG_BASE + 0x006C)
+
+#define IWM_APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
+#define IWM_APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
+#define IWM_APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
+
+#define IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
+#define IWM_APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
+#define IWM_APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
+#define IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
+#define IWM_APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
+#define IWM_APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
+#define IWM_APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
+
+#define IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+
+#define IWM_APMG_RTC_INT_STT_RFKILL (0x10000000)
+
+/* Device system time */
+#define IWM_DEVICE_SYSTEM_TIME_REG 0xA0206C
+
+/* Device NMI register */
+#define IWM_DEVICE_SET_NMI_REG 0x00a01c30
+
+/*****************************************************************************
+ * 7000/3000 series SHR DTS addresses *
+ *****************************************************************************/
+
+#define IWM_SHR_MISC_WFM_DTS_EN (0x00a10024)
+#define IWM_DTSC_CFG_MODE (0x00a10604)
+#define IWM_DTSC_VREF_AVG (0x00a10648)
+#define IWM_DTSC_VREF5_AVG (0x00a1064c)
+#define IWM_DTSC_CFG_MODE_PERIODIC (0x2)
+#define IWM_DTSC_PTAT_AVG (0x00a10650)
+
+
+/**
+ * Tx Scheduler
+ *
+ * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
+ * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
+ * host DRAM. It steers each frame's Tx command (which contains the frame
+ * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
+ * device. A queue maps to only one (selectable by driver) Tx DMA channel,
+ * but one DMA channel may take input from several queues.
+ *
+ * Tx DMA FIFOs have dedicated purposes.
+ *
+ * For 5000 series and up, they are used differently
+ * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
+ *
+ * 0 -- EDCA BK (background) frames, lowest priority
+ * 1 -- EDCA BE (best effort) frames, normal priority
+ * 2 -- EDCA VI (video) frames, higher priority
+ * 3 -- EDCA VO (voice) and management frames, highest priority
+ * 4 -- unused
+ * 5 -- unused
+ * 6 -- unused
+ * 7 -- Commands
+ *
+ * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
+ * In addition, driver can map the remaining queues to Tx DMA/FIFO
+ * channels 0-3 to support 11n aggregation via EDCA DMA channels.
+ *
+ * The driver sets up each queue to work in one of two modes:
+ *
+ * 1) Scheduler-Ack, in which the scheduler automatically supports a
+ * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
+ * contains TFDs for a unique combination of Recipient Address (RA)
+ * and Traffic Identifier (TID), that is, traffic of a given
+ * Quality-Of-Service (QOS) priority, destined for a single station.
+ *
+ * In scheduler-ack mode, the scheduler keeps track of the Tx status of
+ * each frame within the BA window, including whether it's been transmitted,
+ * and whether it's been acknowledged by the receiving station. The device
+ * automatically processes block-acks received from the receiving STA,
+ * and reschedules un-acked frames to be retransmitted (successful
+ * Tx completion may end up being out-of-order).
+ *
+ * The driver must maintain the queue's Byte Count table in host DRAM
+ * for this mode.
+ * This mode does not support fragmentation.
+ *
+ * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
+ * The device may automatically retry Tx, but will retry only one frame
+ * at a time, until receiving ACK from receiving station, or reaching
+ * retry limit and giving up.
+ *
+ * The command queue (#4/#9) must use this mode!
+ * This mode does not require use of the Byte Count table in host DRAM.
+ *
+ * Driver controls scheduler operation via 3 means:
+ * 1) Scheduler registers
+ * 2) Shared scheduler data base in internal SRAM
+ * 3) Shared data in host DRAM
+ *
+ * Initialization:
+ *
+ * When loading, driver should allocate memory for:
+ * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
+ * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
+ * (1024 bytes for each queue).
+ *
+ * After receiving "Alive" response from uCode, driver must initialize
+ * the scheduler (especially for queue #4/#9, the command queue, otherwise
+ * the driver can't issue commands!):
+ */
+#define IWM_SCD_MEM_LOWER_BOUND (0x0000)
+
+/**
+ * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ */
+#define IWM_SCD_WIN_SIZE 64
+#define IWM_SCD_FRAME_LIMIT 64
+
+#define IWM_SCD_TXFIFO_POS_TID (0)
+#define IWM_SCD_TXFIFO_POS_RA (4)
+#define IWM_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
+
+/* agn SCD */
+#define IWM_SCD_QUEUE_STTS_REG_POS_TXF (0)
+#define IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
+#define IWM_SCD_QUEUE_STTS_REG_POS_WSL (4)
+#define IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
+#define IWM_SCD_QUEUE_STTS_REG_MSK (0x017F0000)
+
+#define IWM_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
+#define IWM_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
+#define IWM_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
+#define IWM_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
+#define IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
+#define IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
+#define IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+
+/* Context Data */
+#define IWM_SCD_CONTEXT_MEM_LOWER_BOUND (IWM_SCD_MEM_LOWER_BOUND + 0x600)
+#define IWM_SCD_CONTEXT_MEM_UPPER_BOUND (IWM_SCD_MEM_LOWER_BOUND + 0x6A0)
+
+/* Tx status */
+#define IWM_SCD_TX_STTS_MEM_LOWER_BOUND (IWM_SCD_MEM_LOWER_BOUND + 0x6A0)
+#define IWM_SCD_TX_STTS_MEM_UPPER_BOUND (IWM_SCD_MEM_LOWER_BOUND + 0x7E0)
+
+/* Translation Data */
+#define IWM_SCD_TRANS_TBL_MEM_LOWER_BOUND (IWM_SCD_MEM_LOWER_BOUND + 0x7E0)
+#define IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND (IWM_SCD_MEM_LOWER_BOUND + 0x808)
+
+#define IWM_SCD_CONTEXT_QUEUE_OFFSET(x)\
+ (IWM_SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
+
+#define IWM_SCD_TX_STTS_QUEUE_OFFSET(x)\
+ (IWM_SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
+
+#define IWM_SCD_TRANS_TBL_OFFSET_QUEUE(x) \
+ ((IWM_SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
+
+#define IWM_SCD_BASE (IWM_PRPH_BASE + 0xa02c00)
+
+#define IWM_SCD_SRAM_BASE_ADDR (IWM_SCD_BASE + 0x0)
+#define IWM_SCD_DRAM_BASE_ADDR (IWM_SCD_BASE + 0x8)
+#define IWM_SCD_AIT (IWM_SCD_BASE + 0x0c)
+#define IWM_SCD_TXFACT (IWM_SCD_BASE + 0x10)
+#define IWM_SCD_ACTIVE (IWM_SCD_BASE + 0x14)
+#define IWM_SCD_QUEUECHAIN_SEL (IWM_SCD_BASE + 0xe8)
+#define IWM_SCD_CHAINEXT_EN (IWM_SCD_BASE + 0x244)
+#define IWM_SCD_AGGR_SEL (IWM_SCD_BASE + 0x248)
+#define IWM_SCD_INTERRUPT_MASK (IWM_SCD_BASE + 0x108)
+
+static inline unsigned int IWM_SCD_QUEUE_WRPTR(unsigned int chnl)
+{
+ if (chnl < 20)
+ return IWM_SCD_BASE + 0x18 + chnl * 4;
+ return IWM_SCD_BASE + 0x284 + (chnl - 20) * 4;
+}
+
+static inline unsigned int IWM_SCD_QUEUE_RDPTR(unsigned int chnl)
+{
+ if (chnl < 20)
+ return IWM_SCD_BASE + 0x68 + chnl * 4;
+ return IWM_SCD_BASE + 0x2B4 + (chnl - 20) * 4;
+}
+
+static inline unsigned int IWM_SCD_QUEUE_STATUS_BITS(unsigned int chnl)
+{
+ if (chnl < 20)
+ return IWM_SCD_BASE + 0x10c + chnl * 4;
+ return IWM_SCD_BASE + 0x384 + (chnl - 20) * 4;
+}
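+
+/*
+ * For example, IWM_SCD_QUEUE_WRPTR(0) resolves to IWM_SCD_BASE + 0x18,
+ * while queues 20 and above fall into the extended register blocks
+ * handled by the helpers above.
+ */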
+
+/*********************** END TX SCHEDULER *************************************/
+
+/* Oscillator clock */
+#define IWM_OSC_CLK (0xa04068)
+#define IWM_OSC_CLK_FORCE_CONTROL (0x8)
+
+/*
+ * END iwl-prph.h
+ */
+
+/*
+ * BEGIN iwl-fh.h
+ */
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define IWM_FH_MEM_LOWER_BOUND (0x1000)
+#define IWM_FH_MEM_UPPER_BOUND (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when doing Txing or Rxing. The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads IWM_FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned. Once this is set up, the device
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define IWM_FH_KW_MEM_ADDR_REG (IWM_FH_MEM_LOWER_BOUND + 0x97C)
+
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct iwm_tfd_frame). These 16 pointer registers are offset by 0x04
+ * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
+ * for them are in different places.
+ *
+ * Bit fields in each pointer register:
+ * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define IWM_FH_MEM_CBBC_0_15_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0x9D0)
+#define IWM_FH_MEM_CBBC_0_15_UPPER_BOUND	(IWM_FH_MEM_LOWER_BOUND + 0xA10)
+#define IWM_FH_MEM_CBBC_16_19_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xBF0)
+#define IWM_FH_MEM_CBBC_16_19_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xC00)
+#define IWM_FH_MEM_CBBC_20_31_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xB20)
+#define IWM_FH_MEM_CBBC_20_31_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xB80)
+
+/* Find TFD CB base pointer for given queue */
+static inline unsigned int IWM_FH_MEM_CBBC_QUEUE(unsigned int chnl)
+{
+ if (chnl < 16)
+ return IWM_FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
+ if (chnl < 20)
+ return IWM_FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
+ return IWM_FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
+}
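+
+/*
+ * For example, IWM_FH_MEM_CBBC_QUEUE(16) resolves to
+ * IWM_FH_MEM_CBBC_16_19_LOWER_BOUND, the first register of the second
+ * block above.
+ */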
+
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and device for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from uCode to host driver). Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into device registers:
+ *
+ * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ * entries (although any power of 2, up to 4096, is selectable by driver).
+ * Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ * (typically 4K, although 8K or 16K are also selectable by driver).
+ * Driver sets up RB size and number of RBDs in the CB via Rx config
+ * register IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ * Bit fields within one RBD:
+ * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ * Driver sets physical address [35:8] of base of RBD circular buffer
+ * into IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2) Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers
+ * (RBs) have been filled, via a "write pointer", actually the index of
+ * the RB's corresponding RBD within the circular buffer. Driver sets
+ * physical address [35:4] into IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ * Bit fields in lower dword of Rx status buffer (upper dword not used
+ * by driver):
+ * 31-12: Not used by driver
+ * 11- 0: Index of last filled Rx buffer descriptor
+ * (device writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for device to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the device's "write" index register,
+ * IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" index corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer. This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" index has advanced past 1! See below).
+ * NOTE: DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the device fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the index of the latest filled RBD. The driver must
+ * read this "read" index from DRAM after receiving an Rx interrupt from the device.
+ *
+ * The driver must also internally keep track of a third index, which is the
+ * next RBD to process. When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" index. For example, if "read" index becomes "1",
+ * driver may process the RB pointed to by RBD 0. Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read index == write index, device thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" indexes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define IWM_FH_MEM_RSCSR_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xBC0)
+#define IWM_FH_MEM_RSCSR_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xC00)
+#define IWM_FH_MEM_RSCSR_CHNL0 (IWM_FH_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG (IWM_FH_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
+ */
+#define IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG (IWM_FH_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (index, really!).
+ * Bit fields:
+ * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ * NOTE: For 256-entry circular buffer, use only bits [7:0].
+ */
+#define IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG (IWM_FH_MEM_RSCSR_CHNL0 + 0x008)
+#define IWM_FH_RSCSR_CHNL0_WPTR (IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
+
+#define IWM_FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (IWM_FH_MEM_RSCSR_CHNL0 + 0x00c)
+#define IWM_FH_RSCSR_CHNL0_RDPTR IWM_FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
+
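+/*
+ * Illustrative sketch, not part of the driver: how a driver might maintain
+ * the RBD "write" index described above.  The function names and the guard
+ * macro IWM_DOC_EXAMPLE are hypothetical; the index wraps at the circular
+ * buffer size and is only handed to the device in multiples of 8.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline uint32_t iwm_example_next_rx_wptr(uint32_t cur,
+    uint32_t queue_size)
+{
+	/* advance by one RBD, wrapping at the end of the circular buffer */
+	return (cur + 1) % queue_size;
+}
+
+static inline uint32_t iwm_example_hw_rx_wptr(uint32_t sw_wptr)
+{
+	/* the device expects the write index in multiples of 8 */
+	return sw_wptr & ~7U;
+}
+#endif /* IWM_DOC_EXAMPLE */
+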
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll IWM_FH_MEM_RSSR_RX_STATUS_REG for
+ * IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ * min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ * '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ * typical value 0x10 (about 1/2 msec)
+ * 3- 0: reserved
+ */
+#define IWM_FH_MEM_RCSR_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xC00)
+#define IWM_FH_MEM_RCSR_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xCC0)
+#define IWM_FH_MEM_RCSR_CHNL0 (IWM_FH_MEM_RCSR_LOWER_BOUND)
+
+#define IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG (IWM_FH_MEM_RCSR_CHNL0)
+#define IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR (IWM_FH_MEM_RCSR_CHNL0 + 0x8)
+#define IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ (IWM_FH_MEM_RCSR_CHNL0 + 0x10)
+
+#define IWM_FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
+#define IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define IWM_FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
+#define IWM_FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define IWM_FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
+
+#define IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
+#define IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
+#define IWM_RX_RB_TIMEOUT (0x11)
+
+#define IWM_FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
+#define IWM_FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
+#define IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
+
+#define IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
+#define IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
+#define IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
+
+#define IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
+#define IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
+#define IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
+
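+/*
+ * Illustrative sketch, not part of the driver: a typical value for
+ * IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG built from the fields documented above --
+ * DMA enabled, interrupt the host, 4 KB buffers, 256 RBDs (2^8) and the
+ * usual RB timeout.  The guard macro IWM_DOC_EXAMPLE is hypothetical.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline uint32_t iwm_example_rx_config(void)
+{
+	return IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+	    (8 << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) | /* 2^8 = 256 RBDs */
+	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS);
+}
+#endif /* IWM_DOC_EXAMPLE */
+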
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * IWM_FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ * 24: 1 = Channel 0 is idle
+ *
+ * IWM_FH_MEM_RSSR_SHARED_CTRL_REG and IWM_FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define IWM_FH_MEM_RSSR_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xC40)
+#define IWM_FH_MEM_RSSR_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xD00)
+
+#define IWM_FH_MEM_RSSR_SHARED_CTRL_REG (IWM_FH_MEM_RSSR_LOWER_BOUND)
+#define IWM_FH_MEM_RSSR_RX_STATUS_REG (IWM_FH_MEM_RSSR_LOWER_BOUND + 0x004)
+#define IWM_FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+ (IWM_FH_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
+
+#define IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+
+/* TFDB Area - TFDs buffer table */
+#define IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
+#define IWM_FH_TFDIB_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0x900)
+#define IWM_FH_TFDIB_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0x958)
+#define IWM_FH_TFDIB_CTRL0_REG(_chnl) (IWM_FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define IWM_FH_TFDIB_CTRL1_REG(_chnl) (IWM_FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * Device has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ * 3: Enable internal DMA requests (1, normal operation), disable (0)
+ * 2- 0: Reserved, set to "0"
+ */
+#define IWM_FH_TCSR_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xD00)
+#define IWM_FH_TCSR_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define IWM_FH_TCSR_CHNL_NUM (8)
+
+/* TCSR: tx_config register values */
+#define IWM_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
+ (IWM_FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define IWM_FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
+ (IWM_FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
+ (IWM_FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
+
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
+
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
+
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
+#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
+#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
+#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
+
+#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
+#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
+
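+/*
+ * Illustrative sketch, not part of the driver: the Tx DMA channel
+ * configuration value described above -- channel enabled with internal DMA
+ * requests (credits) enabled, all other bits zero.  The guard macro
+ * IWM_DOC_EXAMPLE is hypothetical.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline uint32_t iwm_example_tx_config(void)
+{
+	return IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE;
+}
+#endif /* IWM_DOC_EXAMPLE */
+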
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * IWM_FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24: 1 = Channel buffers empty (channel 7:0)
+ * 23-16: 1 = No pending requests (channel 7:0)
+ */
+#define IWM_FH_TSSR_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xEA0)
+#define IWM_FH_TSSR_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0xEC0)
+
+#define IWM_FH_TSSR_TX_STATUS_REG (IWM_FH_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessed to internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define IWM_FH_TSSR_TX_ERROR_REG (IWM_FH_TSSR_LOWER_BOUND + 0x018)
+#define IWM_FH_TSSR_TX_MSG_CONFIG_REG (IWM_FH_TSSR_LOWER_BOUND + 0x008)
+
+#define IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
+
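+/*
+ * Illustrative sketch, not part of the driver: checking a value read from
+ * IWM_FH_TSSR_TX_STATUS_REG for an idle Tx DMA channel using the mask
+ * above.  The guard macro IWM_DOC_EXAMPLE is hypothetical.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline int iwm_example_tx_chnl_idle(uint32_t tssr_status, int chnl)
+{
+	return (tssr_status &
+	    IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl)) != 0;
+}
+#endif /* IWM_DOC_EXAMPLE */
+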
+/* Tx service channels */
+#define IWM_FH_SRVC_CHNL (9)
+#define IWM_FH_SRVC_LOWER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0x9C8)
+#define IWM_FH_SRVC_UPPER_BOUND (IWM_FH_MEM_LOWER_BOUND + 0x9D0)
+#define IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+ (IWM_FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define IWM_FH_TX_CHICKEN_BITS_REG (IWM_FH_MEM_LOWER_BOUND + 0xE98)
+#define IWM_FH_TX_TRB_REG(_chan) (IWM_FH_MEM_LOWER_BOUND + 0x958 + \
+ (_chan) * 4)
+
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from the memory to TX-FIFO
+ */
+#define IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+#define IWM_RX_QUEUE_SIZE 256
+#define IWM_RX_QUEUE_MASK 255
+#define IWM_RX_QUEUE_SIZE_LOG 8
+
+/*
+ * RX related structures and functions
+ */
+#define IWM_RX_FREE_BUFFERS 64
+#define IWM_RX_LOW_WATERMARK 8
+
+/**
+ * struct iwm_rb_status - receive buffer status
+ * host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * which was transferred
+ */
+struct iwm_rb_status {
+ uint16_t closed_rb_num;
+ uint16_t closed_fr_num;
+ uint16_t finished_rb_num;
+	uint16_t finished_fr_num;
+ uint32_t unused;
+} __packed;
+
+
+#define IWM_TFD_QUEUE_SIZE_MAX (256)
+#define IWM_TFD_QUEUE_SIZE_BC_DUP (64)
+#define IWM_TFD_QUEUE_BC_SIZE (IWM_TFD_QUEUE_SIZE_MAX + \
+ IWM_TFD_QUEUE_SIZE_BC_DUP)
+#define IWM_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IWM_NUM_OF_TBS 20
+
+static inline uint8_t iwm_get_dma_hi_addr(bus_addr_t addr)
+{
+ return (sizeof(addr) > sizeof(uint32_t) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
+/**
+ * struct iwm_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of one transmit buffer.
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer;
+ *	every even-numbered entry is unaligned on a 16-bit boundary
+ * @hi_n_len: 0-3 [35:32] portion of the dma address
+ *	4-15 length of the tx buffer
+ */
+struct iwm_tfd_tb {
+ uint32_t lo;
+ uint16_t hi_n_len;
+} __packed;
+
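+/*
+ * Illustrative sketch, not part of the driver: filling one TB entry from a
+ * 36-bit DMA address and a length, following the bit layout documented
+ * above.  Byte-order conversion is omitted and the guard macro
+ * IWM_DOC_EXAMPLE is hypothetical.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline void iwm_example_fill_tb(struct iwm_tfd_tb *tb,
+    bus_addr_t addr, uint16_t len)
+{
+	tb->lo = (uint32_t)addr;			/* address bits 31:0 */
+	tb->hi_n_len = iwm_get_dma_hi_addr(addr) |	/* address bits 35:32 */
+	    (len << 4);					/* length in bits 15:4 */
+}
+#endif /* IWM_DOC_EXAMPLE */
+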
+/**
+ * struct iwm_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the IWM_FH_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct iwm_tfd {
+ uint8_t __reserved1[3];
+ uint8_t num_tbs;
+ struct iwm_tfd_tb tbs[IWM_NUM_OF_TBS];
+ uint32_t __pad;
+} __packed;
+
+/* Keep Warm Size */
+#define IWM_KW_SIZE 0x1000 /* 4k */
+
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwm_agn_scd_bc_tbl - scheduler byte count table
+ * base physical address provided by IWM_SCD_DRAM_BASE_ADDR
+ * @tfd_offset 0-12 - tx command byte count
+ * 12-16 - station index
+ */
+struct iwm_agn_scd_bc_tbl {
+ uint16_t tfd_offset[IWM_TFD_QUEUE_BC_SIZE];
+} __packed;
+
+/*
+ * END iwl-fh.h
+ */
+
+/*
+ * BEGIN mvm/fw-api.h
+ */
+
+/* maximal number of Tx queues in any platform */
+#define IWM_MVM_MAX_QUEUES 20
+
+/* Tx queue numbers */
+enum {
+ IWM_MVM_OFFCHANNEL_QUEUE = 8,
+ IWM_MVM_CMD_QUEUE = 9,
+};
+
+#define IWM_MVM_CMD_FIFO 7
+
+#define IWM_MVM_STATION_COUNT 16
+
+/* commands */
+enum {
+ IWM_MVM_ALIVE = 0x1,
+ IWM_REPLY_ERROR = 0x2,
+
+ IWM_INIT_COMPLETE_NOTIF = 0x4,
+
+ /* PHY context commands */
+ IWM_PHY_CONTEXT_CMD = 0x8,
+ IWM_DBG_CFG = 0x9,
+
+ /* station table */
+ IWM_ADD_STA_KEY = 0x17,
+ IWM_ADD_STA = 0x18,
+ IWM_REMOVE_STA = 0x19,
+
+ /* TX */
+ IWM_TX_CMD = 0x1c,
+ IWM_TXPATH_FLUSH = 0x1e,
+ IWM_MGMT_MCAST_KEY = 0x1f,
+
+ /* global key */
+ IWM_WEP_KEY = 0x20,
+
+ /* MAC and Binding commands */
+ IWM_MAC_CONTEXT_CMD = 0x28,
+ IWM_TIME_EVENT_CMD = 0x29, /* both CMD and response */
+ IWM_TIME_EVENT_NOTIFICATION = 0x2a,
+ IWM_BINDING_CONTEXT_CMD = 0x2b,
+ IWM_TIME_QUOTA_CMD = 0x2c,
+ IWM_NON_QOS_TX_COUNTER_CMD = 0x2d,
+
+ IWM_LQ_CMD = 0x4e,
+
+ /* Calibration */
+ IWM_TEMPERATURE_NOTIFICATION = 0x62,
+ IWM_CALIBRATION_CFG_CMD = 0x65,
+ IWM_CALIBRATION_RES_NOTIFICATION = 0x66,
+ IWM_CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
+ IWM_RADIO_VERSION_NOTIFICATION = 0x68,
+
+ /* Scan offload */
+ IWM_SCAN_OFFLOAD_REQUEST_CMD = 0x51,
+ IWM_SCAN_OFFLOAD_ABORT_CMD = 0x52,
+ IWM_SCAN_OFFLOAD_COMPLETE = 0x6D,
+ IWM_SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
+ IWM_SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+ IWM_MATCH_FOUND_NOTIFICATION = 0xd9,
+
+ /* Phy */
+ IWM_PHY_CONFIGURATION_CMD = 0x6a,
+ IWM_CALIB_RES_NOTIF_PHY_DB = 0x6b,
+ /* IWM_PHY_DB_CMD = 0x6c, */
+
+ /* Power - legacy power table command */
+ IWM_POWER_TABLE_CMD = 0x77,
+ IWM_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
+
+ /* Thermal Throttling*/
+ IWM_REPLY_THERMAL_MNG_BACKOFF = 0x7e,
+
+ /* Scanning */
+ IWM_SCAN_REQUEST_CMD = 0x80,
+ IWM_SCAN_ABORT_CMD = 0x81,
+ IWM_SCAN_START_NOTIFICATION = 0x82,
+ IWM_SCAN_RESULTS_NOTIFICATION = 0x83,
+ IWM_SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+ /* NVM */
+ IWM_NVM_ACCESS_CMD = 0x88,
+
+ IWM_SET_CALIB_DEFAULT_CMD = 0x8e,
+
+ IWM_BEACON_NOTIFICATION = 0x90,
+ IWM_BEACON_TEMPLATE_CMD = 0x91,
+ IWM_TX_ANT_CONFIGURATION_CMD = 0x98,
+ IWM_BT_CONFIG = 0x9b,
+ IWM_STATISTICS_NOTIFICATION = 0x9d,
+ IWM_REDUCE_TX_POWER_CMD = 0x9f,
+
+ /* RF-KILL commands and notifications */
+ IWM_CARD_STATE_CMD = 0xa0,
+ IWM_CARD_STATE_NOTIFICATION = 0xa1,
+
+ IWM_MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+ /* Power - new power table command */
+ IWM_MAC_PM_POWER_TABLE = 0xa9,
+
+ IWM_REPLY_RX_PHY_CMD = 0xc0,
+ IWM_REPLY_RX_MPDU_CMD = 0xc1,
+ IWM_BA_NOTIF = 0xc5,
+
+ /* BT Coex */
+ IWM_BT_COEX_PRIO_TABLE = 0xcc,
+ IWM_BT_COEX_PROT_ENV = 0xcd,
+ IWM_BT_PROFILE_NOTIFICATION = 0xce,
+ IWM_BT_COEX_CI = 0x5d,
+
+ IWM_REPLY_SF_CFG_CMD = 0xd1,
+ IWM_REPLY_BEACON_FILTERING_CMD = 0xd2,
+
+ IWM_REPLY_DEBUG_CMD = 0xf0,
+ IWM_DEBUG_LOG_MSG = 0xf7,
+
+ IWM_MCAST_FILTER_CMD = 0xd0,
+
+ /* D3 commands/notifications */
+ IWM_D3_CONFIG_CMD = 0xd3,
+ IWM_PROT_OFFLOAD_CONFIG_CMD = 0xd4,
+ IWM_OFFLOADS_QUERY_CMD = 0xd5,
+ IWM_REMOTE_WAKE_CONFIG_CMD = 0xd6,
+
+ /* for WoWLAN in particular */
+ IWM_WOWLAN_PATTERNS = 0xe0,
+ IWM_WOWLAN_CONFIGURATION = 0xe1,
+ IWM_WOWLAN_TSC_RSC_PARAM = 0xe2,
+ IWM_WOWLAN_TKIP_PARAM = 0xe3,
+ IWM_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+ IWM_WOWLAN_GET_STATUSES = 0xe5,
+ IWM_WOWLAN_TX_POWER_PER_DB = 0xe6,
+
+ /* and for NetDetect */
+ IWM_NET_DETECT_CONFIG_CMD = 0x54,
+ IWM_NET_DETECT_PROFILES_QUERY_CMD = 0x56,
+ IWM_NET_DETECT_PROFILES_CMD = 0x57,
+ IWM_NET_DETECT_HOTSPOTS_CMD = 0x58,
+ IWM_NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59,
+
+ IWM_REPLY_MAX = 0xff,
+};
+
+/**
+ * struct iwm_cmd_response - generic response struct for most commands
+ * @status: status of the command asked, changes for each one
+ */
+struct iwm_cmd_response {
+ uint32_t status;
+};
+
+/*
+ * struct iwm_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwm_tx_ant_cfg_cmd {
+ uint32_t valid;
+} __packed;
+
+/**
+ * struct iwm_reduce_tx_power_cmd - TX power reduction command
+ * IWM_REDUCE_TX_POWER_CMD = 0x9f
+ * @flags: (reserved for future implementation)
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in dBm.
+ */
+struct iwm_reduce_tx_power_cmd {
+ uint8_t flags;
+ uint8_t mac_context_id;
+ uint16_t pwr_restriction;
+} __packed; /* IWM_TX_REDUCED_POWER_API_S_VER_1 */
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwm_calib_ctrl {
+ uint32_t flow_trigger;
+ uint32_t event_trigger;
+} __packed;
+
+/* This enum defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through IWM_CALIBRATION_CFG_CMD.
+ */
+enum iwm_calib_cfg {
+ IWM_CALIB_CFG_XTAL_IDX = (1 << 0),
+ IWM_CALIB_CFG_TEMPERATURE_IDX = (1 << 1),
+ IWM_CALIB_CFG_VOLTAGE_READ_IDX = (1 << 2),
+ IWM_CALIB_CFG_PAPD_IDX = (1 << 3),
+ IWM_CALIB_CFG_TX_PWR_IDX = (1 << 4),
+ IWM_CALIB_CFG_DC_IDX = (1 << 5),
+ IWM_CALIB_CFG_BB_FILTER_IDX = (1 << 6),
+ IWM_CALIB_CFG_LO_LEAKAGE_IDX = (1 << 7),
+ IWM_CALIB_CFG_TX_IQ_IDX = (1 << 8),
+ IWM_CALIB_CFG_TX_IQ_SKEW_IDX = (1 << 9),
+ IWM_CALIB_CFG_RX_IQ_IDX = (1 << 10),
+ IWM_CALIB_CFG_RX_IQ_SKEW_IDX = (1 << 11),
+ IWM_CALIB_CFG_SENSITIVITY_IDX = (1 << 12),
+ IWM_CALIB_CFG_CHAIN_NOISE_IDX = (1 << 13),
+ IWM_CALIB_CFG_DISCONNECTED_ANT_IDX = (1 << 14),
+ IWM_CALIB_CFG_ANT_COUPLING_IDX = (1 << 15),
+ IWM_CALIB_CFG_DAC_IDX = (1 << 16),
+ IWM_CALIB_CFG_ABS_IDX = (1 << 17),
+ IWM_CALIB_CFG_AGC_IDX = (1 << 18),
+};
+
+/*
+ * Phy configuration command.
+ */
+struct iwm_phy_cfg_cmd {
+ uint32_t phy_cfg;
+ struct iwm_calib_ctrl calib_control;
+} __packed;
+
+#define IWM_PHY_CFG_RADIO_TYPE ((1 << 0) | (1 << 1))
+#define IWM_PHY_CFG_RADIO_STEP ((1 << 2) | (1 << 3))
+#define IWM_PHY_CFG_RADIO_DASH ((1 << 4) | (1 << 5))
+#define IWM_PHY_CFG_PRODUCT_NUMBER ((1 << 6) | (1 << 7))
+#define IWM_PHY_CFG_TX_CHAIN_A (1 << 8)
+#define IWM_PHY_CFG_TX_CHAIN_B (1 << 9)
+#define IWM_PHY_CFG_TX_CHAIN_C (1 << 10)
+#define IWM_PHY_CFG_RX_CHAIN_A (1 << 12)
+#define IWM_PHY_CFG_RX_CHAIN_B (1 << 13)
+#define IWM_PHY_CFG_RX_CHAIN_C (1 << 14)
+
+
+/* Target of the IWM_NVM_ACCESS_CMD */
+enum {
+ IWM_NVM_ACCESS_TARGET_CACHE = 0,
+ IWM_NVM_ACCESS_TARGET_OTP = 1,
+ IWM_NVM_ACCESS_TARGET_EEPROM = 2,
+};
+
+/* Section types for IWM_NVM_ACCESS_CMD */
+enum {
+ IWM_NVM_SECTION_TYPE_HW = 0,
+ IWM_NVM_SECTION_TYPE_SW,
+ IWM_NVM_SECTION_TYPE_PAPD,
+ IWM_NVM_SECTION_TYPE_BT,
+ IWM_NVM_SECTION_TYPE_CALIBRATION,
+ IWM_NVM_SECTION_TYPE_PRODUCTION,
+ IWM_NVM_SECTION_TYPE_POST_FCS_CALIB,
+ IWM_NVM_NUM_OF_SECTIONS,
+};
+
+/**
+ * struct iwm_nvm_access_cmd_ver2 - Request the device to send an NVM section
+ * @op_code: 0 - read, 1 - write
+ * @target: IWM_NVM_ACCESS_TARGET_*
+ * @type: IWM_NVM_SECTION_TYPE_*
+ * @offset: offset in bytes into the section
+ * @length: in bytes, to read/write
+ * @data: if write operation, the data to write. On read it is empty.
+ */
+struct iwm_nvm_access_cmd {
+ uint8_t op_code;
+ uint8_t target;
+ uint16_t type;
+ uint16_t offset;
+ uint16_t length;
+ uint8_t data[];
+} __packed; /* IWM_NVM_ACCESS_CMD_API_S_VER_2 */
+
+/**
+ * struct iwm_nvm_access_resp_ver2 - response to IWM_NVM_ACCESS_CMD
+ * @offset: offset in bytes into the section
+ * @length: in bytes, either how much was written or read
+ * @type: IWM_NVM_SECTION_TYPE_*
+ * @status: 0 for success, fail otherwise
+ * @data: if read operation, the data returned. Empty on write.
+ */
+struct iwm_nvm_access_resp {
+ uint16_t offset;
+ uint16_t length;
+ uint16_t type;
+ uint16_t status;
+ uint8_t data[];
+} __packed; /* IWM_NVM_ACCESS_CMD_RESP_API_S_VER_2 */
+
+/* IWM_MVM_ALIVE 0x1 */
+
+/* alive response is_valid values */
+#define IWM_ALIVE_RESP_UCODE_OK (1 << 0)
+#define IWM_ALIVE_RESP_RFKILL (1 << 1)
+
+/* alive response ver_type values */
+enum {
+ IWM_FW_TYPE_HW = 0,
+ IWM_FW_TYPE_PROT = 1,
+ IWM_FW_TYPE_AP = 2,
+ IWM_FW_TYPE_WOWLAN = 3,
+ IWM_FW_TYPE_TIMING = 4,
+ IWM_FW_TYPE_WIPAN = 5
+};
+
+/* alive response ver_subtype values */
+enum {
+ IWM_FW_SUBTYPE_FULL_FEATURE = 0,
+ IWM_FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
+ IWM_FW_SUBTYPE_REDUCED = 2,
+ IWM_FW_SUBTYPE_ALIVE_ONLY = 3,
+ IWM_FW_SUBTYPE_WOWLAN = 4,
+ IWM_FW_SUBTYPE_AP_SUBTYPE = 5,
+ IWM_FW_SUBTYPE_WIPAN = 6,
+ IWM_FW_SUBTYPE_INITIALIZE = 9
+};
+
+#define IWM_ALIVE_STATUS_ERR 0xDEAD
+#define IWM_ALIVE_STATUS_OK 0xCAFE
+
+#define IWM_ALIVE_FLG_RFKILL (1 << 0)
+
+struct iwm_mvm_alive_resp {
+ uint16_t status;
+ uint16_t flags;
+ uint8_t ucode_minor;
+ uint8_t ucode_major;
+ uint16_t id;
+ uint8_t api_minor;
+ uint8_t api_major;
+ uint8_t ver_subtype;
+ uint8_t ver_type;
+ uint8_t mac;
+ uint8_t opt;
+ uint16_t reserved2;
+ uint32_t timestamp;
+ uint32_t error_event_table_ptr; /* SRAM address for error log */
+ uint32_t log_event_table_ptr; /* SRAM address for event log */
+ uint32_t cpu_register_ptr;
+ uint32_t dbgm_config_ptr;
+ uint32_t alive_counter_ptr;
+ uint32_t scd_base_ptr; /* SRAM address for SCD */
+} __packed; /* IWM_ALIVE_RES_API_S_VER_1 */
+
+/* Error response/notification */
+enum {
+ IWM_FW_ERR_UNKNOWN_CMD = 0x0,
+ IWM_FW_ERR_INVALID_CMD_PARAM = 0x1,
+ IWM_FW_ERR_SERVICE = 0x2,
+ IWM_FW_ERR_ARC_MEMORY = 0x3,
+ IWM_FW_ERR_ARC_CODE = 0x4,
+ IWM_FW_ERR_WATCH_DOG = 0x5,
+ IWM_FW_ERR_WEP_GRP_KEY_INDX = 0x10,
+ IWM_FW_ERR_WEP_KEY_SIZE = 0x11,
+ IWM_FW_ERR_OBSOLETE_FUNC = 0x12,
+ IWM_FW_ERR_UNEXPECTED = 0xFE,
+ IWM_FW_ERR_FATAL = 0xFF
+};
+
+/**
+ * struct iwm_error_resp - FW error indication
+ * ( IWM_REPLY_ERROR = 0x2 )
+ * @error_type: one of IWM_FW_ERR_*
+ * @cmd_id: the command ID for which the error occurred
+ * @bad_cmd_seq_num: sequence number of the erroneous command
+ * @error_service: which service created the error, applicable only if
+ * error_type = 2, otherwise 0
+ * @timestamp: TSF in usecs.
+ */
+struct iwm_error_resp {
+ uint32_t error_type;
+ uint8_t cmd_id;
+ uint8_t reserved1;
+ uint16_t bad_cmd_seq_num;
+ uint32_t error_service;
+ uint64_t timestamp;
+} __packed;
+
+
+/* Common PHY, MAC and Bindings definitions */
+
+#define IWM_MAX_MACS_IN_BINDING (3)
+#define IWM_MAX_BINDINGS (4)
+#define IWM_AUX_BINDING_INDEX (3)
+#define IWM_MAX_PHYS (4)
+
+/* Used to extract ID and color from the context dword */
+#define IWM_FW_CTXT_ID_POS (0)
+#define IWM_FW_CTXT_ID_MSK (0xff << IWM_FW_CTXT_ID_POS)
+#define IWM_FW_CTXT_COLOR_POS (8)
+#define IWM_FW_CTXT_COLOR_MSK (0xff << IWM_FW_CTXT_COLOR_POS)
+#define IWM_FW_CTXT_INVALID (0xffffffff)
+
+#define IWM_FW_CMD_ID_AND_COLOR(_id, _color) ((_id << IWM_FW_CTXT_ID_POS) |\
+ (_color << IWM_FW_CTXT_COLOR_POS))
+
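+/*
+ * Illustrative sketch, not part of the driver: extracting the ID and color
+ * back out of a context dword built with IWM_FW_CMD_ID_AND_COLOR().  The
+ * guard macro IWM_DOC_EXAMPLE is hypothetical.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline uint32_t iwm_example_ctxt_id(uint32_t id_and_color)
+{
+	return (id_and_color & IWM_FW_CTXT_ID_MSK) >> IWM_FW_CTXT_ID_POS;
+}
+
+static inline uint32_t iwm_example_ctxt_color(uint32_t id_and_color)
+{
+	return (id_and_color & IWM_FW_CTXT_COLOR_MSK) >>
+	    IWM_FW_CTXT_COLOR_POS;
+}
+#endif /* IWM_DOC_EXAMPLE */
+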
+/* Possible actions on PHYs, MACs and Bindings */
+enum {
+ IWM_FW_CTXT_ACTION_STUB = 0,
+ IWM_FW_CTXT_ACTION_ADD,
+ IWM_FW_CTXT_ACTION_MODIFY,
+ IWM_FW_CTXT_ACTION_REMOVE,
+ IWM_FW_CTXT_ACTION_NUM
+}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+
+/* Time Events */
+
+/* Time Event types, according to MAC type */
+enum iwm_time_event_type {
+ /* BSS Station Events */
+ IWM_TE_BSS_STA_AGGRESSIVE_ASSOC,
+ IWM_TE_BSS_STA_ASSOC,
+ IWM_TE_BSS_EAP_DHCP_PROT,
+ IWM_TE_BSS_QUIET_PERIOD,
+
+ /* P2P Device Events */
+ IWM_TE_P2P_DEVICE_DISCOVERABLE,
+ IWM_TE_P2P_DEVICE_LISTEN,
+ IWM_TE_P2P_DEVICE_ACTION_SCAN,
+ IWM_TE_P2P_DEVICE_FULL_SCAN,
+
+ /* P2P Client Events */
+ IWM_TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
+ IWM_TE_P2P_CLIENT_ASSOC,
+ IWM_TE_P2P_CLIENT_QUIET_PERIOD,
+
+ /* P2P GO Events */
+ IWM_TE_P2P_GO_ASSOC_PROT,
+ IWM_TE_P2P_GO_REPETITIVE_NOA,
+ IWM_TE_P2P_GO_CT_WINDOW,
+
+ /* WiDi Sync Events */
+ IWM_TE_WIDI_TX_SYNC,
+
+ IWM_TE_MAX
+}; /* IWM_MAC_EVENT_TYPE_API_E_VER_1 */
+
+
+
+/* Time event - defines for command API v1 */
+
+/*
+ * @IWM_TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @IWM_TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @IWM_TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @IWM_TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+ IWM_TE_V1_FRAG_NONE = 0,
+ IWM_TE_V1_FRAG_SINGLE = 1,
+ IWM_TE_V1_FRAG_DUAL = 2,
+ IWM_TE_V1_FRAG_ENDLESS = 0xffffffff
+};
+
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define IWM_TE_V1_FRAG_MAX_MSK 0x0fffffff
+/* Repeat the time event endlessly (until removed) */
+#define IWM_TE_V1_REPEAT_ENDLESS 0xffffffff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define IWM_TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
+
+/* Time Event dependencies: none, on another TE, or in a specific time */
+enum {
+ IWM_TE_V1_INDEPENDENT = 0,
+ IWM_TE_V1_DEP_OTHER = (1 << 0),
+ IWM_TE_V1_DEP_TSF = (1 << 1),
+ IWM_TE_V1_EVENT_SOCIOPATHIC = (1 << 2),
+}; /* IWM_MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
+/*
+ * @IWM_TE_V1_NOTIF_NONE: no notifications
+ * @IWM_TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @IWM_TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end
+ * @IWM_TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @IWM_TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @IWM_TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @IWM_TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end
+ * @IWM_TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @IWM_TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ *
+ * Supported Time event notifications configuration.
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ */
+enum {
+ IWM_TE_V1_NOTIF_NONE = 0,
+ IWM_TE_V1_NOTIF_HOST_EVENT_START = (1 << 0),
+ IWM_TE_V1_NOTIF_HOST_EVENT_END = (1 << 1),
+ IWM_TE_V1_NOTIF_INTERNAL_EVENT_START = (1 << 2),
+ IWM_TE_V1_NOTIF_INTERNAL_EVENT_END = (1 << 3),
+ IWM_TE_V1_NOTIF_HOST_FRAG_START = (1 << 4),
+ IWM_TE_V1_NOTIF_HOST_FRAG_END = (1 << 5),
+ IWM_TE_V1_NOTIF_INTERNAL_FRAG_START = (1 << 6),
+ IWM_TE_V1_NOTIF_INTERNAL_FRAG_END = (1 << 7),
+}; /* IWM_MAC_EVENT_ACTION_API_E_VER_2 */
+
+
+/**
+ * struct iwm_time_event_cmd_api_v1 - configuring Time Events
+ * with struct IWM_MAC_TIME_EVENT_DATA_API_S_VER_1 (see also version 2;
+ * which one is used is determined by IWM_UCODE_TLV_FLAGS)
+ * ( IWM_TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of IWM_FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @interval_reciprocal: 2^32 / interval
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be IWM_TE_REPEAT_ENDLESS
+ * @dep_policy: one of IWM_TE_V1_INDEPENDENT, IWM_TE_V1_DEP_OTHER, IWM_TE_V1_DEP_TSF
+ * and IWM_TE_V1_EVENT_SOCIOPATHIC
+ * @is_present: 0 or 1, are we present or absent during the Time Event
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @notify: notifications using IWM_TE_V1_NOTIF_* (whom to notify when)
+ */
+struct iwm_time_event_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t id;
+ /* IWM_MAC_TIME_EVENT_DATA_API_S_VER_1 */
+ uint32_t apply_time;
+ uint32_t max_delay;
+ uint32_t dep_policy;
+ uint32_t depends_on;
+ uint32_t is_present;
+ uint32_t max_frags;
+ uint32_t interval;
+ uint32_t interval_reciprocal;
+ uint32_t duration;
+ uint32_t repeat;
+ uint32_t notify;
+} __packed; /* IWM_MAC_TIME_EVENT_CMD_API_S_VER_1 */
+
+
+/* Time event - defines for command API v2 */
+
+/*
+ * @IWM_TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @IWM_TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @IWM_TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @IWM_TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+ IWM_TE_V2_FRAG_NONE = 0,
+ IWM_TE_V2_FRAG_SINGLE = 1,
+ IWM_TE_V2_FRAG_DUAL = 2,
+ IWM_TE_V2_FRAG_MAX = 0xfe,
+ IWM_TE_V2_FRAG_ENDLESS = 0xff
+};
+
+/* Repeat the time event endlessly (until removed) */
+#define IWM_TE_V2_REPEAT_ENDLESS 0xff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define IWM_TE_V2_REPEAT_MAX 0xfe
+
+#define IWM_TE_V2_PLACEMENT_POS 12
+#define IWM_TE_V2_ABSENCE_POS 15
+
+/* Time event policy values (for time event cmd api v2)
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ *
+ * @IWM_TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
+ * @IWM_TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @IWM_TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end
+ * @IWM_TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @IWM_TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @IWM_TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @IWM_TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
+ * @IWM_TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @IWM_TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @IWM_TE_V2_DEP_OTHER: depends on another time event
+ * @IWM_TE_V2_DEP_TSF: depends on a specific time
+ * @IWM_TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
+ * @IWM_TE_V2_ABSENCE: are we present or absent during the Time Event.
+ */
+enum {
+ IWM_TE_V2_DEFAULT_POLICY = 0x0,
+
+ /* notifications (event start/stop, fragment start/stop) */
+ IWM_TE_V2_NOTIF_HOST_EVENT_START = (1 << 0),
+ IWM_TE_V2_NOTIF_HOST_EVENT_END = (1 << 1),
+ IWM_TE_V2_NOTIF_INTERNAL_EVENT_START = (1 << 2),
+ IWM_TE_V2_NOTIF_INTERNAL_EVENT_END = (1 << 3),
+
+ IWM_TE_V2_NOTIF_HOST_FRAG_START = (1 << 4),
+ IWM_TE_V2_NOTIF_HOST_FRAG_END = (1 << 5),
+ IWM_TE_V2_NOTIF_INTERNAL_FRAG_START = (1 << 6),
+ IWM_TE_V2_NOTIF_INTERNAL_FRAG_END = (1 << 7),
+
+ IWM_TE_V2_NOTIF_MSK = 0xff,
+
+ /* placement characteristics */
+ IWM_TE_V2_DEP_OTHER = (1 << IWM_TE_V2_PLACEMENT_POS),
+ IWM_TE_V2_DEP_TSF = (1 << (IWM_TE_V2_PLACEMENT_POS + 1)),
+ IWM_TE_V2_EVENT_SOCIOPATHIC = (1 << (IWM_TE_V2_PLACEMENT_POS + 2)),
+
+ /* are we present or absent during the Time Event. */
+ IWM_TE_V2_ABSENCE = (1 << IWM_TE_V2_ABSENCE_POS),
+};
+
+/**
+ * struct iwm_time_event_cmd_api_v2 - configuring Time Events
+ * with struct IWM_MAC_TIME_EVENT_DATA_API_S_VER_2 (see also version 1;
+ * which one is used is determined by IWM_UCODE_TLV_FLAGS)
+ * ( IWM_TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of IWM_FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be IWM_TE_REPEAT_ENDLESS
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ * on event and/or fragment start and/or end
+ * using one of IWM_TE_INDEPENDENT, IWM_TE_DEP_OTHER, IWM_TE_DEP_TSF
+ * IWM_TE_EVENT_SOCIOPATHIC
+ * using IWM_TE_ABSENCE and using IWM_TE_NOTIF_*
+ */
+struct iwm_time_event_cmd_v2 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t id;
+ /* IWM_MAC_TIME_EVENT_DATA_API_S_VER_2 */
+ uint32_t apply_time;
+ uint32_t max_delay;
+ uint32_t depends_on;
+ uint32_t interval;
+ uint32_t duration;
+ uint8_t repeat;
+ uint8_t max_frags;
+ uint16_t policy;
+} __packed; /* IWM_MAC_TIME_EVENT_CMD_API_S_VER_2 */
+
+/**
+ * struct iwm_time_event_resp - response structure to iwm_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC
+ */
+struct iwm_time_event_resp {
+ uint32_t status;
+ uint32_t id;
+ uint32_t unique_id;
+ uint32_t id_and_color;
+} __packed; /* IWM_MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwm_time_event_notif - notifications of time event start/stop
+ * ( IWM_TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: one of IWM_TE_NOTIF_START or IWM_TE_NOTIF_END
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwm_time_event_notif {
+ uint32_t timestamp;
+ uint32_t session_id;
+ uint32_t unique_id;
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t status;
+} __packed; /* IWM_MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+
+/* Bindings and Time Quota */
+
+/**
+ * struct iwm_binding_cmd - configuring bindings
+ * ( IWM_BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWM_FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * @phy: PHY id and color which belongs to the binding
+ */
+struct iwm_binding_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWM_BINDING_DATA_API_S_VER_1 */
+ uint32_t macs[IWM_MAX_MACS_IN_BINDING];
+ uint32_t phy;
+} __packed; /* IWM_BINDING_CMD_API_S_VER_1 */
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWM_MVM_MAX_QUOTA 128
+
+/**
+ * struct iwm_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwm_time_quota_data {
+ uint32_t id_and_color;
+ uint32_t quota;
+ uint32_t max_duration;
+} __packed; /* IWM_TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwm_time_quota_cmd - configuration of time quota between bindings
+ * ( IWM_TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ */
+struct iwm_time_quota_cmd {
+ struct iwm_time_quota_data quotas[IWM_MAX_BINDINGS];
+} __packed; /* IWM_TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+
+/* PHY context */
+
+/* Supported bands */
+#define IWM_PHY_BAND_5 (0)
+#define IWM_PHY_BAND_24 (1)
+
+/* Supported channel width, vary if there is VHT support */
+#define IWM_PHY_VHT_CHANNEL_MODE20 (0x0)
+#define IWM_PHY_VHT_CHANNEL_MODE40 (0x1)
+#define IWM_PHY_VHT_CHANNEL_MODE80 (0x2)
+#define IWM_PHY_VHT_CHANNEL_MODE160 (0x3)
+
+/*
+ * Control channel position:
+ * For legacy, a set bit means upper channel, otherwise lower.
+ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq
+ * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
+ * center_freq
+ * |
+ * 40Mhz |_______|_______|
+ * 80Mhz |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code 011 010 001 000 | 100 101 110 111
+ */
+#define IWM_PHY_VHT_CTRL_POS_1_BELOW (0x0)
+#define IWM_PHY_VHT_CTRL_POS_2_BELOW (0x1)
+#define IWM_PHY_VHT_CTRL_POS_3_BELOW (0x2)
+#define IWM_PHY_VHT_CTRL_POS_4_BELOW (0x3)
+#define IWM_PHY_VHT_CTRL_POS_1_ABOVE (0x4)
+#define IWM_PHY_VHT_CTRL_POS_2_ABOVE (0x5)
+#define IWM_PHY_VHT_CTRL_POS_3_ABOVE (0x6)
+#define IWM_PHY_VHT_CTRL_POS_4_ABOVE (0x7)
+
+/*
+ * @band: IWM_PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwm_fw_channel_info {
+ uint8_t band;
+ uint8_t channel;
+ uint8_t width;
+ uint8_t ctrl_pos;
+} __packed;
+
+#define IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define IWM_PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+ (0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define IWM_PHY_RX_CHAIN_VALID_POS (1)
+#define IWM_PHY_RX_CHAIN_VALID_MSK \
+ (0x7 << IWM_PHY_RX_CHAIN_VALID_POS)
+#define IWM_PHY_RX_CHAIN_FORCE_SEL_POS (4)
+#define IWM_PHY_RX_CHAIN_FORCE_SEL_MSK \
+ (0x7 << IWM_PHY_RX_CHAIN_FORCE_SEL_POS)
+#define IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+ (0x7 << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define IWM_PHY_RX_CHAIN_CNT_POS (10)
+#define IWM_PHY_RX_CHAIN_CNT_MSK \
+ (0x3 << IWM_PHY_RX_CHAIN_CNT_POS)
+#define IWM_PHY_RX_CHAIN_MIMO_CNT_POS (12)
+#define IWM_PHY_RX_CHAIN_MIMO_CNT_MSK \
+ (0x3 << IWM_PHY_RX_CHAIN_MIMO_CNT_POS)
+#define IWM_PHY_RX_CHAIN_MIMO_FORCE_POS (14)
+#define IWM_PHY_RX_CHAIN_MIMO_FORCE_MSK \
+ (0x1 << IWM_PHY_RX_CHAIN_MIMO_FORCE_POS)
+
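+/*
+ * Illustrative sketch, not part of the driver: composing an rxchain_info
+ * word for the PHY context command from the fields above -- here chains A
+ * and B marked valid, one chain used for SISO reception and one MIMO chain.
+ * Which chains are actually valid depends on the NVM contents.  The guard
+ * macro IWM_DOC_EXAMPLE is hypothetical.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline uint32_t iwm_example_rxchain_info(void)
+{
+	return (0x3 << IWM_PHY_RX_CHAIN_VALID_POS) |	/* chains A and B */
+	    (1 << IWM_PHY_RX_CHAIN_CNT_POS) |		/* one SISO chain */
+	    (1 << IWM_PHY_RX_CHAIN_MIMO_CNT_POS);	/* one MIMO chain */
+}
+#endif /* IWM_DOC_EXAMPLE */
+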
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define IWM_NUM_PHY_CTX 3
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwm_phy_context_cmd - config of the PHY context
+ * ( IWM_PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWM_FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ * other value means apply new params after X usecs
+ * @tx_param_color: ???
+ * @channel_info:
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwm_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWM_PHY_CONTEXT_DATA_API_S_VER_1 */
+ uint32_t apply_time;
+ uint32_t tx_param_color;
+ struct iwm_fw_channel_info ci;
+ uint32_t txchain_info;
+ uint32_t rxchain_info;
+ uint32_t acquisition_data;
+ uint32_t dsp_cfg_flags;
+} __packed; /* IWM_PHY_CONTEXT_CMD_API_VER_1 */
+
+#define IWM_RX_INFO_PHY_CNT 8
+#define IWM_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWM_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWM_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWM_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWM_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWM_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWM_RX_INFO_ENERGY_ANT_C_POS 16
+
+#define IWM_RX_INFO_AGC_IDX 1
+#define IWM_RX_INFO_RSSI_AB_IDX 2
+#define IWM_OFDM_AGC_A_MSK 0x0000007f
+#define IWM_OFDM_AGC_A_POS 0
+#define IWM_OFDM_AGC_B_MSK 0x00003f80
+#define IWM_OFDM_AGC_B_POS 7
+#define IWM_OFDM_AGC_CODE_MSK 0x3fe00000
+#define IWM_OFDM_AGC_CODE_POS 20
+#define IWM_OFDM_RSSI_INBAND_A_MSK 0x00ff
+#define IWM_OFDM_RSSI_A_POS 0
+#define IWM_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWM_OFDM_RSSI_ALLBAND_A_POS 8
+#define IWM_OFDM_RSSI_INBAND_B_MSK 0xff0000
+#define IWM_OFDM_RSSI_B_POS 16
+#define IWM_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWM_OFDM_RSSI_ALLBAND_B_POS 24
+
+/**
+ * struct iwm_rx_phy_info - phy info
+ * (IWM_REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1:
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy_buf: for various implementations of non_cfg_phy
+ * @rate_n_flags: IWM_RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ * calculation
+ * @mac_active_msk: what MACs were active when the frame was received
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwm_rx_phy_info {
+ uint8_t non_cfg_phy_cnt;
+ uint8_t cfg_phy_cnt;
+ uint8_t stat_id;
+ uint8_t reserved1;
+ uint32_t system_timestamp;
+ uint64_t timestamp;
+ uint32_t beacon_time_stamp;
+ uint16_t phy_flags;
+ uint16_t channel;
+ uint32_t non_cfg_phy[IWM_RX_INFO_PHY_CNT];
+ uint32_t rate_n_flags;
+ uint32_t byte_count;
+ uint16_t mac_active_msk;
+ uint16_t frame_time;
+} __packed;
+
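+/*
+ * Illustrative sketch, not part of the driver: extracting the antenna A
+ * energy value from the non_cfg_phy data of an Rx PHY notification, using
+ * the IWM_RX_INFO_ENERGY_* definitions above.  Byte-order conversion is
+ * omitted and the guard macro IWM_DOC_EXAMPLE is hypothetical.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline uint32_t
+iwm_example_energy_ant_a(const struct iwm_rx_phy_info *phy_info)
+{
+	uint32_t val = phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX];
+
+	return (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
+	    IWM_RX_INFO_ENERGY_ANT_A_POS;
+}
+#endif /* IWM_DOC_EXAMPLE */
+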
+struct iwm_rx_mpdu_res_start {
+ uint16_t byte_count;
+ uint16_t reserved;
+} __packed;
+
+/**
+ * enum iwm_rx_phy_flags - to parse %iwm_rx_phy_info phy_flags
+ * @IWM_RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @IWM_RX_RES_PHY_FLAGS_MOD_CCK:
+ * @IWM_RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @IWM_RX_RES_PHY_FLAGS_NARROW_BAND:
+ * @IWM_RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @IWM_RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @IWM_RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @IWM_RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @IWM_RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+enum iwm_rx_phy_flags {
+ IWM_RX_RES_PHY_FLAGS_BAND_24 = (1 << 0),
+ IWM_RX_RES_PHY_FLAGS_MOD_CCK = (1 << 1),
+ IWM_RX_RES_PHY_FLAGS_SHORT_PREAMBLE = (1 << 2),
+ IWM_RX_RES_PHY_FLAGS_NARROW_BAND = (1 << 3),
+ IWM_RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
+ IWM_RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
+ IWM_RX_RES_PHY_FLAGS_AGG = (1 << 7),
+ IWM_RX_RES_PHY_FLAGS_OFDM_HT = (1 << 8),
+ IWM_RX_RES_PHY_FLAGS_OFDM_GF = (1 << 9),
+ IWM_RX_RES_PHY_FLAGS_OFDM_VHT = (1 << 10),
+};
+
+/**
+ * enum iwm_mvm_rx_status - written by fw for each Rx packet
+ * @IWM_RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @IWM_RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @IWM_RX_MPDU_RES_STATUS_SRC_STA_FOUND:
+ * @IWM_RX_MPDU_RES_STATUS_KEY_VALID:
+ * @IWM_RX_MPDU_RES_STATUS_KEY_PARAM_OK:
+ * @IWM_RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @IWM_RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ * in the driver.
+ * @IWM_RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @IWM_RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
+ * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ * %IWM_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @IWM_RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @IWM_RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @IWM_RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @IWM_RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @IWM_RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @IWM_RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @IWM_RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @IWM_RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @IWM_RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @IWM_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @IWM_RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @IWM_RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @IWM_RX_MPDU_RES_STATUS_RRF_KILL:
+ * @IWM_RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @IWM_RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+enum iwm_mvm_rx_status {
+ IWM_RX_MPDU_RES_STATUS_CRC_OK = (1 << 0),
+ IWM_RX_MPDU_RES_STATUS_OVERRUN_OK = (1 << 1),
+ IWM_RX_MPDU_RES_STATUS_SRC_STA_FOUND = (1 << 2),
+ IWM_RX_MPDU_RES_STATUS_KEY_VALID = (1 << 3),
+ IWM_RX_MPDU_RES_STATUS_KEY_PARAM_OK = (1 << 4),
+ IWM_RX_MPDU_RES_STATUS_ICV_OK = (1 << 5),
+ IWM_RX_MPDU_RES_STATUS_MIC_OK = (1 << 6),
+ IWM_RX_MPDU_RES_STATUS_TTAK_OK = (1 << 7),
+ IWM_RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = (1 << 7),
+ IWM_RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
+ IWM_RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
+ IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
+ IWM_RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+ IWM_RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
+ IWM_RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
+ IWM_RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
+ IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
+ IWM_RX_MPDU_RES_STATUS_DEC_DONE = (1 << 11),
+ IWM_RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = (1 << 12),
+ IWM_RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = (1 << 13),
+ IWM_RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = (1 << 14),
+ IWM_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = (1 << 15),
+ IWM_RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
+ IWM_RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
+ IWM_RX_MPDU_RES_STATUS_RRF_KILL = (1 << 29),
+ IWM_RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
+ IWM_RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
+};
+
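+/*
+ * Illustrative sketch, not part of the driver: deciding whether an Rx MPDU
+ * arrived either unencrypted or successfully decrypted, using the status
+ * bits above.  The guard macro IWM_DOC_EXAMPLE is hypothetical.
+ */
+#ifdef IWM_DOC_EXAMPLE
+static inline int iwm_example_rx_crypto_ok(uint32_t rx_status)
+{
+	if ((rx_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+	    IWM_RX_MPDU_RES_STATUS_SEC_NO_ENC)
+		return 1;	/* frame was not encrypted */
+	return (rx_status & IWM_RX_MPDU_RES_STATUS_DEC_DONE) != 0;
+}
+#endif /* IWM_DOC_EXAMPLE */
+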
+/**
+ * struct iwm_radio_version_notif - information on the radio version
+ * ( IWM_RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor:
+ * @radio_step:
+ * @radio_dash:
+ */
+struct iwm_radio_version_notif {
+ uint32_t radio_flavor;
+ uint32_t radio_step;
+ uint32_t radio_dash;
+} __packed; /* IWM_RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+enum iwm_card_state_flags {
+ IWM_CARD_ENABLED = 0x00,
+ IWM_HW_CARD_DISABLED = 0x01,
+ IWM_SW_CARD_DISABLED = 0x02,
+ IWM_CT_KILL_CARD_DISABLED = 0x04,
+ IWM_HALT_CARD_DISABLED = 0x08,
+ IWM_CARD_DISABLED_MSK = 0x0f,
+ IWM_CARD_IS_RX_ON = 0x10,
+};
+
+/**
+ * struct iwm_card_state_notif - card state (RF-kill) notification
+ * (IWM_CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: %iwm_card_state_flags
+ */
+struct iwm_card_state_notif {
+ uint32_t flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwm_missed_beacons_notif - information on missed beacons
+ * ( IWM_MISSED_BEACONS_NOTIFICATION = 0xa2 )
+ * @mac_id: interface ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ * beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @num_expected_beacons:
+ * @num_recvd_beacons:
+ */
+struct iwm_missed_beacons_notif {
+ uint32_t mac_id;
+ uint32_t consec_missed_beacons_since_last_rx;
+ uint32_t consec_missed_beacons;
+ uint32_t num_expected_beacons;
+ uint32_t num_recvd_beacons;
+} __packed; /* IWM_MISSED_BEACON_NTFY_API_S_VER_3 */
+
+/**
+ * struct iwm_set_calib_default_cmd - set default value for calibration.
+ * ( IWM_SET_CALIB_DEFAULT_CMD = 0x8e )
+ * @calib_index: the calibration to set value for
+ * @length: of data
+ * @data: the value to set for the calibration result
+ */
+struct iwm_set_calib_default_cmd {
+ uint16_t calib_index;
+ uint16_t length;
+ uint8_t data[0];
+} __packed; /* IWM_PHY_CALIB_OVERRIDE_VALUES_S */
+
+#define IWM_MAX_PORT_ID_NUM 2
+#define IWM_MAX_MCAST_FILTERING_ADDRESSES 256
+
+/**
+ * struct iwm_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by station itself
+ * @port_id: Multicast MAC addresses array specifier. This is a strange way
+ * to identify network interface adopted in host-device IF.
+ * It is used by FW as index in array of addresses. This array has
+ * IWM_MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @addr_list: Placeholder for array of MAC addresses.
+ * IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwm_mcast_filter_cmd {
+ uint8_t filter_own;
+ uint8_t port_id;
+ uint8_t count;
+ uint8_t pass_all;
+ uint8_t bssid[6];
+ uint8_t reserved[2];
+ uint8_t addr_list[0];
+} __packed; /* IWM_MCAST_FILTERING_CMD_API_S_VER_1 */
+
+struct iwm_mvm_statistics_dbg {
+ uint32_t burst_check;
+ uint32_t burst_count;
+ uint32_t wait_for_silence_timeout_cnt;
+ uint32_t reserved[3];
+} __packed; /* IWM_STATISTICS_DEBUG_API_S_VER_2 */
+
+struct iwm_mvm_statistics_div {
+ uint32_t tx_on_a;
+ uint32_t tx_on_b;
+ uint32_t exec_time;
+ uint32_t probe_time;
+ uint32_t rssi_ant;
+ uint32_t reserved2;
+} __packed; /* IWM_STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+struct iwm_mvm_statistics_general_common {
+ uint32_t temperature; /* radio temperature */
+ uint32_t temperature_m; /* radio voltage */
+ struct iwm_mvm_statistics_dbg dbg;
+ uint32_t sleep_time;
+ uint32_t slots_out;
+ uint32_t slots_idle;
+ uint32_t ttl_timestamp;
+ struct iwm_mvm_statistics_div div;
+ uint32_t rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ uint32_t num_of_sos_states;
+} __packed; /* IWM_STATISTICS_GENERAL_API_S_VER_5 */
+
+struct iwm_mvm_statistics_rx_non_phy {
+ uint32_t bogus_cts; /* CTS received when not expecting CTS */
+ uint32_t bogus_ack; /* ACK received when not expecting ACK */
+ uint32_t non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ uint32_t filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ uint32_t non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ uint32_t channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ uint32_t num_missed_bcon; /* number of missed beacons */
+ uint32_t adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ uint32_t ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ uint32_t beacon_silence_rssi_a; /* RSSI silence after beacon frame */
+ uint32_t beacon_silence_rssi_b; /* RSSI silence after beacon frame */
+ uint32_t beacon_silence_rssi_c; /* RSSI silence after beacon frame */
+ uint32_t interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ uint32_t channel_load; /* counts RX Enable time in uSec */
+ uint32_t dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ uint32_t beacon_rssi_a;
+ uint32_t beacon_rssi_b;
+ uint32_t beacon_rssi_c;
+ uint32_t beacon_energy_a;
+ uint32_t beacon_energy_b;
+ uint32_t beacon_energy_c;
+ uint32_t num_bt_kills;
+ uint32_t mac_id;
+ uint32_t directed_data_mpdu;
+} __packed; /* IWM_STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct iwm_mvm_statistics_rx_phy {
+ uint32_t ina_cnt;
+ uint32_t fina_cnt;
+ uint32_t plcp_err;
+ uint32_t crc32_err;
+ uint32_t overrun_err;
+ uint32_t early_overrun_err;
+ uint32_t crc32_good;
+ uint32_t false_alarm_cnt;
+ uint32_t fina_sync_err_cnt;
+ uint32_t sfd_timeout;
+ uint32_t fina_timeout;
+ uint32_t unresponded_rts;
+ uint32_t rxe_frame_limit_overrun;
+ uint32_t sent_ack_cnt;
+ uint32_t sent_cts_cnt;
+ uint32_t sent_ba_rsp_cnt;
+ uint32_t dsp_self_kill;
+ uint32_t mh_format_err;
+ uint32_t re_acq_main_rssi_sum;
+ uint32_t reserved;
+} __packed; /* IWM_STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct iwm_mvm_statistics_rx_ht_phy {
+ uint32_t plcp_err;
+ uint32_t overrun_err;
+ uint32_t early_overrun_err;
+ uint32_t crc32_good;
+ uint32_t crc32_err;
+ uint32_t mh_format_err;
+ uint32_t agg_crc32_good;
+ uint32_t agg_mpdu_cnt;
+ uint32_t agg_cnt;
+ uint32_t unsupport_mcs;
+} __packed; /* IWM_STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+#define IWM_MAX_CHAINS 3
+
+struct iwm_mvm_statistics_tx_non_phy_agg {
+ uint32_t ba_timeout;
+ uint32_t ba_reschedule_frames;
+ uint32_t scd_query_agg_frame_cnt;
+ uint32_t scd_query_no_agg;
+ uint32_t scd_query_agg;
+ uint32_t scd_query_mismatch;
+ uint32_t frame_not_ready;
+ uint32_t underrun;
+ uint32_t bt_prio_kill;
+ uint32_t rx_ba_rsp_cnt;
+ int8_t txpower[IWM_MAX_CHAINS];
+ int8_t reserved;
+ uint32_t reserved2;
+} __packed; /* IWM_STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+struct iwm_mvm_statistics_tx_channel_width {
+ uint32_t ext_cca_narrow_ch20[1];
+ uint32_t ext_cca_narrow_ch40[2];
+ uint32_t ext_cca_narrow_ch80[3];
+ uint32_t ext_cca_narrow_ch160[4];
+ uint32_t last_tx_ch_width_indx;
+ uint32_t rx_detected_per_ch_width[4];
+ uint32_t success_per_ch_width[4];
+ uint32_t fail_per_ch_width[4];
+}; /* IWM_STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct iwm_mvm_statistics_tx {
+ uint32_t preamble_cnt;
+ uint32_t rx_detected_cnt;
+ uint32_t bt_prio_defer_cnt;
+ uint32_t bt_prio_kill_cnt;
+ uint32_t few_bytes_cnt;
+ uint32_t cts_timeout;
+ uint32_t ack_timeout;
+ uint32_t expected_ack_cnt;
+ uint32_t actual_ack_cnt;
+ uint32_t dump_msdu_cnt;
+ uint32_t burst_abort_next_frame_mismatch_cnt;
+ uint32_t burst_abort_missing_next_frame_cnt;
+ uint32_t cts_timeout_collision;
+ uint32_t ack_or_ba_timeout_collision;
+ struct iwm_mvm_statistics_tx_non_phy_agg agg;
+ struct iwm_mvm_statistics_tx_channel_width channel_width;
+} __packed; /* IWM_STATISTICS_TX_API_S_VER_4 */
+
+
+struct iwm_mvm_statistics_bt_activity {
+ uint32_t hi_priority_tx_req_cnt;
+ uint32_t hi_priority_tx_denied_cnt;
+ uint32_t lo_priority_tx_req_cnt;
+ uint32_t lo_priority_tx_denied_cnt;
+ uint32_t hi_priority_rx_req_cnt;
+ uint32_t hi_priority_rx_denied_cnt;
+ uint32_t lo_priority_rx_req_cnt;
+ uint32_t lo_priority_rx_denied_cnt;
+} __packed; /* IWM_STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+struct iwm_mvm_statistics_general {
+ struct iwm_mvm_statistics_general_common common;
+ uint32_t beacon_filtered;
+ uint32_t missed_beacons;
+ int8_t beacon_filter_average_energy;
+ int8_t beacon_filter_reason;
+ int8_t beacon_filter_current_energy;
+ int8_t beacon_filter_reserved;
+ uint32_t beacon_filter_delta_time;
+ struct iwm_mvm_statistics_bt_activity bt_activity;
+} __packed; /* IWM_STATISTICS_GENERAL_API_S_VER_5 */
+
+struct iwm_mvm_statistics_rx {
+ struct iwm_mvm_statistics_rx_phy ofdm;
+ struct iwm_mvm_statistics_rx_phy cck;
+ struct iwm_mvm_statistics_rx_non_phy general;
+ struct iwm_mvm_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* IWM_STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * IWM_STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * IWM_REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues IWM_REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+
+struct iwm_notif_statistics { /* IWM_STATISTICS_NTFY_API_S_VER_8 */
+ uint32_t flag;
+ struct iwm_mvm_statistics_rx rx;
+ struct iwm_mvm_statistics_tx tx;
+ struct iwm_mvm_statistics_general general;
+} __packed;
+
+/***********************************
+ * Smart Fifo API
+ ***********************************/
+/* Smart Fifo state */
+enum iwm_sf_state {
+ IWM_SF_LONG_DELAY_ON = 0, /* should never be called by driver */
+ IWM_SF_FULL_ON,
+ IWM_SF_UNINIT,
+ IWM_SF_INIT_OFF,
+ IWM_SF_HW_NUM_STATES
+};
+
+/* Smart Fifo possible scenario */
+enum iwm_sf_scenario {
+ IWM_SF_SCENARIO_SINGLE_UNICAST,
+ IWM_SF_SCENARIO_AGG_UNICAST,
+ IWM_SF_SCENARIO_MULTICAST,
+ IWM_SF_SCENARIO_BA_RESP,
+ IWM_SF_SCENARIO_TX_RESP,
+ IWM_SF_NUM_SCENARIO
+};
+
+#define IWM_SF_TRANSIENT_STATES_NUMBER 2 /* IWM_SF_LONG_DELAY_ON and IWM_SF_FULL_ON */
+#define IWM_SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define IWM_SF_W_MARK_SISO 4096
+#define IWM_SF_W_MARK_MIMO2 8192
+#define IWM_SF_W_MARK_MIMO3 6144
+#define IWM_SF_W_MARK_LEGACY 4096
+#define IWM_SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+#define IWM_SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define IWM_SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define IWM_SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define IWM_SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define IWM_SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
+#define IWM_SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
+#define IWM_SF_BA_IDLE_TIMER 320 /* 300 uSec */
+#define IWM_SF_BA_AGING_TIMER 2016 /* 2 mSec */
+#define IWM_SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
+#define IWM_SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
+
+#define IWM_SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+
+/**
+ * Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in enum iwm_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwm_sf_cfg_cmd {
+ enum iwm_sf_state state;
+ uint32_t watermark[IWM_SF_TRANSIENT_STATES_NUMBER];
+ uint32_t long_delay_timeouts[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES];
+ uint32_t full_on_timeouts[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES];
+} __packed; /* IWM_SF_CFG_API_S_VER_2 */
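+
+/*
+ * Illustrative sketch, not part of the firmware API: one way a driver could
+ * fill the full-on timeout table of iwm_sf_cfg_cmd from the default timer
+ * values above.  The scenario order follows enum iwm_sf_scenario; which slot
+ * holds the aging vs. idle timer is an assumption here, as are the function
+ * name and the availability of the usual htole32() byte-order helper.
+ */
+static inline void
+iwm_example_sf_full_on_timeouts(struct iwm_sf_cfg_cmd *sf_cmd)
+{
+	/* Assumed slot order within each scenario: [0] aging, [1] idle. */
+	static const uint32_t
+	    timers[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
+		{ IWM_SF_SINGLE_UNICAST_AGING_TIMER,
+		  IWM_SF_SINGLE_UNICAST_IDLE_TIMER },
+		{ IWM_SF_AGG_UNICAST_AGING_TIMER,
+		  IWM_SF_AGG_UNICAST_IDLE_TIMER },
+		{ IWM_SF_MCAST_AGING_TIMER, IWM_SF_MCAST_IDLE_TIMER },
+		{ IWM_SF_BA_AGING_TIMER, IWM_SF_BA_IDLE_TIMER },
+		{ IWM_SF_TX_RE_AGING_TIMER, IWM_SF_TX_RE_IDLE_TIMER },
+	};
+	int i, j;
+
+	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++)
+		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++)
+			sf_cmd->full_on_timeouts[i][j] = htole32(timers[i][j]);
+}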
+
+/*
+ * END mvm/fw-api.h
+ */
+
+/*
+ * BEGIN mvm/fw-api-mac.h
+ */
+
+/*
+ * The first MAC indices (starting from 0)
+ * are available to the driver, AUX follows
+ */
+#define IWM_MAC_INDEX_AUX 4
+#define IWM_MAC_INDEX_MIN_DRIVER 0
+#define IWM_NUM_MAC_INDEX_DRIVER IWM_MAC_INDEX_AUX
+
+enum iwm_ac {
+ IWM_AC_BK,
+ IWM_AC_BE,
+ IWM_AC_VI,
+ IWM_AC_VO,
+ IWM_AC_NUM,
+};
+
+/**
+ * enum iwm_mac_protection_flags - MAC context flags
+ * @IWM_MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames,
+ * this will require CCK RTS/CTS2self.
+ * RTS/CTS will protect full burst time.
+ * @IWM_MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @IWM_MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @IWM_MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+enum iwm_mac_protection_flags {
+ IWM_MAC_PROT_FLG_TGG_PROTECT = (1 << 3),
+ IWM_MAC_PROT_FLG_HT_PROT = (1 << 23),
+ IWM_MAC_PROT_FLG_FAT_PROT = (1 << 24),
+ IWM_MAC_PROT_FLG_SELF_CTS_EN = (1 << 30),
+};
+
+#define IWM_MAC_FLG_SHORT_SLOT (1 << 4)
+#define IWM_MAC_FLG_SHORT_PREAMBLE (1 << 5)
+
+/**
+ * enum iwm_mac_types - Supported MAC types
+ * @IWM_FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @IWM_FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @IWM_FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @IWM_FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @IWM_FW_MAC_TYPE_IBSS: IBSS
+ * @IWM_FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @IWM_FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @IWM_FW_MAC_TYPE_P2P_STA: P2P client
+ * @IWM_FW_MAC_TYPE_GO: P2P GO
+ * @IWM_FW_MAC_TYPE_TEST: ?
+ * @IWM_FW_MAC_TYPE_MAX: highest supported MAC type
+ */
+enum iwm_mac_types {
+ IWM_FW_MAC_TYPE_FIRST = 1,
+ IWM_FW_MAC_TYPE_AUX = IWM_FW_MAC_TYPE_FIRST,
+ IWM_FW_MAC_TYPE_LISTENER,
+ IWM_FW_MAC_TYPE_PIBSS,
+ IWM_FW_MAC_TYPE_IBSS,
+ IWM_FW_MAC_TYPE_BSS_STA,
+ IWM_FW_MAC_TYPE_P2P_DEVICE,
+ IWM_FW_MAC_TYPE_P2P_STA,
+ IWM_FW_MAC_TYPE_GO,
+ IWM_FW_MAC_TYPE_TEST,
+ IWM_FW_MAC_TYPE_MAX = IWM_FW_MAC_TYPE_TEST
+}; /* IWM_MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwm_tsf_id - TSF hw timer ID
+ * @IWM_TSF_ID_A: use TSF A
+ * @IWM_TSF_ID_B: use TSF B
+ * @IWM_TSF_ID_C: use TSF C
+ * @IWM_TSF_ID_D: use TSF D
+ * @IWM_NUM_TSF_IDS: number of TSF timers available
+ */
+enum iwm_tsf_id {
+ IWM_TSF_ID_A = 0,
+ IWM_TSF_ID_B = 1,
+ IWM_TSF_ID_C = 2,
+ IWM_TSF_ID_D = 3,
+ IWM_NUM_TSF_IDS = 4,
+}; /* IWM_TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwm_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @dtim_interval: dtim transmit time in TU
+ * @dtim_reciprocal: 2^32 / dtim_interval
+ * @mcast_qid: queue ID for multicast traffic
+ * @beacon_template: beacon template ID
+ */
+struct iwm_mac_data_ap {
+ uint32_t beacon_time;
+ uint64_t beacon_tsf;
+ uint32_t bi;
+ uint32_t bi_reciprocal;
+ uint32_t dtim_interval;
+ uint32_t dtim_reciprocal;
+ uint32_t mcast_qid;
+ uint32_t beacon_template;
+} __packed; /* AP_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwm_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
+ */
+struct iwm_mac_data_ibss {
+ uint32_t beacon_time;
+ uint64_t beacon_tsf;
+ uint32_t bi;
+ uint32_t bi_reciprocal;
+ uint32_t beacon_template;
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwm_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @bi_reciprocal: 2^32 / bi , applicable only when associated
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ */
+struct iwm_mac_data_sta {
+ uint32_t is_assoc;
+ uint32_t dtim_time;
+ uint64_t dtim_tsf;
+ uint32_t bi;
+ uint32_t bi_reciprocal;
+ uint32_t dtim_interval;
+ uint32_t dtim_reciprocal;
+ uint32_t listen_interval;
+ uint32_t assoc_id;
+ uint32_t assoc_beacon_arrive_time;
+} __packed; /* IWM_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwm_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwm_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicate that opportunistic PS allowed
+ */
+struct iwm_mac_data_go {
+ struct iwm_mac_data_ap ap;
+ uint32_t ctwin;
+ uint32_t opp_ps_enabled;
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwm_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwm_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ */
+struct iwm_mac_data_p2p_sta {
+ struct iwm_mac_data_sta sta;
+ uint32_t ctwin;
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwm_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwm_mac_data_pibss {
+ uint32_t stats_interval;
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwm_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ *	other channels as well. This should be set to true only when the
+ *	device is discoverable and there is an active GO. Note that setting this
+ *	field when not needed will increase the number of interrupts and have
+ *	an effect on the platform power, as this setting opens the Rx filters on
+ *	all MACs.
+ */
+struct iwm_mac_data_p2p_dev {
+ uint32_t is_disc_extended;
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwm_mac_filter_flags - MAC context filter flags
+ * @IWM_MAC_FILTER_IN_PROMISC: accept all data frames
+ * @IWM_MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ * control frames to the host
+ * @IWM_MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @IWM_MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @IWM_MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @IWM_MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ * (in station mode when associated)
+ * @IWM_MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @IWM_MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @IWM_MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+enum iwm_mac_filter_flags {
+ IWM_MAC_FILTER_IN_PROMISC = (1 << 0),
+ IWM_MAC_FILTER_IN_CONTROL_AND_MGMT = (1 << 1),
+ IWM_MAC_FILTER_ACCEPT_GRP = (1 << 2),
+ IWM_MAC_FILTER_DIS_DECRYPT = (1 << 3),
+ IWM_MAC_FILTER_DIS_GRP_DECRYPT = (1 << 4),
+ IWM_MAC_FILTER_IN_BEACON = (1 << 6),
+ IWM_MAC_FILTER_OUT_BCAST = (1 << 8),
+ IWM_MAC_FILTER_IN_CRC32 = (1 << 11),
+ IWM_MAC_FILTER_IN_PROBE_REQUEST = (1 << 12),
+};
+
+/**
+ * enum iwm_mac_qos_flags - QoS flags
+ * @IWM_MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @IWM_MAC_QOS_FLG_TGN: HT is enabled
+ * @IWM_MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+enum iwm_mac_qos_flags {
+ IWM_MAC_QOS_FLG_UPDATE_EDCA = (1 << 0),
+ IWM_MAC_QOS_FLG_TGN = (1 << 1),
+ IWM_MAC_QOS_FLG_TXOP_TYPE = (1 << 4),
+};
+
+/**
+ * struct iwm_ac_qos - QOS timing params for IWM_MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwm_qosparam_cmd.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwm_ac_qos {
+ uint16_t cw_min;
+ uint16_t cw_max;
+ uint8_t aifsn;
+ uint8_t fifos_mask;
+ uint16_t edca_txop;
+} __packed; /* IWM_AC_QOS_API_S_VER_2 */
+
+/**
+ * struct iwm_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( IWM_MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of IWM_FW_CTXT_ACTION_*
+ * @mac_type: one of IWM_FW_MAC_TYPE_*
+ * @tsd_id: TSF HW timer, one of IWM_TSF_ID_*
+ * @node_addr: MAC address
+ * @bssid_addr: BSSID
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of IWM_MAC_PROT_FLG_FLAG_*
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of IWM_MAC_FILTER_*
+ * @qos_flags: from IWM_MAC_QOS_FLG_*
+ * @ac: one iwm_mac_qos configuration for each AC
+ * @mac_specific: one of struct iwm_mac_data_*, according to mac_type
+ */
+struct iwm_mac_ctx_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWM_MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+ uint32_t mac_type;
+ uint32_t tsf_id;
+ uint8_t node_addr[6];
+ uint16_t reserved_for_node_addr;
+ uint8_t bssid_addr[6];
+ uint16_t reserved_for_bssid_addr;
+ uint32_t cck_rates;
+ uint32_t ofdm_rates;
+ uint32_t protection_flags;
+ uint32_t cck_short_preamble;
+ uint32_t short_slot;
+ uint32_t filter_flags;
+ /* IWM_MAC_QOS_PARAM_API_S_VER_1 */
+ uint32_t qos_flags;
+ struct iwm_ac_qos ac[IWM_AC_NUM+1];
+ /* IWM_MAC_CONTEXT_COMMON_DATA_API_S */
+ union {
+ struct iwm_mac_data_ap ap;
+ struct iwm_mac_data_go go;
+ struct iwm_mac_data_sta sta;
+ struct iwm_mac_data_p2p_sta p2p_sta;
+ struct iwm_mac_data_p2p_dev p2p_dev;
+ struct iwm_mac_data_pibss pibss;
+ struct iwm_mac_data_ibss ibss;
+ };
+} __packed; /* IWM_MAC_CONTEXT_CMD_API_S_VER_1 */
+
+static inline uint32_t iwm_mvm_reciprocal(uint32_t v)
+{
+ if (!v)
+ return 0;
+ return 0xFFFFFFFF / v;
+}
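+
+/*
+ * Illustrative sketch, not part of the firmware API: deriving the reciprocal
+ * fields of a station MAC context from the beacon interval and DTIM period,
+ * using the helper above.  The function and parameter names are made up for
+ * illustration; dtim_interval = bi * dtim_period is the usual 802.11
+ * relationship and the htole32() byte-order helper is assumed available.
+ */
+static inline void
+iwm_example_fill_sta_intervals(struct iwm_mac_data_sta *sta,
+    uint32_t bi, uint32_t dtim_period)
+{
+	sta->bi = htole32(bi);
+	sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(bi));
+	sta->dtim_interval = htole32(bi * dtim_period);
+	sta->dtim_reciprocal = htole32(iwm_mvm_reciprocal(bi * dtim_period));
+}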
+
+#define IWM_NONQOS_SEQ_GET 0x1
+#define IWM_NONQOS_SEQ_SET 0x2
+struct iwm_nonqos_seq_query_cmd {
+ uint32_t get_set_flag;
+ uint32_t mac_id_n_color;
+ uint16_t value;
+ uint16_t reserved;
+} __packed; /* IWM_NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
+
+/*
+ * END mvm/fw-api-mac.h
+ */
+
+/*
+ * BEGIN mvm/fw-api-power.h
+ */
+
+/* Power Management Commands, Responses, Notifications */
+
+/* Radio LP RX Energy Threshold measured in dBm */
+#define IWM_POWER_LPRX_RSSI_THRESHOLD 75
+#define IWM_POWER_LPRX_RSSI_THRESHOLD_MAX 94
+#define IWM_POWER_LPRX_RSSI_THRESHOLD_MIN 30
+
+/**
+ * enum iwm_scan_flags - masks for power table command flags
+ * @IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+ * receiver and transmitter. '0' - does not allow.
+ * @IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ * '1' Driver enables PM (use rest of parameters)
+ * @IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ *		'1' PM could sleep over DTIM till listen interval.
+ * @IWM_POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @IWM_POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW Snoozing enabled
+ * @IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+ * @IWM_POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @IWM_POWER_FLAGS_AP_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * detection enablement
+*/
+enum iwm_power_flags {
+ IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK = (1 << 0),
+ IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = (1 << 1),
+ IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK = (1 << 2),
+ IWM_POWER_FLAGS_SNOOZE_ENA_MSK = (1 << 5),
+ IWM_POWER_FLAGS_BT_SCO_ENA = (1 << 8),
+ IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK = (1 << 9),
+ IWM_POWER_FLAGS_LPRX_ENA_MSK = (1 << 11),
+ IWM_POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = (1 << 12),
+};
+
+#define IWM_POWER_VEC_SIZE 5
+
+/**
+ * struct iwm_powertable_cmd - legacy power command. Beside old API support this
+ * is used also with a new power API for device wide power settings.
+ * IWM_POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from IWM_POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ *	Minimum allowed: 3 * DTIM. Keep alive period must be
+ *	set regardless of power scheme or current power state.
+ *	FW uses this value also when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ *	one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ *	Default: 80dBm
+ */
+struct iwm_powertable_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ uint16_t flags;
+ uint8_t keep_alive_seconds;
+ uint8_t debug_flags;
+ uint32_t rx_data_timeout;
+ uint32_t tx_data_timeout;
+ uint32_t sleep_interval[IWM_POWER_VEC_SIZE];
+ uint32_t skip_dtim_periods;
+ uint32_t lprx_rssi_threshold;
+} __packed;
+
+/**
+ * enum iwm_device_power_flags - masks for device power command flags
+ * @IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+ *	receiver and transmitter. '0' - does not allow. This flag should be
+ *	always set to '1' unless one needs to disable actual power down for debug
+ * purposes.
+ * @IWM_DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
+ * that power management is disabled. '0' Power management is enabled, one
+ * of power schemes is applied.
+*/
+enum iwm_device_power_flags {
+ IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = (1 << 0),
+ IWM_DEVICE_POWER_FLAGS_CAM_MSK = (1 << 13),
+};
+
+/**
+ * struct iwm_device_power_cmd - device wide power command.
+ * IWM_DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from IWM_DEVICE_POWER_FLAGS_*
+ */
+struct iwm_device_power_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ uint16_t flags;
+ uint16_t reserved;
+} __packed;
+
+/**
+ * struct iwm_mac_power_cmd - New power command containing uAPSD support
+ * IWM_MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color: MAC context identifier
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ *	Minimum allowed: 3 * DTIM. Keep alive period must be
+ *	set regardless of power scheme or current power state.
+ *	FW uses this value also when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ *	one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ *	Default: 80dBm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: Maximum time between attempts to retrieve buffered data
+ * from the AP [msec]
+ * @snooze_window: A window of time in which PBW snoozing ensures that all
+ *	packets are received. It is also the minimum time from the last
+ *	received unicast RX packet before the client stops snoozing
+ * for data. [msec]
+ * @snooze_step: TBD
+ * @qndp_tid: TID client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
+ * each corresponding AC.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ * values.
+ * @heavy_tx_thld_packets: TX threshold measured in number of packets
+ * @heavy_rx_thld_packets: RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
+ * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
+ * @limited_ps_threshold:
+*/
+struct iwm_mac_power_cmd {
+ /* CONTEXT_DESC_API_T_VER_1 */
+ uint32_t id_and_color;
+
+ /* CLIENT_PM_POWER_TABLE_S_VER_1 */
+ uint16_t flags;
+ uint16_t keep_alive_seconds;
+ uint32_t rx_data_timeout;
+ uint32_t tx_data_timeout;
+ uint32_t rx_data_timeout_uapsd;
+ uint32_t tx_data_timeout_uapsd;
+ uint8_t lprx_rssi_threshold;
+ uint8_t skip_dtim_periods;
+ uint16_t snooze_interval;
+ uint16_t snooze_window;
+ uint8_t snooze_step;
+ uint8_t qndp_tid;
+ uint8_t uapsd_ac_flags;
+ uint8_t uapsd_max_sp;
+ uint8_t heavy_tx_thld_packets;
+ uint8_t heavy_rx_thld_packets;
+ uint8_t heavy_tx_thld_percentage;
+ uint8_t heavy_rx_thld_percentage;
+ uint8_t limited_ps_threshold;
+ uint8_t reserved;
+} __packed;
+
+/*
+ * struct iwm_uapsd_misbehaving_ap_notif - FW sends this notification when
+ * associated AP is identified as improperly implementing uAPSD protocol.
+ * IWM_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ * this context.
+ */
+struct iwm_uapsd_misbehaving_ap_notif {
+ uint32_t sta_id;
+ uint8_t mac_id;
+ uint8_t reserved[3];
+} __packed;
+
+/**
+ * struct iwm_beacon_filter_cmd
+ * IWM_REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @id_and_color: MAC context identifier
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ * to driver if delta in Energy values calculated for this and last
+ * passed beacon is greater than this threshold. Zero value means that
+ * the Energy change is ignored for beacon filtering, and beacon will
+ * not be forced to be sent to driver regardless of this delta. Typical
+ * energy delta 5dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ * Send beacon to driver if delta in Energy values calculated for this
+ * and last passed beacon is greater than this threshold. Zero value
+ * means that the Energy change is ignored for beacon filtering while in
+ * Roaming state, typical energy delta 1dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ * calculated for current beacon is less than the threshold, use
+ * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ * Threshold. Typical energy threshold is -72dBm.
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ *      filtering (Slow or Fast) that is selected (units are in Celsius):
+ *      if the current temperature is above this threshold, the Fast filter
+ *      will be used; if the current temperature is below this threshold,
+ *      the Slow filter will be used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ *      regardless of whether the temperature has changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ *      regardless of whether the temperature has changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_escape_timer: Send beacons to driver if no beacons were passed
+ * for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ *      for a longer period of time than this escape-timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ */
+struct iwm_beacon_filter_cmd {
+ uint32_t bf_energy_delta;
+ uint32_t bf_roaming_energy_delta;
+ uint32_t bf_roaming_state;
+ uint32_t bf_temp_threshold;
+ uint32_t bf_temp_fast_filter;
+ uint32_t bf_temp_slow_filter;
+ uint32_t bf_enable_beacon_filter;
+ uint32_t bf_debug_flag;
+ uint32_t bf_escape_timer;
+ uint32_t ba_escape_timer;
+ uint32_t ba_enable_beacon_abort;
+} __packed;
+
+/* Beacon filtering and beacon abort */
+#define IWM_BF_ENERGY_DELTA_DEFAULT 5
+#define IWM_BF_ENERGY_DELTA_MAX 255
+#define IWM_BF_ENERGY_DELTA_MIN 0
+
+#define IWM_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWM_BF_ROAMING_ENERGY_DELTA_MAX 255
+#define IWM_BF_ROAMING_ENERGY_DELTA_MIN 0
+
+#define IWM_BF_ROAMING_STATE_DEFAULT 72
+#define IWM_BF_ROAMING_STATE_MAX 255
+#define IWM_BF_ROAMING_STATE_MIN 0
+
+#define IWM_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWM_BF_TEMP_THRESHOLD_MAX 255
+#define IWM_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWM_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWM_BF_TEMP_FAST_FILTER_MAX 255
+#define IWM_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWM_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWM_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWM_BF_TEMP_SLOW_FILTER_MIN 0
+
+#define IWM_BF_ENABLE_BEACON_FILTER_DEFAULT 1
+
+#define IWM_BF_DEBUG_FLAG_DEFAULT 0
+
+#define IWM_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWM_BF_ESCAPE_TIMER_MAX 1024
+#define IWM_BF_ESCAPE_TIMER_MIN 0
+
+#define IWM_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWM_BA_ESCAPE_TIMER_D3 9
+#define IWM_BA_ESCAPE_TIMER_MAX 1024
+#define IWM_BA_ESCAPE_TIMER_MIN 0
+
+#define IWM_BA_ENABLE_BEACON_ABORT_DEFAULT 1
+
+#define IWM_BF_CMD_CONFIG_DEFAULTS \
+ .bf_energy_delta = htole32(IWM_BF_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_energy_delta = \
+ htole32(IWM_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_state = htole32(IWM_BF_ROAMING_STATE_DEFAULT), \
+ .bf_temp_threshold = htole32(IWM_BF_TEMP_THRESHOLD_DEFAULT), \
+ .bf_temp_fast_filter = htole32(IWM_BF_TEMP_FAST_FILTER_DEFAULT), \
+ .bf_temp_slow_filter = htole32(IWM_BF_TEMP_SLOW_FILTER_DEFAULT), \
+ .bf_debug_flag = htole32(IWM_BF_DEBUG_FLAG_DEFAULT), \
+ .bf_escape_timer = htole32(IWM_BF_ESCAPE_TIMER_DEFAULT), \
+ .ba_escape_timer = htole32(IWM_BA_ESCAPE_TIMER_DEFAULT)
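+
+/*
+ * Illustrative sketch, not part of the firmware API: the defaults macro
+ * above is written as a designated-initializer fragment, so a command with
+ * default thresholds can be built like this, with the enable field supplied
+ * separately.  The function name is made up for illustration.
+ */
+static inline void
+iwm_example_beacon_filter_defaults(struct iwm_beacon_filter_cmd *bf)
+{
+	struct iwm_beacon_filter_cmd tmpl = {
+		IWM_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter =
+		    htole32(IWM_BF_ENABLE_BEACON_FILTER_DEFAULT),
+	};
+
+	*bf = tmpl;
+}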
+
+/*
+ * END mvm/fw-api-power.h
+ */
+
+/*
+ * BEGIN mvm/fw-api-rs.h
+ */
+
+/*
+ * These serve as indexes into
+ * struct iwm_rate_info fw_rate_idx_to_plcp[IWM_RATE_COUNT];
+ * TODO: avoid overlap between legacy and HT rates
+ */
+enum {
+ IWM_RATE_1M_INDEX = 0,
+ IWM_FIRST_CCK_RATE = IWM_RATE_1M_INDEX,
+ IWM_RATE_2M_INDEX,
+ IWM_RATE_5M_INDEX,
+ IWM_RATE_11M_INDEX,
+ IWM_LAST_CCK_RATE = IWM_RATE_11M_INDEX,
+ IWM_RATE_6M_INDEX,
+ IWM_FIRST_OFDM_RATE = IWM_RATE_6M_INDEX,
+ IWM_RATE_MCS_0_INDEX = IWM_RATE_6M_INDEX,
+ IWM_FIRST_HT_RATE = IWM_RATE_MCS_0_INDEX,
+ IWM_FIRST_VHT_RATE = IWM_RATE_MCS_0_INDEX,
+ IWM_RATE_9M_INDEX,
+ IWM_RATE_12M_INDEX,
+ IWM_RATE_MCS_1_INDEX = IWM_RATE_12M_INDEX,
+ IWM_RATE_18M_INDEX,
+ IWM_RATE_MCS_2_INDEX = IWM_RATE_18M_INDEX,
+ IWM_RATE_24M_INDEX,
+ IWM_RATE_MCS_3_INDEX = IWM_RATE_24M_INDEX,
+ IWM_RATE_36M_INDEX,
+ IWM_RATE_MCS_4_INDEX = IWM_RATE_36M_INDEX,
+ IWM_RATE_48M_INDEX,
+ IWM_RATE_MCS_5_INDEX = IWM_RATE_48M_INDEX,
+ IWM_RATE_54M_INDEX,
+ IWM_RATE_MCS_6_INDEX = IWM_RATE_54M_INDEX,
+ IWM_LAST_NON_HT_RATE = IWM_RATE_54M_INDEX,
+ IWM_RATE_60M_INDEX,
+ IWM_RATE_MCS_7_INDEX = IWM_RATE_60M_INDEX,
+ IWM_LAST_HT_RATE = IWM_RATE_MCS_7_INDEX,
+ IWM_RATE_MCS_8_INDEX,
+ IWM_RATE_MCS_9_INDEX,
+ IWM_LAST_VHT_RATE = IWM_RATE_MCS_9_INDEX,
+ IWM_RATE_COUNT_LEGACY = IWM_LAST_NON_HT_RATE + 1,
+ IWM_RATE_COUNT = IWM_LAST_VHT_RATE + 1,
+};
+
+#define IWM_RATE_BIT_MSK(r) (1 << (IWM_RATE_##r##M_INDEX))
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+enum {
+ IWM_RATE_6M_PLCP = 13,
+ IWM_RATE_9M_PLCP = 15,
+ IWM_RATE_12M_PLCP = 5,
+ IWM_RATE_18M_PLCP = 7,
+ IWM_RATE_24M_PLCP = 9,
+ IWM_RATE_36M_PLCP = 11,
+ IWM_RATE_48M_PLCP = 1,
+ IWM_RATE_54M_PLCP = 3,
+ IWM_RATE_1M_PLCP = 10,
+ IWM_RATE_2M_PLCP = 20,
+ IWM_RATE_5M_PLCP = 55,
+ IWM_RATE_11M_PLCP = 110,
+ IWM_RATE_INVM_PLCP = -1,
+};
+
+/*
+ * rate_n_flags bit fields
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define IWM_RATE_MCS_HT_POS 8
+#define IWM_RATE_MCS_HT_MSK (1 << IWM_RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define IWM_RATE_MCS_CCK_POS 9
+#define IWM_RATE_MCS_CCK_MSK (1 << IWM_RATE_MCS_CCK_POS)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define IWM_RATE_MCS_VHT_POS 26
+#define IWM_RATE_MCS_VHT_MSK (1 << IWM_RATE_MCS_VHT_POS)
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ * 2-0: MCS rate base
+ * 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ * (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define IWM_RATE_HT_MCS_RATE_CODE_MSK 0x7
+#define IWM_RATE_HT_MCS_NSS_POS 3
+#define IWM_RATE_HT_MCS_NSS_MSK (3 << IWM_RATE_HT_MCS_NSS_POS)
+
+/* Bit 10: (1) Use Green Field preamble */
+#define IWM_RATE_HT_MCS_GF_POS 10
+#define IWM_RATE_HT_MCS_GF_MSK (1 << IWM_RATE_HT_MCS_GF_POS)
+
+#define IWM_RATE_HT_MCS_INDEX_MSK 0x3f
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ * 3-0: VHT MCS (0-9)
+ * 5-4: number of streams - 1:
+ * 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
+#define IWM_RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define IWM_RATE_VHT_MCS_NSS_POS 4
+#define IWM_RATE_VHT_MCS_NSS_MSK (3 << IWM_RATE_VHT_MCS_NSS_POS)
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ * (bit 7 is 0)
+ */
+#define IWM_RATE_LEGACY_RATE_MSK 0xff
+
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define IWM_RATE_MCS_CHAN_WIDTH_POS 11
+#define IWM_RATE_MCS_CHAN_WIDTH_MSK (3 << IWM_RATE_MCS_CHAN_WIDTH_POS)
+#define IWM_RATE_MCS_CHAN_WIDTH_20 (0 << IWM_RATE_MCS_CHAN_WIDTH_POS)
+#define IWM_RATE_MCS_CHAN_WIDTH_40 (1 << IWM_RATE_MCS_CHAN_WIDTH_POS)
+#define IWM_RATE_MCS_CHAN_WIDTH_80 (2 << IWM_RATE_MCS_CHAN_WIDTH_POS)
+#define IWM_RATE_MCS_CHAN_WIDTH_160 (3 << IWM_RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define IWM_RATE_MCS_SGI_POS 13
+#define IWM_RATE_MCS_SGI_MSK (1 << IWM_RATE_MCS_SGI_POS)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
+#define IWM_RATE_MCS_ANT_POS 14
+#define IWM_RATE_MCS_ANT_A_MSK (1 << IWM_RATE_MCS_ANT_POS)
+#define IWM_RATE_MCS_ANT_B_MSK (2 << IWM_RATE_MCS_ANT_POS)
+#define IWM_RATE_MCS_ANT_C_MSK (4 << IWM_RATE_MCS_ANT_POS)
+#define IWM_RATE_MCS_ANT_AB_MSK (IWM_RATE_MCS_ANT_A_MSK | \
+ IWM_RATE_MCS_ANT_B_MSK)
+#define IWM_RATE_MCS_ANT_ABC_MSK (IWM_RATE_MCS_ANT_AB_MSK | \
+ IWM_RATE_MCS_ANT_C_MSK)
+#define IWM_RATE_MCS_ANT_MSK IWM_RATE_MCS_ANT_ABC_MSK
+#define IWM_RATE_MCS_ANT_NUM 3
+
+/* Bit 17-18: (0) SS, (1) SS*2 */
+#define IWM_RATE_MCS_STBC_POS 17
+#define IWM_RATE_MCS_STBC_MSK (1 << IWM_RATE_MCS_STBC_POS)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define IWM_RATE_MCS_BF_POS 19
+#define IWM_RATE_MCS_BF_MSK (1 << IWM_RATE_MCS_BF_POS)
+
+/* Bit 20: (0) ZLF is off, (1) ZLF is on */
+#define IWM_RATE_MCS_ZLF_POS 20
+#define IWM_RATE_MCS_ZLF_MSK (1 << IWM_RATE_MCS_ZLF_POS)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
+#define IWM_RATE_MCS_DUP_POS 24
+#define IWM_RATE_MCS_DUP_MSK (3 << IWM_RATE_MCS_DUP_POS)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define IWM_RATE_MCS_LDPC_POS 27
+#define IWM_RATE_MCS_LDPC_MSK (1 << IWM_RATE_MCS_LDPC_POS)
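+
+/*
+ * Illustrative sketch, not part of the firmware API: composing a
+ * rate_n_flags value for an HT rate from the bit fields above.  Using
+ * antenna A, as well as the function and parameter names, are assumptions
+ * for illustration only.
+ */
+static inline uint32_t
+iwm_example_ht_rate_n_flags(int mcs, int is_40mhz, int sgi)
+{
+	/* Low 6 bits carry the HT MCS index (rate base plus stream count). */
+	uint32_t rate_n_flags = IWM_RATE_MCS_HT_MSK |
+	    IWM_RATE_MCS_ANT_A_MSK |
+	    (mcs & IWM_RATE_HT_MCS_INDEX_MSK);
+
+	if (is_40mhz)
+		rate_n_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
+	if (sgi)
+		rate_n_flags |= IWM_RATE_MCS_SGI_MSK;
+	return rate_n_flags;
+}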
+
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define IWM_LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define IWM_LQ_FLAG_USE_RTS_POS 0
+#define IWM_LQ_FLAG_USE_RTS_MSK (1 << IWM_LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define IWM_LQ_FLAG_COLOR_POS 1
+#define IWM_LQ_FLAG_COLOR_MSK (7 << IWM_LQ_FLAG_COLOR_POS)
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define IWM_LQ_FLAG_RTS_BW_SIG_POS 4
+#define IWM_LQ_FLAG_RTS_BW_SIG_NONE (0 << IWM_LQ_FLAG_RTS_BW_SIG_POS)
+#define IWM_LQ_FLAG_RTS_BW_SIG_STATIC (1 << IWM_LQ_FLAG_RTS_BW_SIG_POS)
+#define IWM_LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << IWM_LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with a narrower BW than requested in rates
+ */
+#define IWM_LQ_FLAG_DYNAMIC_BW_POS 6
+#define IWM_LQ_FLAG_DYNAMIC_BW_MSK (1 << IWM_LQ_FLAG_DYNAMIC_BW_POS)
+
+/**
+ * struct iwm_lq_cmd - link quality command
+ * @sta_id: station to update
+ * @control: not used
+ * @flags: combination of IWM_LQ_FLAG_*
+ * @mimo_delim: the first SISO index in rs_table, which separates MIMO
+ * and SISO rates
+ * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD).
+ * Should be ANT_[ABC]
+ * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC]
+ * @initial_rate_index: first index from rs_table per AC category
+ * @agg_time_limit: aggregation max time threshold in usec/100, meaning
+ * value of 100 is one usec. Range is 100 to 8000
+ * @agg_disable_start_th: try-count threshold for starting aggregation.
+ * If a frame has higher try-count, it should not be selected for
+ * starting an aggregation sequence.
+ * @agg_frame_cnt_limit: max frame count in an aggregation.
+ * 0: no limit
+ * 1: no aggregation (one frame per aggregation)
+ * 2 - 0x3f: maximal number of frames (up to 3f == 63)
+ * @rs_table: array of rates for each TX try, each is rate_n_flags,
+ * meaning it is a combination of IWM_RATE_MCS_* and IWM_RATE_*_PLCP
+ * @bf_params: beam forming params, currently not used
+ */
+struct iwm_lq_cmd {
+ uint8_t sta_id;
+ uint8_t reserved1;
+ uint16_t control;
+ /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
+ uint8_t flags;
+ uint8_t mimo_delim;
+ uint8_t single_stream_ant_msk;
+ uint8_t dual_stream_ant_msk;
+ uint8_t initial_rate_index[IWM_AC_NUM];
+ /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
+ uint16_t agg_time_limit;
+ uint8_t agg_disable_start_th;
+ uint8_t agg_frame_cnt_limit;
+ uint32_t reserved2;
+ uint32_t rs_table[IWM_LQ_MAX_RETRY_NUM];
+ uint32_t bf_params;
+}; /* LINK_QUALITY_CMD_API_S_VER_1 */
+
+/*
+ * END mvm/fw-api-rs.h
+ */
+
+/*
+ * BEGIN mvm/fw-api-tx.h
+ */
+
+/**
+ * enum iwm_tx_flags - bitmasks for tx_flags in TX command
+ * @IWM_TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
+ * @IWM_TX_CMD_FLG_ACK: expect ACK from receiving station
+ * @IWM_TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
+ * Otherwise, use rate_n_flags from the TX command
+ * @IWM_TX_CMD_FLG_BA: this frame is a block ack
+ * @IWM_TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
+ * Must set IWM_TX_CMD_FLG_ACK with this flag.
+ * @IWM_TX_CMD_FLG_TXOP_PROT: protect frame with full TXOP protection
+ * @IWM_TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
+ * @IWM_TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
+ * @IWM_TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @IWM_TX_CMD_FLG_BT_DIS: disable BT priority for this frame
+ * @IWM_TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
+ * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
+ * @IWM_TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
+ * @IWM_TX_CMD_FLG_NEXT_FRAME: this frame includes information of the next frame
+ * @IWM_TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
+ * Should be set for beacons and probe responses
+ * @IWM_TX_CMD_FLG_CALIB: activate PA TX power calibrations
+ * @IWM_TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
+ * @IWM_TX_CMD_FLG_AGG_START: allow this frame to start aggregation
+ * @IWM_TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
+ * Should be set for 26/30 length MAC headers
+ * @IWM_TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
+ * @IWM_TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
+ * @IWM_TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
+ * @IWM_TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
+ * @IWM_TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
+ * @IWM_TX_CMD_FLG_EXEC_PAPD: execute PAPD
+ * @IWM_TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
+ * @IWM_TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
+ */
+enum iwm_tx_flags {
+ IWM_TX_CMD_FLG_PROT_REQUIRE = (1 << 0),
+ IWM_TX_CMD_FLG_ACK = (1 << 3),
+ IWM_TX_CMD_FLG_STA_RATE = (1 << 4),
+ IWM_TX_CMD_FLG_BA = (1 << 5),
+ IWM_TX_CMD_FLG_BAR = (1 << 6),
+ IWM_TX_CMD_FLG_TXOP_PROT = (1 << 7),
+ IWM_TX_CMD_FLG_VHT_NDPA = (1 << 8),
+ IWM_TX_CMD_FLG_HT_NDPA = (1 << 9),
+ IWM_TX_CMD_FLG_CSI_FDBK2HOST = (1 << 10),
+ IWM_TX_CMD_FLG_BT_DIS = (1 << 12),
+ IWM_TX_CMD_FLG_SEQ_CTL = (1 << 13),
+ IWM_TX_CMD_FLG_MORE_FRAG = (1 << 14),
+ IWM_TX_CMD_FLG_NEXT_FRAME = (1 << 15),
+ IWM_TX_CMD_FLG_TSF = (1 << 16),
+ IWM_TX_CMD_FLG_CALIB = (1 << 17),
+ IWM_TX_CMD_FLG_KEEP_SEQ_CTL = (1 << 18),
+ IWM_TX_CMD_FLG_AGG_START = (1 << 19),
+ IWM_TX_CMD_FLG_MH_PAD = (1 << 20),
+ IWM_TX_CMD_FLG_RESP_TO_DRV = (1 << 21),
+ IWM_TX_CMD_FLG_CCMP_AGG = (1 << 22),
+ IWM_TX_CMD_FLG_TKIP_MIC_DONE = (1 << 23),
+ IWM_TX_CMD_FLG_DUR = (1 << 25),
+ IWM_TX_CMD_FLG_FW_DROP = (1 << 26),
+ IWM_TX_CMD_FLG_EXEC_PAPD = (1 << 27),
+ IWM_TX_CMD_FLG_PAPD_TYPE = (1 << 28),
+ IWM_TX_CMD_FLG_HCCA_CHUNK = (1 << 31)
+}; /* IWM_TX_FLAGS_BITS_API_S_VER_1 */
+
+/*
+ * TX command security control
+ */
+#define IWM_TX_CMD_SEC_WEP 0x01
+#define IWM_TX_CMD_SEC_CCM 0x02
+#define IWM_TX_CMD_SEC_TKIP 0x03
+#define IWM_TX_CMD_SEC_EXT 0x04
+#define IWM_TX_CMD_SEC_MSK 0x07
+#define IWM_TX_CMD_SEC_WEP_KEY_IDX_POS 6
+#define IWM_TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
+#define IWM_TX_CMD_SEC_KEY128 0x08
+
+/* TODO: how can these values be OK with only a 16 bit variable??? */
+/*
+ * TX command next frame info
+ *
+ * bits 0:2 - security control (IWM_TX_CMD_SEC_*)
+ * bit 3 - immediate ACK required
+ * bit 4 - rate is taken from STA table
+ * bit 5 - frame belongs to BA stream
+ * bit 6 - immediate BA response expected
+ * bit 7 - unused
+ * bits 8:15 - Station ID
+ * bits 16:31 - rate
+ */
+#define IWM_TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
+#define IWM_TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
+#define IWM_TX_CMD_NEXT_FRAME_BA_MSK (0x20)
+#define IWM_TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
+#define IWM_TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
+#define IWM_TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
+#define IWM_TX_CMD_NEXT_FRAME_STA_ID_POS (8)
+#define IWM_TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
+#define IWM_TX_CMD_NEXT_FRAME_RATE_POS (16)
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define IWM_TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
+#define IWM_TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/
+#define IWM_TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
+#define IWM_TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWM_TID_NON_QOS IWM_MAX_TID_COUNT
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWM_DEFAULT_TX_RETRY 15
+#define IWM_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWM_RTS_DFAULT_RETRY_LIMIT 60
+#define IWM_BAR_DFAULT_RETRY_LIMIT 60
+#define IWM_LOW_RETRY_LIMIT 7
+
+/* TODO: complete documentation for try_cnt and btkill_cnt */
+/**
+ * struct iwm_tx_cmd - TX command struct to FW
+ * ( IWM_TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @next_frame_len: same as len, but for next frame (0 if not applicable)
+ * Used for fragmentation and bursting, but not in 11n aggregation.
+ * @tx_flags: combination of IWM_TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if IWM_TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of IWM_RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @sec_ctl: security control, IWM_TX_CMD_SEC_*
+ * @initial_rate_index: index into the rate table for initial TX attempt.
+ * Applied if IWM_TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
+ * @key: security key
+ * @next_frame_flags: IWM_TX_CMD_SEC_* and IWM_TX_CMD_NEXT_FRAME_*
+ * @life_time: frame life time (usecs??)
+ * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
+ *	btkill_cnt + reserved), first 32 bits. "0" disables usage.
+ * @dram_msb_ptr: upper bits of the scratch physical address
+ * @rts_retry_limit: max attempts for RTS
+ * @data_retry_limit: max attempts to send the data packet
+ * @tid_tspec: TID/tspec
+ * @pm_frame_timeout: PM TX frame timeout
+ * @driver_txop: duration of EDCA TXOP, in 32-usec units. Set this if not
+ * specified by HCCA protocol
+ *
+ * The byte count (both len and next_frame_len) includes MAC header
+ * (24/26/30/32 bytes)
+ * + 2 bytes pad if 26/30 header size
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * It does not include post-MAC padding, i.e.,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range of len: 14-2342 bytes.
+ *
+ * After the struct fields the MAC header is placed, plus any padding,
+ * and then the actual payload.
+ */
+struct iwm_tx_cmd {
+ uint16_t len;
+ uint16_t next_frame_len;
+ uint32_t tx_flags;
+ struct {
+ uint8_t try_cnt;
+ uint8_t btkill_cnt;
+ uint16_t reserved;
+ } scratch; /* DRAM_SCRATCH_API_U_VER_1 */
+ uint32_t rate_n_flags;
+ uint8_t sta_id;
+ uint8_t sec_ctl;
+ uint8_t initial_rate_index;
+ uint8_t reserved2;
+ uint8_t key[16];
+ uint16_t next_frame_flags;
+ uint16_t reserved3;
+ uint32_t life_time;
+ uint32_t dram_lsb_ptr;
+ uint8_t dram_msb_ptr;
+ uint8_t rts_retry_limit;
+ uint8_t data_retry_limit;
+ uint8_t tid_tspec;
+ uint16_t pm_frame_timeout;
+ uint16_t driver_txop;
+ uint8_t payload[0];
+ struct ieee80211_frame hdr[0];
+} __packed; /* IWM_TX_CMD_API_S_VER_3 */
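+
+/*
+ * Illustrative sketch, not part of the firmware API: the 802.11 header and
+ * payload are appended right after the fixed fields of iwm_tx_cmd, so the
+ * buffer space needed for one Tx command is the fixed part plus the frame;
+ * the "len" field covers the frame bytes as described in the comment above.
+ * The function name is made up for illustration.
+ */
+static inline size_t
+iwm_example_tx_cmd_size(size_t hdrlen, size_t paylen)
+{
+	return sizeof(struct iwm_tx_cmd) + hdrlen + paylen;
+}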
+
+/*
+ * TX response related data
+ */
+
+/*
+ * enum iwm_tx_status - status that is returned by the fw after attempts to Tx
+ * @IWM_TX_STATUS_SUCCESS:
+ * @IWM_TX_STATUS_DIRECT_DONE:
+ * @IWM_TX_STATUS_POSTPONE_DELAY:
+ * @IWM_TX_STATUS_POSTPONE_FEW_BYTES:
+ * @IWM_TX_STATUS_POSTPONE_BT_PRIO:
+ * @IWM_TX_STATUS_POSTPONE_QUIET_PERIOD:
+ * @IWM_TX_STATUS_POSTPONE_CALC_TTAK:
+ * @IWM_TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+ * @IWM_TX_STATUS_FAIL_SHORT_LIMIT:
+ * @IWM_TX_STATUS_FAIL_LONG_LIMIT:
+ * @IWM_TX_STATUS_FAIL_UNDERRUN:
+ * @IWM_TX_STATUS_FAIL_DRAIN_FLOW:
+ * @IWM_TX_STATUS_FAIL_RFKILL_FLUSH:
+ * @IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
+ * @IWM_TX_STATUS_FAIL_DEST_PS:
+ * @IWM_TX_STATUS_FAIL_HOST_ABORTED:
+ * @IWM_TX_STATUS_FAIL_BT_RETRY:
+ * @IWM_TX_STATUS_FAIL_STA_INVALID:
+ * @IWM_TX_STATUS_FAIL_FRAG_DROPPED:
+ * @IWM_TX_STATUS_FAIL_TID_DISABLE:
+ * @IWM_TX_STATUS_FAIL_FIFO_FLUSHED:
+ * @IWM_TX_STATUS_FAIL_SMALL_CF_POLL:
+ * @IWM_TX_STATUS_FAIL_FW_DROP:
+ * @IWM_TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
+ * STA table
+ * @IWM_TX_FRAME_STATUS_INTERNAL_ABORT:
+ * @IWM_TX_MODE_MSK:
+ * @IWM_TX_MODE_NO_BURST:
+ * @IWM_TX_MODE_IN_BURST_SEQ:
+ * @IWM_TX_MODE_FIRST_IN_BURST:
+ * @IWM_TX_QUEUE_NUM_MSK:
+ *
+ * Valid only if frame_count == 1
+ * TODO: complete documentation
+ */
+enum iwm_tx_status {
+ IWM_TX_STATUS_MSK = 0x000000ff,
+ IWM_TX_STATUS_SUCCESS = 0x01,
+ IWM_TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ IWM_TX_STATUS_POSTPONE_DELAY = 0x40,
+ IWM_TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ IWM_TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+ IWM_TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ IWM_TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ IWM_TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+ IWM_TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ IWM_TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+ IWM_TX_STATUS_FAIL_UNDERRUN = 0x84,
+ IWM_TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ IWM_TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+ IWM_TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ IWM_TX_STATUS_FAIL_DEST_PS = 0x88,
+ IWM_TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+ IWM_TX_STATUS_FAIL_BT_RETRY = 0x8a,
+ IWM_TX_STATUS_FAIL_STA_INVALID = 0x8b,
+ IWM_TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ IWM_TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+ IWM_TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+ IWM_TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
+ IWM_TX_STATUS_FAIL_FW_DROP = 0x90,
+ IWM_TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
+ IWM_TX_STATUS_INTERNAL_ABORT = 0x92,
+ IWM_TX_MODE_MSK = 0x00000f00,
+ IWM_TX_MODE_NO_BURST = 0x00000000,
+ IWM_TX_MODE_IN_BURST_SEQ = 0x00000100,
+ IWM_TX_MODE_FIRST_IN_BURST = 0x00000200,
+ IWM_TX_QUEUE_NUM_MSK = 0x0001f000,
+ IWM_TX_NARROW_BW_MSK = 0x00060000,
+ IWM_TX_NARROW_BW_1DIV2 = 0x00020000,
+ IWM_TX_NARROW_BW_1DIV4 = 0x00040000,
+ IWM_TX_NARROW_BW_1DIV8 = 0x00060000,
+};
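+
+/*
+ * Illustrative sketch, not part of the firmware API: the low byte of the
+ * Tx response status carries the codes above, so a driver typically masks
+ * with IWM_TX_STATUS_MSK before comparing.  The function name is made up
+ * for illustration.
+ */
+static inline int
+iwm_example_tx_status_success(uint32_t status)
+{
+	switch (status & IWM_TX_STATUS_MSK) {
+	case IWM_TX_STATUS_SUCCESS:
+	case IWM_TX_STATUS_DIRECT_DONE:
+		return 1;
+	default:
+		return 0;
+	}
+}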
+
+/*
+ * enum iwm_tx_agg_status - TX aggregation status
+ * @IWM_AGG_TX_STATE_STATUS_MSK:
+ * @IWM_AGG_TX_STATE_TRANSMITTED:
+ * @IWM_AGG_TX_STATE_UNDERRUN:
+ * @IWM_AGG_TX_STATE_BT_PRIO:
+ * @IWM_AGG_TX_STATE_FEW_BYTES:
+ * @IWM_AGG_TX_STATE_ABORT:
+ * @IWM_AGG_TX_STATE_LAST_SENT_TTL:
+ * @IWM_AGG_TX_STATE_LAST_SENT_TRY_CNT:
+ * @IWM_AGG_TX_STATE_LAST_SENT_BT_KILL:
+ * @IWM_AGG_TX_STATE_SCD_QUERY:
+ * @IWM_AGG_TX_STATE_TEST_BAD_CRC32:
+ * @IWM_AGG_TX_STATE_RESPONSE:
+ * @IWM_AGG_TX_STATE_DUMP_TX:
+ * @IWM_AGG_TX_STATE_DELAY_TX:
+ * @IWM_AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a member of a previous
+ * aggregation block). If rate scaling is used, retry count indicates the
+ * rate table entry used for all frames in the new agg.
+ * @IWM_AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ * this frame
+ *
+ * TODO: complete documentation
+ */
+enum iwm_tx_agg_status {
+ IWM_AGG_TX_STATE_STATUS_MSK = 0x00fff,
+ IWM_AGG_TX_STATE_TRANSMITTED = 0x000,
+ IWM_AGG_TX_STATE_UNDERRUN = 0x001,
+ IWM_AGG_TX_STATE_BT_PRIO = 0x002,
+ IWM_AGG_TX_STATE_FEW_BYTES = 0x004,
+ IWM_AGG_TX_STATE_ABORT = 0x008,
+ IWM_AGG_TX_STATE_LAST_SENT_TTL = 0x010,
+ IWM_AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
+ IWM_AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
+ IWM_AGG_TX_STATE_SCD_QUERY = 0x080,
+ IWM_AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
+ IWM_AGG_TX_STATE_RESPONSE = 0x1ff,
+ IWM_AGG_TX_STATE_DUMP_TX = 0x200,
+ IWM_AGG_TX_STATE_DELAY_TX = 0x400,
+ IWM_AGG_TX_STATE_TRY_CNT_POS = 12,
+ IWM_AGG_TX_STATE_TRY_CNT_MSK = 0xf << IWM_AGG_TX_STATE_TRY_CNT_POS,
+};
+
+#define IWM_AGG_TX_STATE_LAST_SENT_MSK (IWM_AGG_TX_STATE_LAST_SENT_TTL| \
+ IWM_AGG_TX_STATE_LAST_SENT_TRY_CNT| \
+ IWM_AGG_TX_STATE_LAST_SENT_BT_KILL)
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know is that we've
+ * written the bytes to the TXE, but we know nothing about what the DSP did.
+ */
+#define IWM_AGG_TX_STAT_FRAME_NOT_SENT (IWM_AGG_TX_STATE_FEW_BYTES | \
+ IWM_AGG_TX_STATE_ABORT | \
+ IWM_AGG_TX_STATE_SCD_QUERY)
+
+/*
+ * IWM_REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for a single
+ * frame. Multiple attempts, at various bit rates, may have been made for
+ * this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
+ * frames that used block-acknowledge. All frames were transmitted at
+ * same rate. Rate scaling may have been used if first frame in this new
+ * agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered
+ * here; block-ack has not been received by the time the device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the device, rather than whether it was received successfully by
+ * the destination station.
+ */
+
+/**
+ * struct iwm_agg_tx_status - per packet TX aggregation status
+ * @status: enum iwm_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct iwm_agg_tx_status {
+ uint16_t status;
+ uint16_t sequence;
+} __packed;
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define IWM_TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define IWM_TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define IWM_TX_RES_INV_RATE_INDEX_MSK 0x80
+
+#define IWM_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWM_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+
+/**
+ * struct iwm_mvm_tx_resp - notifies that fw is TXing a packet
+ * ( IWM_REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. IWM_RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg: frame status IWM_TX_STATUS_*
+ * for agg: status of 1st frame, IWM_AGG_TX_STATE_*; other frame status fields
+ * follow this one, up to frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwm_mvm_get_scd_ssn for more details.
+ */
+struct iwm_mvm_tx_resp {
+ uint8_t frame_count;
+ uint8_t bt_kill_count;
+ uint8_t failure_rts;
+ uint8_t failure_frame;
+ uint32_t initial_rate;
+ uint16_t wireless_media_time;
+
+ uint8_t pa_status;
+ uint8_t pa_integ_res_a[3];
+ uint8_t pa_integ_res_b[3];
+ uint8_t pa_integ_res_c[3];
+ uint16_t measurement_req_id;
+ uint16_t reserved;
+
+ uint32_t tfd_info;
+ uint16_t seq_ctl;
+ uint16_t byte_cnt;
+ uint8_t tlc_info;
+ uint8_t ra_tid;
+ uint16_t frame_ctrl;
+
+ struct iwm_agg_tx_status status;
+} __packed; /* IWM_TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwm_mvm_ba_notif - notifies about reception of BA
+ * ( IWM_BA_NOTIF = 0xc5 )
+ * @sta_addr_lo32: lower 32 bits of the MAC address
+ * @sta_addr_hi16: upper 16 bits of the MAC address
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @tid: tid of the session
+ * @seq_ctl:
+ * @bitmap: the bitmap of the BA notification as seen in the air
+ * @scd_flow: the tx queue this BA relates to
+ * @scd_ssn: the index of the last contiguously sent packet
+ * @txed: number of Txed frames in this batch
+ * @txed_2_done: number of Acked frames in this batch
+ */
+struct iwm_mvm_ba_notif {
+ uint32_t sta_addr_lo32;
+ uint16_t sta_addr_hi16;
+ uint16_t reserved;
+
+ uint8_t sta_id;
+ uint8_t tid;
+ uint16_t seq_ctl;
+ uint64_t bitmap;
+ uint16_t scd_flow;
+ uint16_t scd_ssn;
+ uint8_t txed;
+ uint8_t txed_2_done;
+ uint16_t reserved1;
+} __packed;
+
+/*
+ * struct iwm_mac_beacon_cmd - beacon template command
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ * mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @frame: the template of the beacon frame
+ */
+struct iwm_mac_beacon_cmd {
+ struct iwm_tx_cmd tx;
+ uint32_t template_id;
+ uint32_t tim_idx;
+ uint32_t tim_size;
+ struct ieee80211_frame frame[0];
+} __packed;
+
+struct iwm_beacon_notif {
+ struct iwm_mvm_tx_resp beacon_notify_hdr;
+ uint64_t tsf;
+ uint32_t ibss_mgr_status;
+} __packed;
+
+/**
+ * enum iwm_dump_control - dump (flush) control flags
+ * @IWM_DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ * and the TFD queues are empty.
+ */
+enum iwm_dump_control {
+ IWM_DUMP_TX_FIFO_FLUSH = (1 << 1),
+};
+
+/**
+ * struct iwm_tx_path_flush_cmd -- queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwm_tx_path_flush_cmd {
+ uint32_t queues_ctl;
+ uint16_t flush_ctl;
+ uint16_t reserved;
+} __packed; /* IWM_TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * iwm_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline uint32_t iwm_mvm_get_scd_ssn(struct iwm_mvm_tx_resp *tx_resp)
+{
+ return le32_to_cpup((uint32_t *)&tx_resp->status +
+ tx_resp->frame_count) & 0xfff;
+}
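+
+/*
+ * Illustrative sketch, not part of the firmware API: for an aggregation
+ * response (frame_count > 1) the per-frame iwm_agg_tx_status entries start
+ * at the "status" field, and the SSN follows them as described above.
+ * The le16toh() byte-order helper and the function name are assumptions
+ * for illustration only.
+ */
+static inline void
+iwm_example_walk_agg_tx_resp(struct iwm_mvm_tx_resp *tx_resp)
+{
+	struct iwm_agg_tx_status *agg_status = &tx_resp->status;
+	uint32_t scd_ssn = iwm_mvm_get_scd_ssn(tx_resp);
+	int i;
+
+	for (i = 0; i < tx_resp->frame_count; i++) {
+		uint16_t state = le16toh(agg_status[i].status) &
+		    IWM_AGG_TX_STATE_STATUS_MSK;
+		/* Per-frame status handling would go here. */
+		(void)state;
+	}
+	(void)scd_ssn;
+}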
+
+/*
+ * END mvm/fw-api-tx.h
+ */
+
+/*
+ * BEGIN mvm/fw-api-scan.h
+ */
+
+/* Scan Commands, Responses, Notifications */
+
+/* Masks for iwm_scan_channel.type flags */
+#define IWM_SCAN_CHANNEL_TYPE_ACTIVE (1 << 0)
+#define IWM_SCAN_CHANNEL_NARROW_BAND (1 << 22)
+
+/* Max number of IEs for direct SSID scans in a command */
+#define IWM_PROBE_OPTION_MAX 20
+
+/**
+ * struct iwm_scan_channel - entry in IWM_REPLY_SCAN_CMD channel table
+ * @channel: band is selected by iwm_scan_cmd "flags" field
+ * @tx_gain: gain for analog radio
+ * @dsp_atten: gain for DSP
+ * @active_dwell: dwell time for active scan in TU, typically 5-50
+ * @passive_dwell: dwell time for passive scan in TU, typically 20-500
+ * @type: type is broken down to these bits:
+ * bit 0: 0 = passive, 1 = active
+ * bits 1-20: SSID direct bit map. If any of these bits is set then
+ * the corresponding SSID IE is transmitted in probe request
+ * (bit i adds IE in position i to the probe request)
+ * bit 22: channel width, 0 = regular, 1 = TGj narrow channel
+ *
+ * @iteration_count:
+ * @iteration_interval:
+ * This struct is used once for each channel in the scan list.
+ * Each channel can independently select:
+ * 1) SSID for directed active scans
+ * 2) Txpower setting (for rate specified within Tx command)
+ * 3) How long to stay on-channel (behavior may be modified by quiet_time,
+ * quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwm_scan_cmd about max_out_time and quiet_time):
+ * 1) If using passive_dwell (i.e. passive_dwell != 0):
+ * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2) quiet_time <= active_dwell
+ * 3) If restricting off-channel time (i.e. max_out_time !=0):
+ * passive_dwell < max_out_time
+ * active_dwell < max_out_time
+ */
+struct iwm_scan_channel {
+ uint32_t type;
+ uint16_t channel;
+ uint16_t iteration_count;
+ uint32_t iteration_interval;
+ uint16_t active_dwell;
+ uint16_t passive_dwell;
+} __packed; /* IWM_SCAN_CHANNEL_CONTROL_API_S_VER_1 */
+
+/**
+ * struct iwm_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in IWM_REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwm_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwm_ssid_ie {
+ uint8_t id;
+ uint8_t len;
+ uint8_t ssid[IEEE80211_NWID_LEN];
+} __packed; /* IWM_SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+
+/**
+ * iwm_scan_flags - masks for scan command flags
+ *@IWM_SCAN_FLAGS_PERIODIC_SCAN:
+ *@IWM_SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX:
+ *@IWM_SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
+ *@IWM_SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
+ *@IWM_SCAN_FLAGS_FRAGMENTED_SCAN:
+ *@IWM_SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that were active
+ * in the past hour, even if they are marked as passive.
+ */
+enum iwm_scan_flags {
+ IWM_SCAN_FLAGS_PERIODIC_SCAN = (1 << 0),
+ IWM_SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX = (1 << 1),
+ IWM_SCAN_FLAGS_DELAYED_SCAN_LOWBAND = (1 << 2),
+ IWM_SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = (1 << 3),
+ IWM_SCAN_FLAGS_FRAGMENTED_SCAN = (1 << 4),
+ IWM_SCAN_FLAGS_PASSIVE2ACTIVE = (1 << 5),
+};
+
+/**
+ * enum iwm_scan_type - Scan types for scan command
+ * @IWM_SCAN_TYPE_FORCED:
+ * @IWM_SCAN_TYPE_BACKGROUND:
+ * @IWM_SCAN_TYPE_OS:
+ * @IWM_SCAN_TYPE_ROAMING:
+ * @IWM_SCAN_TYPE_ACTION:
+ * @IWM_SCAN_TYPE_DISCOVERY:
+ * @IWM_SCAN_TYPE_DISCOVERY_FORCED:
+ */
+enum iwm_scan_type {
+ IWM_SCAN_TYPE_FORCED = 0,
+ IWM_SCAN_TYPE_BACKGROUND = 1,
+ IWM_SCAN_TYPE_OS = 2,
+ IWM_SCAN_TYPE_ROAMING = 3,
+ IWM_SCAN_TYPE_ACTION = 4,
+ IWM_SCAN_TYPE_DISCOVERY = 5,
+ IWM_SCAN_TYPE_DISCOVERY_FORCED = 6,
+}; /* IWM_SCAN_ACTIVITY_TYPE_E_VER_1 */
+
+/* Maximal number of channels to scan */
+#define IWM_MAX_NUM_SCAN_CHANNELS 0x24
+
+/**
+ * struct iwm_scan_cmd - scan request command
+ * ( IWM_SCAN_REQUEST_CMD = 0x80 )
+ * @len: command length in bytes
+ * @scan_flags: scan flags from IWM_SCAN_FLAGS_*
+ * @channel_count: num of channels in channel list (1 - IWM_MAX_NUM_SCAN_CHANNELS)
+ * @quiet_time: in msecs, dwell this time for active scan on quiet channels
+ * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if fewer than
+ *	this number of packets were received, typically 1)
+ * @passive2active: is auto switching from passive to active during scan allowed
+ * @rxchain_sel_flags: RXON_RX_CHAIN_*
+ * @max_out_time: in usecs, max out of serving channel time
+ * @suspend_time: how long to pause scan when returning to service channel:
+ *	bits 0-19: beacon interval in usecs (suspend before executing)
+ * bits 20-23: reserved
+ * bits 24-31: number of beacons (suspend between channels)
+ * @rxon_flags: RXON_FLG_*
+ * @filter_flags: RXON_FILTER_*
+ * @tx_cmd: for active scans (zero for passive), w/o payload,
+ * no RS so specify TX rate
+ * @direct_scan: direct scan SSIDs
+ * @type: one of IWM_SCAN_TYPE_*
+ * @repeats: how many time to repeat the scan
+ */
+struct iwm_scan_cmd {
+ uint16_t len;
+ uint8_t scan_flags;
+ uint8_t channel_count;
+ uint16_t quiet_time;
+ uint16_t quiet_plcp_th;
+ uint16_t passive2active;
+ uint16_t rxchain_sel_flags;
+ uint32_t max_out_time;
+ uint32_t suspend_time;
+ /* IWM_RX_ON_FLAGS_API_S_VER_1 */
+ uint32_t rxon_flags;
+ uint32_t filter_flags;
+ struct iwm_tx_cmd tx_cmd;
+ struct iwm_ssid_ie direct_scan[IWM_PROBE_OPTION_MAX];
+ uint32_t type;
+ uint32_t repeats;
+
+ /*
+ * Probe request frame, followed by channel list.
+ *
+ * Size of probe request frame is specified by byte count in tx_cmd.
+ * Channel list follows immediately after probe request frame.
+ * Number of channels in list is specified by channel_count.
+ * Each channel in list is of type:
+ *
+ * struct iwm_scan_channel channels[0];
+ *
+ * NOTE: Only one band of channels can be scanned per pass. You
+ * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+ * for one scan to complete (i.e. receive IWM_SCAN_COMPLETE_NOTIFICATION)
+ * before requesting another scan.
+ */
+ uint8_t data[0];
+} __packed; /* IWM_SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
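+
+/*
+ * Illustrative sketch of the variable-length layout described above: the
+ * probe request frame occupies the start of "data" and the channel list
+ * follows immediately after it.  These helpers are not part of the
+ * firmware API; names and the probe-length parameter are assumptions.
+ */
+static inline size_t
+iwm_scan_cmd_size(size_t probe_len, int channel_count)
+{
+	return sizeof(struct iwm_scan_cmd) + probe_len +
+	    channel_count * sizeof(struct iwm_scan_channel);
+}
+
+static inline struct iwm_scan_channel *
+iwm_scan_cmd_channels(struct iwm_scan_cmd *cmd, size_t probe_len)
+{
+	return (struct iwm_scan_channel *)(cmd->data + probe_len);
+}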
+
+/* Response to scan request contains only status with one of these values */
+#define IWM_SCAN_RESPONSE_OK 0x1
+#define IWM_SCAN_RESPONSE_ERROR 0x2
+
+/*
+ * IWM_SCAN_ABORT_CMD = 0x81
+ * When scan abort is requested, the command has no fields except the common
+ * header. The response contains only a status with one of these values.
+ */
+#define IWM_SCAN_ABORT_POSSIBLE 0x1
+#define IWM_SCAN_ABORT_IGNORED 0x2 /* no pending scans */
+
+/* TODO: complete documentation */
+#define IWM_SCAN_OWNER_STATUS 0x1
+#define IWM_MEASURE_OWNER_STATUS 0x2
+
+/**
+ * struct iwm_scan_start_notif - notifies start of scan in the device
+ * ( IWM_SCAN_START_NOTIFICATION = 0x82 )
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @beacon_timer: structured as follows:
+ * bits 0:19 - beacon interval in usecs
+ * bits 20:23 - reserved (0)
+ * bits 24:31 - number of beacons
+ * @channel: which channel is scanned
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @status: one of *_OWNER_STATUS
+ */
+struct iwm_scan_start_notif {
+ uint32_t tsf_low;
+ uint32_t tsf_high;
+ uint32_t beacon_timer;
+ uint8_t channel;
+ uint8_t band;
+ uint8_t reserved[2];
+ uint32_t status;
+} __packed; /* IWM_SCAN_START_NTF_API_S_VER_1 */
+
+/* scan results probe_status first bit indicates success */
+#define IWM_SCAN_PROBE_STATUS_OK 0
+#define IWM_SCAN_PROBE_STATUS_TX_FAILED (1 << 0)
+/* error statuses combined with TX_FAILED */
+#define IWM_SCAN_PROBE_STATUS_FAIL_TTL (1 << 1)
+#define IWM_SCAN_PROBE_STATUS_FAIL_BT (1 << 2)
+
+/* How many statistics are gathered for each channel */
+#define IWM_SCAN_RESULTS_STATISTICS 1
+
+/**
+ * enum iwm_scan_complete_status - status codes for scan complete notifications
+ * @IWM_SCAN_COMP_STATUS_OK: scan completed successfully
+ * @IWM_SCAN_COMP_STATUS_ABORT: scan was aborted by user
+ * @IWM_SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
+ * @IWM_SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
+ * @IWM_SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
+ * @IWM_SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
+ * @IWM_SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
+ * @IWM_SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
+ * @IWM_SCAN_COMP_STATUS_ERR_COEX: medium was lost to WiMax
+ * @IWM_SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
+ * (not an error!)
+ * @IWM_SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
+ * asked for
+ * @IWM_SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
+*/
+enum iwm_scan_complete_status {
+ IWM_SCAN_COMP_STATUS_OK = 0x1,
+ IWM_SCAN_COMP_STATUS_ABORT = 0x2,
+ IWM_SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
+ IWM_SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
+ IWM_SCAN_COMP_STATUS_ERR_PROBE = 0x5,
+ IWM_SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
+ IWM_SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
+ IWM_SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
+ IWM_SCAN_COMP_STATUS_ERR_COEX = 0x9,
+ IWM_SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
+ IWM_SCAN_COMP_STATUS_ITERATION_END = 0x0B,
+ IWM_SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
+};
+
+/**
+ * struct iwm_scan_results_notif - scan results for one channel
+ * ( IWM_SCAN_RESULTS_NOTIFICATION = 0x83 )
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: IWM_SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of requests that weren't sent due to lack of time
+ * @duration: duration spent in channel, in usecs
+ * @statistics: statistics gathered for this channel
+ */
+struct iwm_scan_results_notif {
+ uint8_t channel;
+ uint8_t band;
+ uint8_t probe_status;
+ uint8_t num_probe_not_sent;
+ uint32_t duration;
+ uint32_t statistics[IWM_SCAN_RESULTS_STATISTICS];
+} __packed; /* IWM_SCAN_RESULT_NTF_API_S_VER_2 */
+
+/**
+ * struct iwm_scan_complete_notif - notifies end of scanning (all channels)
+ * ( IWM_SCAN_COMPLETE_NOTIFICATION = 0x84 )
+ * @scanned_channels: number of channels scanned (and number of valid results)
+ * @status: one of IWM_SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: all scan results, only "scanned_channels" of them are valid
+ */
+struct iwm_scan_complete_notif {
+ uint8_t scanned_channels;
+ uint8_t status;
+ uint8_t bt_status;
+ uint8_t last_channel;
+ uint32_t tsf_low;
+ uint32_t tsf_high;
+ struct iwm_scan_results_notif results[IWM_MAX_NUM_SCAN_CHANNELS];
+} __packed; /* IWM_SCAN_COMPLETE_NTF_API_S_VER_2 */
+
+/* scan offload */
+#define IWM_MAX_SCAN_CHANNELS 40
+#define IWM_SCAN_MAX_BLACKLIST_LEN 64
+#define IWM_SCAN_SHORT_BLACKLIST_LEN 16
+#define IWM_SCAN_MAX_PROFILES 11
+#define IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE 512
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWM_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWM_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IWM_CAN_ABORT_STATUS 1
+
+#define IWM_FULL_SCAN_MULTIPLIER 5
+#define IWM_FAST_SCHED_SCAN_ITERATIONS 3
+
+enum iwm_scan_framework_client {
+ IWM_SCAN_CLIENT_SCHED_SCAN = (1 << 0),
+ IWM_SCAN_CLIENT_NETDETECT = (1 << 1),
+ IWM_SCAN_CLIENT_ASSET_TRACKING = (1 << 2),
+};
+
+/**
+ * struct iwm_scan_offload_cmd - IWM_SCAN_REQUEST_FIXED_PART_API_S_VER_6
+ * @scan_flags: see enum iwm_scan_flags
+ * @channel_count: channels in channel list
+ * @quiet_time: dwell time, in milliseconds, on quiet channel
+ * @quiet_plcp_th: quiet channel num of packets threshold
+ * @good_CRC_th: passive to active promotion threshold
+ * @rx_chain: RXON rx chain.
+ * @max_out_time: max uSec to be out of the associated channel
+ * @suspend_time: pause scan this long when returning to service channel
+ * @flags: RXON flags
+ * @filter_flags: RXON filter
+ * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
+ * @direct_scan: list of SSIDs for directed active scan
+ * @scan_type: see enum iwm_scan_type.
+ * @rep_count: repetition count for each scheduled scan iteration.
+ */
+struct iwm_scan_offload_cmd {
+ uint16_t len;
+ uint8_t scan_flags;
+ uint8_t channel_count;
+ uint16_t quiet_time;
+ uint16_t quiet_plcp_th;
+ uint16_t good_CRC_th;
+ uint16_t rx_chain;
+ uint32_t max_out_time;
+ uint32_t suspend_time;
+ /* IWM_RX_ON_FLAGS_API_S_VER_1 */
+ uint32_t flags;
+ uint32_t filter_flags;
+ struct iwm_tx_cmd tx_cmd[2];
+ /* IWM_SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+ struct iwm_ssid_ie direct_scan[IWM_PROBE_OPTION_MAX];
+ uint32_t scan_type;
+ uint32_t rep_count;
+} __packed;
+
+enum iwm_scan_offload_channel_flags {
+ IWM_SCAN_OFFLOAD_CHANNEL_ACTIVE = (1 << 0),
+ IWM_SCAN_OFFLOAD_CHANNEL_NARROW = (1 << 22),
+ IWM_SCAN_OFFLOAD_CHANNEL_FULL = (1 << 24),
+ IWM_SCAN_OFFLOAD_CHANNEL_PARTIAL = (1 << 25),
+};
+
+/**
+ * iwm_scan_channel_cfg - IWM_SCAN_CHANNEL_CFG_S
+ * @type: bitmap - see enum iwm_scan_offload_channel_flags.
+ * 0: passive (0) or active (1) scan.
+ * 1-20: directed scan to i'th ssid.
+ *	22: channel width configuration - 1 for narrow.
+ * 24: full scan.
+ * 25: partial scan.
+ * @channel_number: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two iterations on one channel.
+ * @dwell_time: entry 0 - active scan, entry 1 - passive scan.
+ */
+struct iwm_scan_channel_cfg {
+ uint32_t type[IWM_MAX_SCAN_CHANNELS];
+ uint16_t channel_number[IWM_MAX_SCAN_CHANNELS];
+ uint16_t iter_count[IWM_MAX_SCAN_CHANNELS];
+ uint32_t iter_interval[IWM_MAX_SCAN_CHANNELS];
+ uint8_t dwell_time[IWM_MAX_SCAN_CHANNELS][2];
+} __packed;
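+
+/*
+ * Illustrative sketch: compose the per-channel "type" bitmap described
+ * above, e.g. an active scan directed at one of the SSIDs in the fixed
+ * part, optionally marked as a full scan.  Not part of the firmware API;
+ * the helper name and the 0-based SSID indexing are assumptions.
+ */
+static inline uint32_t
+iwm_scan_channel_type(int active, int ssid_idx, int full)
+{
+	uint32_t type = 0;
+
+	if (active)
+		type |= IWM_SCAN_OFFLOAD_CHANNEL_ACTIVE;
+	if (ssid_idx >= 0)
+		type |= (1 << (ssid_idx + 1)); /* bits 1-20: directed SSIDs */
+	if (full)
+		type |= IWM_SCAN_OFFLOAD_CHANNEL_FULL;
+	return type;
+}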
+
+/**
+ * iwm_scan_offload_cfg - IWM_SCAN_OFFLOAD_CONFIG_API_S
+ * @scan_cmd: scan command fixed part
+ * @channel_cfg: scan channel configuration
+ * @data: probe request frames (one per band)
+ */
+struct iwm_scan_offload_cfg {
+ struct iwm_scan_offload_cmd scan_cmd;
+ struct iwm_scan_channel_cfg channel_cfg;
+ uint8_t data[0];
+} __packed;
+
+/**
+ * iwm_scan_offload_blacklist - IWM_SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid: MAC address to filter out
+ * @reported_rssi: AP rssi reported to the host
+ * @client_bitmap: clients ignore this entry - enum scan_framework_client
+ */
+struct iwm_scan_offload_blacklist {
+ uint8_t ssid[ETHER_ADDR_LEN];
+ uint8_t reported_rssi;
+ uint8_t client_bitmap;
+} __packed;
+
+enum iwm_scan_offload_network_type {
+ IWM_NETWORK_TYPE_BSS = 1,
+ IWM_NETWORK_TYPE_IBSS = 2,
+ IWM_NETWORK_TYPE_ANY = 3,
+};
+
+enum iwm_scan_offload_band_selection {
+ IWM_SCAN_OFFLOAD_SELECT_2_4 = 0x4,
+ IWM_SCAN_OFFLOAD_SELECT_5_2 = 0x8,
+ IWM_SCAN_OFFLOAD_SELECT_ANY = 0xc,
+};
+
+/**
+ * iwm_scan_offload_profile - IWM_SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index: index to ssid list in fixed part
+ * @unicast_cipher: encryption algorithm to match - bitmap
+ * @auth_alg: authentication algorithm to match - bitmap
+ * @network_type: enum iwm_scan_offload_network_type
+ * @band_selection: enum iwm_scan_offload_band_selection
+ * @client_bitmap: clients waiting for match - enum scan_framework_client
+ */
+struct iwm_scan_offload_profile {
+ uint8_t ssid_index;
+ uint8_t unicast_cipher;
+ uint8_t auth_alg;
+ uint8_t network_type;
+ uint8_t band_selection;
+ uint8_t client_bitmap;
+ uint8_t reserved[2];
+} __packed;
+
+/**
+ * iwm_scan_offload_profile_cfg - IWM_SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
+ * @blacklist: AP list to filter out of scan results
+ * @profiles: profiles to search for match
+ * @blacklist_len: length of blacklist
+ * @num_profiles: num of profiles in the list
+ * @match_notify: clients waiting for match found notification
+ * @pass_match: clients waiting for the results
+ * @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
+ */
+struct iwm_scan_offload_profile_cfg {
+ struct iwm_scan_offload_profile profiles[IWM_SCAN_MAX_PROFILES];
+ uint8_t blacklist_len;
+ uint8_t num_profiles;
+ uint8_t match_notify;
+ uint8_t pass_match;
+ uint8_t active_clients;
+ uint8_t any_beacon_notify;
+ uint8_t reserved[2];
+} __packed;
+
+/**
+ * iwm_scan_offload_schedule - schedule of scan offload
+ * @delay: delay between iterations, in seconds.
+ * @iterations: num of scan iterations
+ * @full_scan_mul: number of partial scans before each full scan
+ */
+struct iwm_scan_offload_schedule {
+ uint16_t delay;
+ uint8_t iterations;
+ uint8_t full_scan_mul;
+} __packed;
+
+/*
+ * iwm_scan_offload_flags
+ *
+ * IWM_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
+ * IWM_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
+ * IWM_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
+ * on A band.
+ */
+enum iwm_scan_offload_flags {
+ IWM_SCAN_OFFLOAD_FLAG_PASS_ALL = (1 << 0),
+ IWM_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = (1 << 2),
+ IWM_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = (1 << 3),
+};
+
+/**
+ * iwm_scan_offload_req - scan offload request command
+ * @flags: bitmap - enum iwm_scan_offload_flags.
+ * @watchdog: maximum scan duration in TU.
+ * @delay: delay in seconds before first iteration.
+ * @schedule_line: scan offload schedule, for fast and regular scan.
+ */
+struct iwm_scan_offload_req {
+ uint16_t flags;
+ uint16_t watchdog;
+ uint16_t delay;
+ uint16_t reserved;
+ struct iwm_scan_offload_schedule schedule_line[2];
+} __packed;
+
+enum iwm_scan_offload_compleate_status {
+ IWM_SCAN_OFFLOAD_COMPLETED = 1,
+ IWM_SCAN_OFFLOAD_ABORTED = 2,
+};
+
+/**
+ * iwm_scan_offload_complete - IWM_SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: enum iwm_scan_offload_compleate_status
+ */
+struct iwm_scan_offload_complete {
+ uint8_t last_schedule_line;
+ uint8_t last_schedule_iteration;
+ uint8_t status;
+ uint8_t reserved;
+} __packed;
+
+/**
+ * iwm_sched_scan_results - IWM_SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
+ * @ssid_bitmap: SSID indexes found in this iteration
+ * @client_bitmap: clients that are active and wait for this notification
+ */
+struct iwm_sched_scan_results {
+ uint16_t ssid_bitmap;
+ uint8_t client_bitmap;
+ uint8_t reserved;
+};
+
+/*
+ * END mvm/fw-api-scan.h
+ */
+
+/*
+ * BEGIN mvm/fw-api-sta.h
+ */
+
+/**
+ * enum iwm_sta_flags - flags for the ADD_STA host command
+ * @IWM_STA_FLG_REDUCED_TX_PWR_CTRL:
+ * @IWM_STA_FLG_REDUCED_TX_PWR_DATA:
+ * @IWM_STA_FLG_FLG_ANT_MSK: Antenna selection
+ * @IWM_STA_FLG_PS: set if STA is in Power Save
+ * @IWM_STA_FLG_INVALID: set if STA is invalid
+ * @IWM_STA_FLG_DLP_EN: Direct Link Protocol is enabled
+ * @IWM_STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
+ * @IWM_STA_FLG_DRAIN_FLOW: drain flow
+ * @IWM_STA_FLG_PAN: STA is for PAN interface
+ * @IWM_STA_FLG_CLASS_AUTH:
+ * @IWM_STA_FLG_CLASS_ASSOC:
+ * @IWM_STA_FLG_CLASS_MIMO_PROT:
+ * @IWM_STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
+ * @IWM_STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @IWM_STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ *	initialised by the driver and can be updated by fw upon reception of
+ * action frames that can change the channel width. When cleared the fw
+ * will send all the frames in 20MHz even when FAT channel is requested.
+ * @IWM_STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ * driver and can be updated by fw upon reception of action frames.
+ * @IWM_STA_FLG_MFP_EN: Management Frame Protection
+ */
+enum iwm_sta_flags {
+ IWM_STA_FLG_REDUCED_TX_PWR_CTRL = (1 << 3),
+ IWM_STA_FLG_REDUCED_TX_PWR_DATA = (1 << 6),
+
+ IWM_STA_FLG_FLG_ANT_A = (1 << 4),
+ IWM_STA_FLG_FLG_ANT_B = (2 << 4),
+ IWM_STA_FLG_FLG_ANT_MSK = (IWM_STA_FLG_FLG_ANT_A |
+ IWM_STA_FLG_FLG_ANT_B),
+
+ IWM_STA_FLG_PS = (1 << 8),
+ IWM_STA_FLG_DRAIN_FLOW = (1 << 12),
+ IWM_STA_FLG_PAN = (1 << 13),
+ IWM_STA_FLG_CLASS_AUTH = (1 << 14),
+ IWM_STA_FLG_CLASS_ASSOC = (1 << 15),
+ IWM_STA_FLG_RTS_MIMO_PROT = (1 << 17),
+
+ IWM_STA_FLG_MAX_AGG_SIZE_SHIFT = 19,
+ IWM_STA_FLG_MAX_AGG_SIZE_8K = (0 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+ IWM_STA_FLG_MAX_AGG_SIZE_16K = (1 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+ IWM_STA_FLG_MAX_AGG_SIZE_32K = (2 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+ IWM_STA_FLG_MAX_AGG_SIZE_64K = (3 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+ IWM_STA_FLG_MAX_AGG_SIZE_128K = (4 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+ IWM_STA_FLG_MAX_AGG_SIZE_256K = (5 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+ IWM_STA_FLG_MAX_AGG_SIZE_512K = (6 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+ IWM_STA_FLG_MAX_AGG_SIZE_1024K = (7 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+ IWM_STA_FLG_MAX_AGG_SIZE_MSK = (7 << IWM_STA_FLG_MAX_AGG_SIZE_SHIFT),
+
+ IWM_STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
+ IWM_STA_FLG_AGG_MPDU_DENS_2US = (4 << IWM_STA_FLG_AGG_MPDU_DENS_SHIFT),
+ IWM_STA_FLG_AGG_MPDU_DENS_4US = (5 << IWM_STA_FLG_AGG_MPDU_DENS_SHIFT),
+ IWM_STA_FLG_AGG_MPDU_DENS_8US = (6 << IWM_STA_FLG_AGG_MPDU_DENS_SHIFT),
+ IWM_STA_FLG_AGG_MPDU_DENS_16US = (7 << IWM_STA_FLG_AGG_MPDU_DENS_SHIFT),
+ IWM_STA_FLG_AGG_MPDU_DENS_MSK = (7 << IWM_STA_FLG_AGG_MPDU_DENS_SHIFT),
+
+ IWM_STA_FLG_FAT_EN_20MHZ = (0 << 26),
+ IWM_STA_FLG_FAT_EN_40MHZ = (1 << 26),
+ IWM_STA_FLG_FAT_EN_80MHZ = (2 << 26),
+ IWM_STA_FLG_FAT_EN_160MHZ = (3 << 26),
+ IWM_STA_FLG_FAT_EN_MSK = (3 << 26),
+
+ IWM_STA_FLG_MIMO_EN_SISO = (0 << 28),
+ IWM_STA_FLG_MIMO_EN_MIMO2 = (1 << 28),
+ IWM_STA_FLG_MIMO_EN_MIMO3 = (2 << 28),
+ IWM_STA_FLG_MIMO_EN_MSK = (3 << 28),
+};
+
+/**
+ * enum iwm_sta_key_flag - key flags for the ADD_STA host command
+ * @IWM_STA_KEY_FLG_NO_ENC: no encryption
+ * @IWM_STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @IWM_STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @IWM_STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @IWM_STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @IWM_STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @IWM_STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @IWM_STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
+ * @IWM_STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ *	station info array (1 - in 1X mode)
+ * @IWM_STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @IWM_STA_KEY_NOT_VALID: key is invalid
+ * @IWM_STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @IWM_STA_KEY_MULTICAST: set for multicast key
+ * @IWM_STA_KEY_MFP: key is used for Management Frame Protection
+ */
+enum iwm_sta_key_flag {
+ IWM_STA_KEY_FLG_NO_ENC = (0 << 0),
+ IWM_STA_KEY_FLG_WEP = (1 << 0),
+ IWM_STA_KEY_FLG_CCM = (2 << 0),
+ IWM_STA_KEY_FLG_TKIP = (3 << 0),
+ IWM_STA_KEY_FLG_EXT = (4 << 0),
+ IWM_STA_KEY_FLG_CMAC = (6 << 0),
+ IWM_STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
+ IWM_STA_KEY_FLG_EN_MSK = (7 << 0),
+
+ IWM_STA_KEY_FLG_WEP_KEY_MAP = (1 << 3),
+ IWM_STA_KEY_FLG_KEYID_POS = 8,
+ IWM_STA_KEY_FLG_KEYID_MSK = (3 << IWM_STA_KEY_FLG_KEYID_POS),
+ IWM_STA_KEY_NOT_VALID = (1 << 11),
+ IWM_STA_KEY_FLG_WEP_13BYTES = (1 << 12),
+ IWM_STA_KEY_MULTICAST = (1 << 14),
+ IWM_STA_KEY_MFP = (1 << 15),
+};
+
+/**
+ * enum iwm_sta_modify_flag - indicate to the fw what flag are being changed
+ * @IWM_STA_MODIFY_KEY: this command modifies %key
+ * @IWM_STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @IWM_STA_MODIFY_TX_RATE: unused
+ * @IWM_STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @IWM_STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @IWM_STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @IWM_STA_MODIFY_PROT_TH:
+ * @IWM_STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+enum iwm_sta_modify_flag {
+ IWM_STA_MODIFY_KEY = (1 << 0),
+ IWM_STA_MODIFY_TID_DISABLE_TX = (1 << 1),
+ IWM_STA_MODIFY_TX_RATE = (1 << 2),
+ IWM_STA_MODIFY_ADD_BA_TID = (1 << 3),
+ IWM_STA_MODIFY_REMOVE_BA_TID = (1 << 4),
+ IWM_STA_MODIFY_SLEEPING_STA_TX_COUNT = (1 << 5),
+ IWM_STA_MODIFY_PROT_TH = (1 << 6),
+ IWM_STA_MODIFY_QUEUES = (1 << 7),
+};
+
+#define IWM_STA_MODE_MODIFY 1
+
+/**
+ * enum iwm_sta_sleep_flag - type of sleep of the station
+ * @IWM_STA_SLEEP_STATE_AWAKE:
+ * @IWM_STA_SLEEP_STATE_PS_POLL:
+ * @IWM_STA_SLEEP_STATE_UAPSD:
+ */
+enum iwm_sta_sleep_flag {
+ IWM_STA_SLEEP_STATE_AWAKE = 0,
+ IWM_STA_SLEEP_STATE_PS_POLL = (1 << 0),
+ IWM_STA_SLEEP_STATE_UAPSD = (1 << 1),
+};
+
+/* STA ID and color bits definitions */
+#define IWM_STA_ID_SEED (0x0f)
+#define IWM_STA_ID_POS (0)
+#define IWM_STA_ID_MSK (IWM_STA_ID_SEED << IWM_STA_ID_POS)
+
+#define IWM_STA_COLOR_SEED (0x7)
+#define IWM_STA_COLOR_POS (4)
+#define IWM_STA_COLOR_MSK (IWM_STA_COLOR_SEED << IWM_STA_COLOR_POS)
+
+#define IWM_STA_ID_N_COLOR_GET_COLOR(id_n_color) \
+ (((id_n_color) & IWM_STA_COLOR_MSK) >> IWM_STA_COLOR_POS)
+#define IWM_STA_ID_N_COLOR_GET_ID(id_n_color) \
+ (((id_n_color) & IWM_STA_ID_MSK) >> IWM_STA_ID_POS)
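+
+/*
+ * Example: for id_n_color == 0x23, IWM_STA_ID_N_COLOR_GET_ID() yields
+ * station id 3 and IWM_STA_ID_N_COLOR_GET_COLOR() yields color 2.
+ */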
+
+#define IWM_STA_KEY_MAX_NUM (16)
+#define IWM_STA_KEY_IDX_INVALID (0xff)
+#define IWM_STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWM_MAX_GLOBAL_KEYS (4)
+#define IWM_STA_KEY_LEN_WEP40 (5)
+#define IWM_STA_KEY_LEN_WEP104 (13)
+
+/**
+ * struct iwm_mvm_keyinfo - key information
+ * @key_flags: type %iwm_sta_key_flag
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ * @key_offset: key offset in the fw's key table
+ * @key: 16-byte unicast decryption key
+ * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
+ * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
+ * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
+ */
+struct iwm_mvm_keyinfo {
+ uint16_t key_flags;
+ uint8_t tkip_rx_tsc_byte2;
+ uint8_t reserved1;
+ uint16_t tkip_rx_ttak[5];
+ uint8_t key_offset;
+ uint8_t reserved2;
+ uint8_t key[16];
+ uint64_t tx_secur_seq_cnt;
+ uint64_t hw_tkip_mic_rx_key;
+ uint64_t hw_tkip_mic_tx_key;
+} __packed;
+
+/**
+ * struct iwm_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
+ * ( IWM_REPLY_ADD_STA = 0x18 )
+ * @add_modify: 1: modify existing, 0: add new station
+ * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
+ * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
+ * sent
+ * @mac_id_n_color: the Mac context this station belongs to
+ * @addr[ETHER_ADDR_LEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: IWM_STA_MODIFY_*, selects which parameters to modify vs. leave
+ * alone. 1 - modify, 0 - don't change.
+ * @key: look at %iwm_mvm_keyinfo
+ * @station_flags: look at %iwm_sta_flags
+ * @station_flags_msk: what of %station_flags have changed
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %IWM_STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ * Set %IWM_STA_MODIFY_ADD_BA_TID to use this field, and also set
+ * add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ * Set %IWM_STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ * add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ * keeps track of STA sleep state.
+ * @sleep_state_flags: Look at %iwm_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by IWM_REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwm_mvm_add_sta_cmd_v5 {
+ uint8_t add_modify;
+ uint8_t unicast_tx_key_id;
+ uint8_t multicast_tx_key_id;
+ uint8_t reserved1;
+ uint32_t mac_id_n_color;
+ uint8_t addr[ETHER_ADDR_LEN];
+ uint16_t reserved2;
+ uint8_t sta_id;
+ uint8_t modify_mask;
+ uint16_t reserved3;
+ struct iwm_mvm_keyinfo key;
+ uint32_t station_flags;
+ uint32_t station_flags_msk;
+ uint16_t tid_disable_tx;
+ uint16_t reserved4;
+ uint8_t add_immediate_ba_tid;
+ uint8_t remove_immediate_ba_tid;
+ uint16_t add_immediate_ba_ssn;
+ uint16_t sleep_tx_count;
+ uint16_t sleep_state_flags;
+ uint16_t assoc_id;
+ uint16_t beamform_flags;
+ uint32_t tfd_queue_msk;
+} __packed; /* IWM_ADD_STA_CMD_API_S_VER_5 */
+
+/**
+ * struct iwm_mvm_add_sta_cmd_v6 - Add / modify a station
+ * VER_6 of this command is quite similar to VER_5 except
+ * exclusion of all fields related to the security key installation.
+ */
+struct iwm_mvm_add_sta_cmd_v6 {
+ uint8_t add_modify;
+ uint8_t reserved1;
+ uint16_t tid_disable_tx;
+ uint32_t mac_id_n_color;
+ uint8_t addr[ETHER_ADDR_LEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ uint16_t reserved2;
+ uint8_t sta_id;
+ uint8_t modify_mask;
+ uint16_t reserved3;
+ uint32_t station_flags;
+ uint32_t station_flags_msk;
+ uint8_t add_immediate_ba_tid;
+ uint8_t remove_immediate_ba_tid;
+ uint16_t add_immediate_ba_ssn;
+ uint16_t sleep_tx_count;
+ uint16_t sleep_state_flags;
+ uint16_t assoc_id;
+ uint16_t beamform_flags;
+ uint32_t tfd_queue_msk;
+} __packed; /* IWM_ADD_STA_CMD_API_S_VER_6 */
+
+/**
+ * struct iwm_mvm_add_sta_key_cmd - add/modify sta key
+ * ( IWM_REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type %iwm_sta_key_flag
+ * @key: key material data
+ * @key2: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwm_mvm_add_sta_key_cmd {
+ uint8_t sta_id;
+ uint8_t key_offset;
+ uint16_t key_flags;
+ uint8_t key[16];
+ uint8_t key2[16];
+ uint8_t rx_secur_seq_cnt[16];
+ uint8_t tkip_rx_tsc_byte2;
+ uint8_t reserved;
+ uint16_t tkip_rx_ttak[5];
+} __packed; /* IWM_ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
+ * enum iwm_mvm_add_sta_rsp_status - status in the response to ADD_STA command
+ * @IWM_ADD_STA_SUCCESS: operation was executed successfully
+ * @IWM_ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @IWM_ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @IWM_ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station
+ * that doesn't exist.
+ */
+enum iwm_mvm_add_sta_rsp_status {
+ IWM_ADD_STA_SUCCESS = 0x1,
+ IWM_ADD_STA_STATIONS_OVERLOAD = 0x2,
+ IWM_ADD_STA_IMMEDIATE_BA_FAILURE = 0x4,
+ IWM_ADD_STA_MODIFY_NON_EXISTING_STA = 0x8,
+};
+
+/**
+ * struct iwm_mvm_rm_sta_cmd - Remove a station from the fw's station table
+ * ( IWM_REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ */
+struct iwm_mvm_rm_sta_cmd {
+ uint8_t sta_id;
+ uint8_t reserved[3];
+} __packed; /* IWM_REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwm_mvm_mgmt_mcast_key_cmd
+ * ( IWM_MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwm_sta_key_flag
+ * @IGTK:
+ * @K1: IGTK master key
+ * @K2: IGTK sub key
+ * @sta_id: station ID that supports IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwm_mvm_mgmt_mcast_key_cmd {
+ uint32_t ctrl_flags;
+ uint8_t IGTK[16];
+ uint8_t K1[16];
+ uint8_t K2[16];
+ uint32_t key_id;
+ uint32_t sta_id;
+ uint64_t receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+struct iwm_mvm_wep_key {
+ uint8_t key_index;
+ uint8_t key_offset;
+ uint16_t reserved1;
+ uint8_t key_size;
+ uint8_t reserved2[3];
+ uint8_t key[16];
+} __packed;
+
+struct iwm_mvm_wep_key_cmd {
+ uint32_t mac_id_n_color;
+ uint8_t num_keys;
+ uint8_t decryption_type;
+ uint8_t flags;
+ uint8_t reserved;
+ struct iwm_mvm_wep_key wep_key[0];
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+
+
+/*
+ * END mvm/fw-api-sta.h
+ */
+
+/*
+ * Some cherry-picked definitions
+ */
+
+#define IWM_FRAME_LIMIT 64
+
+struct iwm_cmd_header {
+ uint8_t code;
+ uint8_t flags;
+ uint8_t idx;
+ uint8_t qid;
+} __packed;
+
+enum iwm_power_scheme {
+ IWM_POWER_SCHEME_CAM = 1,
+ IWM_POWER_SCHEME_BPS,
+ IWM_POWER_SCHEME_LP
+};
+
+#define IWM_DEF_CMD_PAYLOAD_SIZE 320
+#define IWM_CMD_FAILED_MSK 0x40
+
+struct iwm_device_cmd {
+ struct iwm_cmd_header hdr;
+
+ uint8_t data[IWM_DEF_CMD_PAYLOAD_SIZE];
+} __packed;
+
+struct iwm_rx_packet {
+ /*
+ * The first 4 bytes of the RX frame header contain both the RX frame
+ * size and some flags.
+ * Bit fields:
+ * 31: flag flush RB request
+ * 30: flag ignore TC (terminal counter) request
+ * 29: flag fast IRQ request
+ * 28-14: Reserved
+ * 13-00: RX frame size
+ */
+ uint32_t len_n_flags;
+ struct iwm_cmd_header hdr;
+ uint8_t data[];
+} __packed;
+
+#define IWM_FH_RSCSR_FRAME_SIZE_MSK 0x00003fff
+
+static uint32_t
+iwm_rx_packet_len(const struct iwm_rx_packet *pkt)
+{
+
+ return le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
+}
+
+static uint32_t
+iwm_rx_packet_payload_len(const struct iwm_rx_packet *pkt)
+{
+
+ return iwm_rx_packet_len(pkt) - sizeof(pkt->hdr);
+}
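+
+/*
+ * Illustrative use of the helpers above: a response handler would
+ * typically check the payload length before casting pkt->data to the
+ * expected notification structure.  Sketch only; the name is an
+ * assumption, not part of the driver.
+ */
+static inline int
+iwm_rx_packet_payload_ok(const struct iwm_rx_packet *pkt, size_t minlen)
+{
+	return iwm_rx_packet_payload_len(pkt) >= minlen;
+}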
+
+
+#define IWM_MIN_DBM -100
+#define IWM_MAX_DBM -33 /* realistic guess */
+
+#define IWM_READ(sc, reg) \
+ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
+
+#define IWM_WRITE(sc, reg, val) \
+ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWM_WRITE_1(sc, reg, val) \
+ bus_space_write_1((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWM_SETBITS(sc, reg, mask) \
+ IWM_WRITE(sc, reg, IWM_READ(sc, reg) | (mask))
+
+#define IWM_CLRBITS(sc, reg, mask) \
+ IWM_WRITE(sc, reg, IWM_READ(sc, reg) & ~(mask))
+
+#define IWM_BARRIER_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_WRITE)
+
+#define IWM_BARRIER_READ_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
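+
+/*
+ * Example usage of the register accessors above (illustrative only;
+ * IWM_CSR_EXAMPLE_REG and the bit value are placeholders, not real
+ * device registers):
+ *
+ *	IWM_SETBITS(sc, IWM_CSR_EXAMPLE_REG, (1 << 0));
+ *	IWM_BARRIER_WRITE(sc);
+ *	val = IWM_READ(sc, IWM_CSR_EXAMPLE_REG);
+ */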
diff --git a/sys/dev/pci/if_iwmvar.h b/sys/dev/pci/if_iwmvar.h
new file mode 100644
index 00000000000..2d47ad80fa8
--- /dev/null
+++ b/sys/dev/pci/if_iwmvar.h
@@ -0,0 +1,460 @@
+/* $OpenBSD: if_iwmvar.h,v 1.1 2015/02/06 19:49:29 stsp Exp $ */
+
+/*
+ * Copyright (c) 2014 genua mbh <info@genua.de>
+ * Copyright (c) 2014 Fixup Software Ltd.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ * Driver version we are currently based off of is
+ * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
+ *
+ ***********************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define IWM_UCODE_SECT_MAX 6
+#define IWM_FWNAME "iwm-7260-9"
+#define IWM_FWDMASEGSZ (192*1024)
+/* sanity check value */
+#define IWM_FWMAXSIZE (2*1024*1024)
+
+/*
+ * fw_status is used to determine if we've already parsed the firmware file
+ *
+ * In addition to the following, status < 0 ==> -error
+ */
+#define IWM_FW_STATUS_NONE 0
+#define IWM_FW_STATUS_INPROGRESS 1
+#define IWM_FW_STATUS_DONE 2
+
+enum iwm_ucode_type {
+ IWM_UCODE_TYPE_INIT,
+ IWM_UCODE_TYPE_REGULAR,
+ IWM_UCODE_TYPE_WOW,
+ IWM_UCODE_TYPE_MAX
+};
+
+struct iwm_fw_info {
+ void *fw_rawdata;
+ size_t fw_rawsize;
+ int fw_status;
+
+ struct iwm_fw_sects {
+ struct iwm_fw_onesect {
+ void *fws_data;
+ uint32_t fws_len;
+ uint32_t fws_devoff;
+
+ void *fws_alloc;
+ size_t fws_allocsize;
+ } fw_sect[IWM_UCODE_SECT_MAX];
+ size_t fw_totlen;
+ int fw_count;
+ } fw_sects[IWM_UCODE_TYPE_MAX];
+};
+
+struct iwm_nvm_data {
+ int n_hw_addrs;
+ uint8_t hw_addr[ETHER_ADDR_LEN];
+
+ uint8_t calib_version;
+ uint16_t calib_voltage;
+
+ uint16_t raw_temperature;
+ uint16_t kelvin_temperature;
+ uint16_t kelvin_voltage;
+ uint16_t xtal_calib[2];
+
+ int sku_cap_band_24GHz_enable;
+ int sku_cap_band_52GHz_enable;
+ int sku_cap_11n_enable;
+ int sku_cap_amt_enable;
+ int sku_cap_ipan_enable;
+
+ uint8_t radio_cfg_type;
+ uint8_t radio_cfg_step;
+ uint8_t radio_cfg_dash;
+ uint8_t radio_cfg_pnum;
+ uint8_t valid_tx_ant, valid_rx_ant;
+
+ uint16_t nvm_version;
+ uint8_t max_tx_pwr_half_dbm;
+};
+
+/* max bufs per tfd the driver will use */
+#define IWM_MAX_CMD_TBS_PER_TFD 2
+
+struct iwm_rx_packet;
+struct iwm_host_cmd {
+ const void *data[IWM_MAX_CMD_TBS_PER_TFD];
+ struct iwm_rx_packet *resp_pkt;
+ unsigned long _rx_page_addr;
+ uint32_t _rx_page_order;
+ int handler_status;
+
+ uint32_t flags;
+ uint16_t len[IWM_MAX_CMD_TBS_PER_TFD];
+ uint8_t dataflags[IWM_MAX_CMD_TBS_PER_TFD];
+ uint8_t id;
+};
+
+/*
+ * DMA glue is from iwn
+ */
+
+typedef caddr_t iwm_caddr_t;
+typedef void *iwm_hookarg_t;
+
+struct iwm_dma_info {
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+ bus_addr_t paddr;
+ void *vaddr;
+ bus_size_t size;
+};
+
+#define IWM_TX_RING_COUNT 256
+#define IWM_TX_RING_LOMARK 192
+#define IWM_TX_RING_HIMARK 224
+
+struct iwm_tx_data {
+ bus_dmamap_t map;
+ bus_addr_t cmd_paddr;
+ bus_addr_t scratch_paddr;
+ struct mbuf *m;
+ struct iwm_node *in;
+ int done;
+};
+
+struct iwm_tx_ring {
+ struct iwm_dma_info desc_dma;
+ struct iwm_dma_info cmd_dma;
+ struct iwm_tfd *desc;
+ struct iwm_device_cmd *cmd;
+ struct iwm_tx_data data[IWM_TX_RING_COUNT];
+ int qid;
+ int queued;
+ int cur;
+};
+
+#define IWM_RX_RING_COUNT 256
+#define IWM_RBUF_COUNT (IWM_RX_RING_COUNT + 32)
+/* Linux driver optionally uses 8k buffer */
+#define IWM_RBUF_SIZE 4096
+
+struct iwm_softc;
+struct iwm_rbuf {
+ struct iwm_softc *sc;
+ void *vaddr;
+ bus_addr_t paddr;
+};
+
+struct iwm_rx_data {
+ struct mbuf *m;
+ bus_dmamap_t map;
+ int wantresp;
+};
+
+struct iwm_rx_ring {
+ struct iwm_dma_info desc_dma;
+ struct iwm_dma_info stat_dma;
+ struct iwm_dma_info buf_dma;
+ uint32_t *desc;
+ struct iwm_rb_status *stat;
+ struct iwm_rx_data data[IWM_RX_RING_COUNT];
+ int cur;
+};
+
+#define IWM_FLAG_USE_ICT 0x01
+#define IWM_FLAG_HW_INITED 0x02
+#define IWM_FLAG_STOPPED 0x04
+#define IWM_FLAG_RFKILL 0x08
+
+struct iwm_ucode_status {
+ uint32_t uc_error_event_table;
+ uint32_t uc_log_event_table;
+
+ int uc_ok;
+ int uc_intr;
+};
+
+#define IWM_CMD_RESP_MAX PAGE_SIZE
+
+#define IWM_OTP_LOW_IMAGE_SIZE 2048
+
+#define IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
+#define IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/*
+ * Command headers are in iwl-trans.h, which is full of all
+ * kinds of other junk, so we just replicate the structures here.
+ * First the software bits:
+ */
+enum IWM_CMD_MODE {
+ IWM_CMD_SYNC = 0,
+ IWM_CMD_ASYNC = (1 << 0),
+ IWM_CMD_WANT_SKB = (1 << 1),
+ IWM_CMD_SEND_IN_RFKILL = (1 << 2),
+};
+enum iwm_hcmd_dataflag {
+ IWM_HCMD_DFL_NOCOPY = (1 << 0),
+ IWM_HCMD_DFL_DUP = (1 << 1),
+};
+
+/*
+ * iwlwifi/iwl-phy-db
+ */
+
+#define IWM_NUM_PAPD_CH_GROUPS 4
+#define IWM_NUM_TXP_CH_GROUPS 9
+
+struct iwm_phy_db_entry {
+ uint16_t size;
+ uint8_t *data;
+};
+
+struct iwm_phy_db {
+ struct iwm_phy_db_entry cfg;
+ struct iwm_phy_db_entry calib_nch;
+ struct iwm_phy_db_entry calib_ch_group_papd[IWM_NUM_PAPD_CH_GROUPS];
+ struct iwm_phy_db_entry calib_ch_group_txp[IWM_NUM_TXP_CH_GROUPS];
+};
+
+struct iwm_int_sta {
+ uint32_t sta_id;
+ uint32_t tfd_queue_msk;
+};
+
+struct iwm_mvm_phy_ctxt {
+ uint16_t id;
+ uint16_t color;
+ uint32_t ref;
+ struct ieee80211_channel *channel;
+};
+
+struct iwm_bf_data {
+ int bf_enabled; /* filtering */
+ int ba_enabled; /* abort */
+ int ave_beacon_signal;
+ int last_cqm_event;
+};
+
+struct iwm_softc {
+ struct device sc_dev;
+ struct ieee80211com sc_ic;
+ int (*sc_newstate)(struct ieee80211com *, enum ieee80211_state, int);
+ int sc_newstate_pending;
+
+ struct ieee80211_amrr sc_amrr;
+ struct timeout sc_calib_to;
+
+ bus_space_tag_t sc_st;
+ bus_space_handle_t sc_sh;
+ bus_size_t sc_sz;
+ bus_dma_tag_t sc_dmat;
+ pci_chipset_tag_t sc_pct;
+ pcitag_t sc_pcitag;
+ const void *sc_ih;
+
+ /* TX scheduler rings. */
+ struct iwm_dma_info sched_dma;
+ uint32_t sched_base;
+
+ /* TX/RX rings. */
+ struct iwm_tx_ring txq[IWM_MVM_MAX_QUEUES];
+ struct iwm_rx_ring rxq;
+ int qfullmsk;
+
+ int sc_sf_state;
+
+ /* ICT table. */
+ struct iwm_dma_info ict_dma;
+ int ict_cur;
+
+ int sc_hw_rev;
+ int sc_hw_id;
+
+ struct iwm_dma_info kw_dma;
+ struct iwm_dma_info fw_dma;
+
+ int sc_fw_chunk_done;
+ int sc_init_complete;
+
+ struct iwm_ucode_status sc_uc;
+ enum iwm_ucode_type sc_uc_current;
+ int sc_fwver;
+
+ int sc_capaflags;
+ int sc_capa_max_probe_len;
+
+ int sc_intmask;
+ int sc_flags;
+
+ /*
+ * So why do we need a separate stopped flag and a generation?
+	 * The former protects the device from issuing commands when it's
+	 * stopped (duh). The latter protects against a race from a very
+ * fast stop/unstop cycle where threads waiting for responses do
+ * not have a chance to run in between. Notably: we want to stop
+ * the device from interrupt context when it craps out, so we
+	 * don't have the luxury of waiting for quiescence.
+ */
+ int sc_generation;
+
+ int sc_cap_off; /* PCIe caps */
+
+ const char *sc_fwname;
+ bus_size_t sc_fwdmasegsz;
+ struct iwm_fw_info sc_fw;
+ int sc_fw_phy_config;
+ struct iwm_tlv_calib_ctrl sc_default_calib[IWM_UCODE_TYPE_MAX];
+
+ struct iwm_nvm_data sc_nvm;
+ struct iwm_phy_db sc_phy_db;
+
+ struct iwm_bf_data sc_bf;
+
+ int sc_tx_timer;
+
+ struct iwm_scan_cmd *sc_scan_cmd;
+ size_t sc_scan_cmd_len;
+ int sc_scan_last_antenna;
+ int sc_scanband;
+
+ int sc_auth_prot;
+
+ int sc_fixed_ridx;
+
+ int sc_staid;
+ int sc_nodecolor;
+
+ uint8_t sc_cmd_resp[IWM_CMD_RESP_MAX];
+ int sc_wantresp;
+
+ struct taskq *sc_nswq, *sc_eswq;
+ struct task sc_eswk;
+
+ struct iwm_rx_phy_info sc_last_phy_info;
+ int sc_ampdu_ref;
+
+ struct iwm_int_sta sc_aux_sta;
+
+ /* phy contexts. we only use the first one */
+ struct iwm_mvm_phy_ctxt sc_phyctxt[IWM_NUM_PHY_CTX];
+
+ struct iwm_notif_statistics sc_stats;
+};
+
+struct iwm_node {
+ struct ieee80211_node in_ni;
+ struct iwm_mvm_phy_ctxt *in_phyctxt;
+
+ uint16_t in_id;
+ uint16_t in_color;
+ int in_tsfid;
+
+ /* status "bits" */
+ int in_assoc;
+
+ struct iwm_lq_cmd in_lq;
+ struct ieee80211_amrr_node in_amn;
+
+ uint8_t in_ridx[IEEE80211_RATE_MAXSIZE];
+};
+#define IWM_STATION_ID 0
+
+#define IWM_ICT_SIZE 4096
+#define IWM_ICT_COUNT (IWM_ICT_SIZE / sizeof (uint32_t))
+#define IWM_ICT_PADDR_SHIFT 12