linux/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * The new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
        if (iwl_mvm_has_new_rx_api(mvm) ||
            fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                return sizeof(struct iwl_mvm_add_sta_cmd);
        else
                return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
{
        int sta_id;
        u32 reserved_ids = 0;

        BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

        lockdep_assert_held(&mvm->mutex);

        /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
        if (iftype != NL80211_IFTYPE_STATION)
                reserved_ids = BIT(0);

        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
        for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;

                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
        return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
                .sta_id = mvm_sta->sta_id,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                                 STA_FLG_MIMO_EN_MSK |
                                                 STA_FLG_RTS_MIMO_PROT),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;

        if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                add_sta_cmd.station_type = mvm_sta->sta_type;

        if (!update || (flags & STA_MODIFY_QUEUES)) {
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

                if (!iwl_mvm_has_new_tx_api(mvm)) {
                        add_sta_cmd.tfd_queue_msk =
                                cpu_to_le32(mvm_sta->tfd_queue_msk);

                        if (flags & STA_MODIFY_QUEUES)
                                add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
                } else {
                        WARN_ON(flags & STA_MODIFY_QUEUES);
                }
        }

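        /*
         * The fallthroughs in this switch are deliberate: a station capable
         * of a given bandwidth also gets the FAT flags of every narrower
         * width (20 MHz only if HT is supported).
         */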
        switch (sta->deflink.bandwidth) {
        case IEEE80211_STA_RX_BW_320:
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                fallthrough;
        case IEEE80211_STA_RX_BW_80:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
                fallthrough;
        case IEEE80211_STA_RX_BW_40:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
                fallthrough;
        case IEEE80211_STA_RX_BW_20:
                if (sta->deflink.ht_cap.ht_supported)
                        add_sta_cmd.station_flags |=
                                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
                break;
        }

        switch (sta->deflink.rx_nss) {
        case 1:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case 2:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
                break;
        case 3 ... 8:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
                break;
        }

        switch (sta->smps_mode) {
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
                break;
        case IEEE80211_SMPS_STATIC:
                /* override NSS */
                add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case IEEE80211_SMPS_DYNAMIC:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
                break;
        case IEEE80211_SMPS_OFF:
                /* nothing */
                break;
        }

        if (sta->deflink.ht_cap.ht_supported) {
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

                mpdu_dens = sta->deflink.ht_cap.ampdu_density;
        }

        if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

                mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
                                          IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
                agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
                                         IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
        } else if (sta->deflink.vht_cap.vht_supported) {
                agg_size = sta->deflink.vht_cap.cap &
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                agg_size >>=
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
        } else if (sta->deflink.ht_cap.ht_supported) {
                agg_size = sta->deflink.ht_cap.ampdu_factor;
        }

        /* D6.0 10.12.2 A-MPDU length limit rules
         * A STA indicates the maximum length of the A-MPDU preEOF padding
         * that it can receive in an HE PPDU in the Maximum A-MPDU Length
         * Exponent field in its HT Capabilities, VHT Capabilities,
         * and HE 6 GHz Band Capabilities elements (if present) and the
         * Maximum A-MPDU Length Exponent Extension field in its HE
         * Capabilities element.
         */
        if (sta->deflink.he_cap.has_he)
                agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3],
                                        IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

        /* Limit to max A-MPDU supported by FW */
        if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
                agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
                            STA_FLG_MAX_AGG_SIZE_SHIFT);
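        /*
         * Illustrative arithmetic (assuming the FW's 8K..4M encodings map
         * exponent n to 2^(13 + n) bytes): a VHT exponent of 7 (1 MiB)
         * plus an HE extension of 2 gives agg_size = 9, which is exactly
         * the 4 MiB cap applied above.
         */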

        add_sta_cmd.station_flags |=
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
        if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
                add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

        if (sta->wme) {
                add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BK);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BE);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VI);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VO);
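                /*
                 * The low nibble set above holds the trigger-enabled ACs;
                 * the shift below mirrors it into the high nibble, which
                 * (per the fw API docs) carries the delivery-enabled ACs -
                 * identical sets for U-APSD.
                 */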
                add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
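                /*
                 * max_sp counts in units of 2 frames (WMM QoS info
                 * encoding); 0 means "all buffered frames", which is
                 * conveyed to the FW as 128.
                 */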
                add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
        }

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "ADD_STA failed\n");
                break;
        }

        return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
        struct iwl_mvm_baid_data *data =
                from_timer(data, t, session_timer);
        struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        unsigned long timeout;

        rcu_read_lock();

        ba_data = rcu_dereference(*rcu_ptr);

        if (WARN_ON(!ba_data))
                goto unlock;

        if (!ba_data->timeout)
                goto unlock;

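        /*
         * Declare the session dead only after twice the negotiated BA
         * timeout has passed with no RX; the re-arm below uses the same
         * 2x margin.
         */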
        timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&ba_data->session_timer, timeout);
                goto unlock;
        }

        /* Timer expired */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

        /*
         * sta should be valid unless the following happens:
         * The firmware asserts which triggers a reconfig flow, but
         * the reconfig fails before we set the pointer to sta into
         * the fw_id_to_mac_id pointer table. Mac80211 can't stop
         * A-MPDU and hence the timer continues to run. Then, the
         * timer expires and sta is NULL.
         */
        if (!sta)
                goto unlock;

        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_rx_ba_timer_expired(mvm_sta->vif,
                                      sta->addr, ba_data->tid);
unlock:
        rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u32 status;
        u8 sta_id;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        sta_id = mvm->queue_info[queue].ra_sta_id;

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return -EINVAL;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        mvmsta->tid_disable_agg |= disable_agg_tids;

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_QUEUES;
        if (disable_agg_tids)
                cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        if (remove_queue)
                cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        rcu_read_unlock();

        /* Notify FW of queue removal from the STA queues */
        status = ADD_STA_SUCCESS;
        return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                           iwl_mvm_add_sta_cmd_size(mvm),
                                           &cmd, &status);
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                               u16 *queueptr, u8 tid)
{
        int queue = *queueptr;
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm)) {
                if (mvm->sta_remove_requires_queue_remove) {
                        u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
                                             SCD_QUEUE_CONFIG_CMD);
                        struct iwl_scd_queue_cfg_cmd remove_cmd = {
                                .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
                                .u.remove.queue = cpu_to_le32(queue),
                        };

                        ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
                                                   sizeof(remove_cmd),
                                                   &remove_cmd);
                } else {
                        ret = 0;
                }

                iwl_trans_txq_free(mvm->trans, queue);
                *queueptr = IWL_MVM_INVALID_QUEUE;

                return ret;
        }

        if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
                return 0;

        mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

        cmd.action = mvm->queue_info[queue].tid_bitmap ?
                SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
        if (cmd.action == SCD_CFG_DISABLE_QUEUE)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Disabling TXQ #%d tids=0x%x\n",
                            queue,
                            mvm->queue_info[queue].tid_bitmap);

        /* If the queue is still enabled - nothing left to do in this func */
        if (cmd.action == SCD_CFG_ENABLE_QUEUE)
                return 0;

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tid = mvm->queue_info[queue].txq_tid;

        /* Make sure queue info is correct even though we overwrite it */
        WARN(mvm->queue_info[queue].tid_bitmap,
             "TXQ #%d info out-of-sync - tids=0x%x\n",
             queue, mvm->queue_info[queue].tid_bitmap);

        /* If we are here - the queue is freed and we can zero out these vals */
        mvm->queue_info[queue].tid_bitmap = 0;

        if (sta) {
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);

                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
        }

        /* Regardless of whether this is a reserved TXQ for a STA - mark it as false */
        mvm->queue_info[queue].reserved = false;

        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
                                   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

        if (ret)
                IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
                        queue, ret);
        return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return -EINVAL;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        agg_tids |= BIT(tid);
        }
        spin_unlock_bh(&mvmsta->lock);

        return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long disable_agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return 0;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        /* Unmap MAC queues and TIDs from this queue */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);

                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
        }

        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
        spin_unlock_bh(&mvmsta->lock);

        rcu_read_unlock();

        /*
         * The TX path may have been using this TXQ_ID from the tid_data,
         * so make sure it's no longer running so that we can safely reuse
         * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
         * above, but nothing guarantees we've stopped using them. Thus,
         * without this, we could get to iwl_mvm_disable_txq() and remove
         * the queue while still sending frames to it.
         */
        synchronize_net();

        return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                                       struct ieee80211_sta *old_sta,
                                       u8 new_sta_id)
{
        struct iwl_mvm_sta *mvmsta;
        u8 sta_id, tid;
        unsigned long disable_agg_tids = 0;
        bool same_sta;
        u16 queue_tmp = queue;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid = mvm->queue_info[queue].txq_tid;

        same_sta = sta_id == new_sta_id;

        mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
        if (WARN_ON(!mvmsta))
                return -EINVAL;

        disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
        /* Disable the queue */
        if (disable_agg_tids)
                iwl_mvm_invalidate_sta_queue(mvm, queue,
                                             disable_agg_tids, false);

        ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
        if (ret) {
                IWL_ERR(mvm,
                        "Failed to free inactive queue %d (ret=%d)\n",
                        queue, ret);

                return ret;
        }

        /* If TXQ is allocated to another STA, update removal in FW */
        if (!same_sta)
                iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

        return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
        int queue = 0;
        u8 ac_to_queue[IEEE80211_NUM_ACS];
        int i;

        /*
         * This protects us against grabbing a queue that's being reconfigured
         * by the inactivity checker.
         */
        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

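        /*
         * IEEE80211_INVAL_HW_QUEUE is 0xff, so the byte fill below marks
         * every AC as having no queue yet.
         */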
        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

        /* See what ACs the existing queues for this STA have */
        for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
                /* Only DATA queues can be shared */
                if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;

                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }

        /*
         * The queue to share is chosen only from DATA queues as follows (in
         * descending priority):
         * 1. An AC_BE queue
         * 2. Same AC queue
         * 3. Highest AC queue that is lower than new AC
         * 4. Any existing AC (there always is at least 1 DATA queue)
         */

        /* Priority 1: An AC_BE queue */
        if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BE];
        /* Priority 2: Same AC queue */
        else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[ac];
        /* Priority 3a: If new AC is VO and VI exists - use VI */
        else if (ac == IEEE80211_AC_VO &&
                 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 3b: No BE so only AC less than the new one is BK */
        else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BK];
        /* Priority 4a: No BE nor BK - use VI if exists */
        else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 4b: No BE, BK nor VI - use VO if exists */
        else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VO];

        /* Make sure queue found (or not) is legal */
        if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
            !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
            (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
                return -ENOSPC;
        }

        return queue;
}

/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
                                int sta_id, int tid, int frame_limit, u16 ssn)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_ENABLE_QUEUE,
                .window = frame_limit,
                .sta_id = sta_id,
                .ssn = cpu_to_le16(ssn),
                .tx_fifo = fifo,
                .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                              queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
                .tid = tid,
        };
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
                 "Trying to reconfig unallocated queue %d\n", queue))
                return -ENXIO;

        IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
                  queue, fifo, ret);

        return ret;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
                                  int ac, int ssn, unsigned int wdg_timeout,
                                  bool force, struct iwl_mvm_txq *txq)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        /*
         * If the AC is lower than current one - FIFO needs to be redirected to
         * the lowest one of the streams in the queue. Check if this is needed
         * here.
         * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
         * value 3 and VO with value 0, so to check if ac X is lower than ac Y
         * we need to check if the numerical value of X is LARGER than of Y.
         */
        if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "No redirection needed on TXQ #%d\n",
                                    queue);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);

        /* Stop the queue and wait for it to empty */
        txq->stopped = true;

        ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
        if (ret) {
                IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                        queue);
                ret = -EIO;
                goto out;
        }

        /* Before redirecting the queue we need to de-activate it */
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                        ret);

        /* Make sure the SCD wrptr is correctly set before reconfiguring */
        iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

        /* Update the TID "owner" of the queue */
        mvm->queue_info[queue].txq_tid = tid;

        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

        /* Redirect to lower AC */
        iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                             cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

        /* Update AC marking of the queue */
        mvm->queue_info[queue].mac80211_ac = ac;

        /*
         * Mark queue as shared in transport if shared
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there to shared
         * queues
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
        /* Continue using the queue */
        txq->stopped = false;

        return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
                                   u8 minq, u8 maxq)
{
        int i;

        lockdep_assert_held(&mvm->mutex);

        if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
                 "max queue %d >= num_of_queues (%d)", maxq,
                 mvm->trans->trans_cfg->base_params->num_of_queues))
                maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

        /* This should not be hit with new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -ENOSPC;

        /* Start by looking for a free queue */
        for (i = minq; i <= maxq; i++)
                if (mvm->queue_info[i].tid_bitmap == 0 &&
                    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
                        return i;

        return -ENOSPC;
}

static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
                                   u8 sta_id, u8 tid, unsigned int timeout)
{
        int queue, size;

        if (tid == IWL_MAX_TID_COUNT) {
                tid = IWL_MGMT_TID;
                size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
                             mvm->trans->cfg->min_txq_size);
        } else {
                struct ieee80211_sta *sta;

                rcu_read_lock();
                sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

                /* this queue isn't used for traffic (cab_queue) */
                if (IS_ERR_OR_NULL(sta)) {
                        size = IWL_MGMT_QUEUE_SIZE;
                } else if (sta->deflink.he_cap.has_he) {
                        /* support for 256 ba size */
                        size = IWL_DEFAULT_QUEUE_SIZE_HE;
                } else {
                        size = IWL_DEFAULT_QUEUE_SIZE;
                }

                rcu_read_unlock();
        }

        /* take the min with bc tbl entries allowed */
        size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));

        /* size needs to be power of 2 values for calculating read/write pointers */
        size = rounddown_pow_of_two(size);

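        /*
         * If allocation fails, retry with progressively smaller queues by
         * halving the size, giving up below 16 entries.
         */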
        do {
                queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
                                            tid, size, timeout);

                if (queue < 0)
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
                                            size, sta_id, tid, queue);
                size /= 2;
        } while (queue < 0 && size >= 16);

        if (queue < 0)
                return queue;

        IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
                            queue, sta_id, tid);

        return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                        struct ieee80211_sta *sta, u8 ac,
                                        int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_txq *mvmtxq =
                iwl_mvm_txq_from_tid(sta, tid);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        int queue = -1;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating queue for sta %d on tid %d\n",
                            mvmsta->sta_id, tid);
        queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
        if (queue < 0)
                return queue;

        mvmtxq->txq_id = queue;
        mvm->tvqm_info[queue].txq_tid = tid;
        mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

        IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        spin_unlock_bh(&mvmsta->lock);

        return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       int queue, u8 sta_id, u8 tid)
{
        bool enable_queue = true;

        /* Make sure this TID isn't already enabled */
        if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
                IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
                        queue, tid);
                return false;
        }

        /* Update mappings and refcounts */
        if (mvm->queue_info[queue].tid_bitmap)
                enable_queue = false;

        mvm->queue_info[queue].tid_bitmap |= BIT(tid);
        mvm->queue_info[queue].ra_sta_id = sta_id;

        if (enable_queue) {
                if (tid != IWL_MAX_TID_COUNT)
                        mvm->queue_info[queue].mac80211_ac =
                                tid_to_mac80211_ac[tid];
                else
                        mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

                mvm->queue_info[queue].txq_tid = tid;
        }

        if (sta) {
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);

                mvmtxq->txq_id = queue;
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Enabling TXQ #%d tids=0x%x\n",
                            queue, mvm->queue_info[queue].tid_bitmap);

        return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                               int queue, u16 ssn,
                               const struct iwl_trans_txq_scd_cfg *cfg,
                               unsigned int wdg_timeout)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_ENABLE_QUEUE,
                .window = cfg->frame_limit,
                .sta_id = cfg->sta_id,
                .ssn = cpu_to_le16(ssn),
                .tx_fifo = cfg->fifo,
                .aggregate = cfg->aggregate,
                .tid = cfg->tid,
        };
        bool inc_ssn;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return false;

        /* Send the enabling command if we need to */
        if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
                return false;

        inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
                                           NULL, wdg_timeout);
        if (inc_ssn)
                le16_add_cpu(&cmd.ssn, 1);

        WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
             "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

        return inc_ssn;
}

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
        int tid;
        unsigned long tid_bitmap;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                return;

        /* Find any TID for queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        cmd.tid = tid;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                        queue, ret);
                return;
        }

        mvm->queue_info[queue].txq_tid = tid;
        IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                            queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u8 sta_id;
        int tid = -1;
        unsigned long tid_bitmap;
        unsigned int wdg_timeout;
        int ssn;
        int ret = true;

        /* queue sharing is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        lockdep_assert_held(&mvm->mutex);

        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        /* Find TID for queue, and make sure it is the only one on the queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        if (tid_bitmap != BIT(tid)) {
                IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                        queue, tid_bitmap);
                return;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                            tid);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

        ret = iwl_mvm_redirect_queue(mvm, queue, tid,
                                     tid_to_mac80211_ac[tid], ssn,
                                     wdg_timeout, true,
                                     iwl_mvm_txq_from_tid(sta, tid));
        if (ret) {
                IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
                return;
        }

        /* If aggs should be turned back on - do it */
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
                struct iwl_mvm_add_sta_cmd cmd = {0};

                mvmsta->tid_disable_agg &= ~BIT(tid);

                cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
                cmd.sta_id = mvmsta->sta_id;
                cmd.add_modify = STA_MODE_MODIFY;
                cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
                cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
                cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                           iwl_mvm_add_sta_cmd_size(mvm), &cmd);
                if (!ret) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "TXQ #%d is now aggregated again\n",
                                            queue);

                        /* Mark queue internally as aggregating again */
                        iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
                }
        }

        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
                                         struct iwl_mvm_sta *mvmsta, int queue,
                                         unsigned long tid_bitmap,
                                         unsigned long *unshare_queues,
                                         unsigned long *changetid_queues)
{
        unsigned int tid;

        lockdep_assert_held(&mvmsta->lock);
        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return false;

        /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                /* If some TFDs are still queued - don't mark TID as inactive */
                if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
                        tid_bitmap &= ~BIT(tid);

                /* Don't mark as inactive any TID that has an active BA */
                if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
                        tid_bitmap &= ~BIT(tid);
        }

        /* If all TIDs in the queue are inactive - return it can be reused */
        if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
                IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
                return true;
        }

        /*
         * If we are here, this is a shared queue and not all TIDs timed-out.
         * Remove the ones that did.
         */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                u16 q_tid_bitmap;

                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
                mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

                q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

                /*
                 * We need to take into account a situation in which a TXQ was
                 * allocated to TID x, and then turned shared by adding TIDs y
                 * and z. If TID x becomes inactive and is removed from the TXQ,
                 * ownership must be given to one of the remaining TIDs.
                 * This is mainly because if TID x continues - a new queue can't
                 * be allocated for it as long as it is an owner of another TXQ.
                 *
                 * Mark this queue in the right bitmap, we'll send the command
                 * to the firmware later.
                 */
                if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
                        set_bit(queue, changetid_queues);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Removing inactive TID %d from shared Q:%d\n",
                                    tid, queue);
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "TXQ #%d left with tid bitmap 0x%x\n", queue,
                            mvm->queue_info[queue].tid_bitmap);

        /*
         * There may be different TIDs with the same mac queues, so make
         * sure all TIDs have existing corresponding mac queues enabled
         */
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        /* If the queue is marked as shared - "unshare" it */
        if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
                IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
                                    queue);
                set_bit(queue, unshare_queues);
        }

        return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
        unsigned long now = jiffies;
        unsigned long unshare_queues = 0;
        unsigned long changetid_queues = 0;
        int i, ret, free_queue = -ENOSPC;
        struct ieee80211_sta *queue_owner = NULL;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm))
                return -ENOSPC;

        rcu_read_lock();

        /* we skip the CMD queue below by starting at 1 */
        BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

        for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
                struct ieee80211_sta *sta;
                struct iwl_mvm_sta *mvmsta;
                u8 sta_id;
                int tid;
                unsigned long inactive_tid_bitmap = 0;
                unsigned long queue_tid_bitmap;

                queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
                if (!queue_tid_bitmap)
                        continue;

                /* If TXQ isn't in active use anyway - nothing to do here... */
                if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
                    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
                        continue;

                /* Check to see if there are inactive TIDs on this queue */
                for_each_set_bit(tid, &queue_tid_bitmap,
                                 IWL_MAX_TID_COUNT + 1) {
                        if (time_after(mvm->queue_info[i].last_frame_time[tid] +
                                       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
                                continue;

                        inactive_tid_bitmap |= BIT(tid);
                }

                /* If all TIDs are active - finish check on this queue */
                if (!inactive_tid_bitmap)
                        continue;

                /*
                 * If we are here - the queue hadn't been served recently and is
                 * in use
                 */

                sta_id = mvm->queue_info[i].ra_sta_id;
                sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

                /*
                 * If the STA doesn't exist anymore, it isn't an error. It could
                 * be that it was removed since getting the queues, and in this
                 * case it should've inactivated its queues anyway.
                 */
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);

                spin_lock_bh(&mvmsta->lock);
                ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
                                                   inactive_tid_bitmap,
                                                   &unshare_queues,
                                                   &changetid_queues);
                if (ret && free_queue < 0) {
                        queue_owner = sta;
                        free_queue = i;
                }
                /* only unlock sta lock - we still need the queue info lock */
                spin_unlock_bh(&mvmsta->lock);
        }

        /* Reconfigure queues requiring reconfiguration */
1229        for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1230                iwl_mvm_unshare_queue(mvm, i);
1231        for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1232                iwl_mvm_change_queue_tid(mvm, i);
1233
1234        rcu_read_unlock();
1235
1236        if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1237                ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
1238                                                  alloc_for_sta);
1239                if (ret)
1240                        return ret;
1241        }
1242
1243        return free_queue;
1244}
1245
1246static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1247                                   struct ieee80211_sta *sta, u8 ac, int tid)
1248{
1249        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1250        struct iwl_trans_txq_scd_cfg cfg = {
1251                .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1252                .sta_id = mvmsta->sta_id,
1253                .tid = tid,
1254                .frame_limit = IWL_FRAME_LIMIT,
1255        };
1256        unsigned int wdg_timeout =
1257                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1258        int queue = -1;
1259        u16 queue_tmp;
1260        unsigned long disable_agg_tids = 0;
1261        enum iwl_mvm_agg_state queue_state;
1262        bool shared_queue = false, inc_ssn;
1263        int ssn;
1264        unsigned long tfd_queue_mask;
1265        int ret;
1266
1267        lockdep_assert_held(&mvm->mutex);
1268
1269        if (iwl_mvm_has_new_tx_api(mvm))
1270                return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1271
1272        spin_lock_bh(&mvmsta->lock);
1273        tfd_queue_mask = mvmsta->tfd_queue_msk;
1274        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1275        spin_unlock_bh(&mvmsta->lock);
1276
1277        if (tid == IWL_MAX_TID_COUNT) {
1278                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1279                                                IWL_MVM_DQA_MIN_MGMT_QUEUE,
1280                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
1281                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1282                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1283                                            queue);
1284
1285                /* If no such queue is found, we'll use a DATA queue instead */
1286        }
1287
1288        if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1289            (mvm->queue_info[mvmsta->reserved_queue].status ==
1290                        IWL_MVM_QUEUE_RESERVED)) {
1291                queue = mvmsta->reserved_queue;
1292                mvm->queue_info[queue].reserved = true;
1293                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1294        }
1295
1296        if (queue < 0)
1297                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1298                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
1299                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
1300        if (queue < 0) {
1301                /* try harder - perhaps kill an inactive queue */
1302                queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1303        }
1304
1305        /* No free queue - we'll have to share */
1306        if (queue <= 0) {
1307                queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1308                if (queue > 0) {
1309                        shared_queue = true;
1310                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1311                }
1312        }
1313
1314        /*
1315         * Mark TXQ as ready, even though it hasn't been fully configured yet,
1316         * to make sure no one else takes it.
1317         * This will allow avoiding re-acquiring the lock at the end of the
1318         * configuration. On error we'll mark it back as free.
1319         */
1320        if (queue > 0 && !shared_queue)
1321                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1322
1323        /* This shouldn't happen - out of queues */
1324        if (WARN_ON(queue <= 0)) {
1325                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1326                        tid, cfg.sta_id);
1327                return queue;
1328        }
1329
1330        /*
1331         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
1332         * but for configuring the SCD to send A-MPDUs we need to mark the queue
1333         * as aggregatable.
1334         * Mark all DATA queues as allowing to be aggregated at some point
1335         */
1336        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1337                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1338
1339        IWL_DEBUG_TX_QUEUES(mvm,
1340                            "Allocating %squeue #%d to sta %d on tid %d\n",
1341                            shared_queue ? "shared " : "", queue,
1342                            mvmsta->sta_id, tid);
1343
1344        if (shared_queue) {
1345                /* Disable any open aggs on this queue */
1346                disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1347
1348                if (disable_agg_tids) {
1349                        IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1350                                            queue);
1351                        iwl_mvm_invalidate_sta_queue(mvm, queue,
1352                                                     disable_agg_tids, false);
1353                }
1354        }
1355
1356        inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
1357
1358        /*
1359         * Mark queue as shared in transport if shared
1360         * Note this has to be done after queue enablement because enablement
1361         * can also set this value, and there is no indication there to shared
1362         * queues
1363         */
1364        if (shared_queue)
1365                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1366
1367        spin_lock_bh(&mvmsta->lock);
1368        /*
1369         * This looks racy, but it is not. We have only one packet for
1370         * this ra/tid in our Tx path since we stop the Qdisc when we
1371         * need to allocate a new TFD queue.
1372         */
1373        if (inc_ssn) {
1374                mvmsta->tid_data[tid].seq_number += 0x10;
1375                ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1376        }
1377        mvmsta->tid_data[tid].txq_id = queue;
1378        mvmsta->tfd_queue_msk |= BIT(queue);
1379        queue_state = mvmsta->tid_data[tid].state;
1380
1381        if (mvmsta->reserved_queue == queue)
1382                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1383        spin_unlock_bh(&mvmsta->lock);
1384
1385        if (!shared_queue) {
1386                ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1387                if (ret)
1388                        goto out_err;
1389
1390                /* If we need to re-enable aggregations... */
1391                if (queue_state == IWL_AGG_ON) {
1392                        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1393                        if (ret)
1394                                goto out_err;
1395                }
1396        } else {
1397                /* Redirect queue, if needed */
1398                ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1399                                             wdg_timeout, false,
1400                                             iwl_mvm_txq_from_tid(sta, tid));
1401                if (ret)
1402                        goto out_err;
1403        }
1404
1405        return 0;
1406
1407out_err:
1408        queue_tmp = queue;
1409        iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);
1410
1411        return ret;
1412}
1413
1414void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1415{
1416        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1417                                           add_stream_wk);
1418
1419        mutex_lock(&mvm->mutex);
1420
1421        iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1422
1423        while (!list_empty(&mvm->add_stream_txqs)) {
1424                struct iwl_mvm_txq *mvmtxq;
1425                struct ieee80211_txq *txq;
1426                u8 tid;
1427
1428                mvmtxq = list_first_entry(&mvm->add_stream_txqs,
1429                                          struct iwl_mvm_txq, list);
1430
1431                txq = container_of((void *)mvmtxq, struct ieee80211_txq,
1432                                   drv_priv);
1433                tid = txq->tid;
1434                if (tid == IEEE80211_NUM_TIDS)
1435                        tid = IWL_MAX_TID_COUNT;
1436
1437                /*
1438                 * We can't really do much here; if allocation fails we can't
1439                 * transmit anyway - so just don't transmit the frames and
1440                 * let them back up ... the allocation function has already
1441                 * tried its best to find a queue.
1442                 */
1443                if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
1444                        list_del_init(&mvmtxq->list);
1445                        continue;
1446                }
1447
1448                list_del_init(&mvmtxq->list);
1449                local_bh_disable();
1450                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1451                local_bh_enable();
1452        }
1453
1454        mutex_unlock(&mvm->mutex);
1455}
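
/*
 * Editor's sketch (not part of the driver): the producer side of the
 * worker above lives in the TX path. When a frame arrives for a TXQ
 * that has no hardware queue yet, the driver puts the TXQ on
 * mvm->add_stream_txqs and schedules add_stream_wk, roughly like this
 * (locking elided; list/field names taken from the worker above, the
 * helper name is hypothetical):
 */
static inline void example_defer_txq_alloc(struct iwl_mvm *mvm,
					   struct iwl_mvm_txq *mvmtxq)
{
	if (list_empty(&mvmtxq->list))
		list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
	schedule_work(&mvm->add_stream_wk);
}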
1456
1457static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1458                                      struct ieee80211_sta *sta,
1459                                      enum nl80211_iftype vif_type)
1460{
1461        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1462        int queue;
1463
1464        /* queue reserving is disabled on new TX path */
1465        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1466                return 0;
1467
1468        /* run the general cleanup/unsharing of queues */
1469        iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1470
1471        /* Make sure we have free resources for this STA */
1472        if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1473            !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1474            (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1475             IWL_MVM_QUEUE_FREE))
1476                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1477        else
1478                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1479                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
1480                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
1481        if (queue < 0) {
1482                /* try again - this time kick out a queue if needed */
1483                queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1484                if (queue < 0) {
1485                        IWL_ERR(mvm, "No available queues for new station\n");
1486                        return -ENOSPC;
1487                }
1488        }
1489        mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1490
1491        mvmsta->reserved_queue = queue;
1492
1493        IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1494                            queue, mvmsta->sta_id);
1495
1496        return 0;
1497}
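
/*
 * Editor's note: a reserved data queue moves through these states (all
 * transitions appear in this file):
 *
 *   IWL_MVM_QUEUE_FREE
 *     -> IWL_MVM_QUEUE_RESERVED   (iwl_mvm_reserve_sta_stream())
 *     -> IWL_MVM_QUEUE_READY      (iwl_mvm_sta_alloc_queue())
 *        or IWL_MVM_QUEUE_SHARED  (no free queue; sharing an existing one)
 *     -> IWL_MVM_QUEUE_FREE       (iwl_mvm_rm_sta() if still only
 *                                  reserved, or when the queue is disabled)
 */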
1498
1499/*
1500 * In DQA mode, after a HW restart the queues should be allocated as before, in
1501 * order to avoid race conditions when there are shared queues. This function
1502 * does the re-mapping and queue allocation.
1503 *
1504 * Note that re-enabling aggregations isn't done in this function.
1505 */
1506static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1507                                                 struct ieee80211_sta *sta)
1508{
1509        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1510        unsigned int wdg =
1511                iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1512        int i;
1513        struct iwl_trans_txq_scd_cfg cfg = {
1514                .sta_id = mvm_sta->sta_id,
1515                .frame_limit = IWL_FRAME_LIMIT,
1516        };
1517
1518        /* Make sure reserved queue is still marked as such (if allocated) */
1519        if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1520                mvm->queue_info[mvm_sta->reserved_queue].status =
1521                        IWL_MVM_QUEUE_RESERVED;
1522
1523        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1524                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1525                int txq_id = tid_data->txq_id;
1526                int ac;
1527
1528                if (txq_id == IWL_MVM_INVALID_QUEUE)
1529                        continue;
1530
1531                ac = tid_to_mac80211_ac[i];
1532
1533                if (iwl_mvm_has_new_tx_api(mvm)) {
1534                        IWL_DEBUG_TX_QUEUES(mvm,
1535                                            "Re-mapping sta %d tid %d\n",
1536                                            mvm_sta->sta_id, i);
1537                        txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
1538                                                         i, wdg);
1539                        /*
1540                         * on failure, just set it to IWL_MVM_INVALID_QUEUE
1541                         * so we try again later; we have no other good way
1542                         * of failing here
1543                         */
1544                        if (txq_id < 0)
1545                                txq_id = IWL_MVM_INVALID_QUEUE;
1546                        tid_data->txq_id = txq_id;
1547
1548                        /*
1549                         * Since we don't set the seq number after reset, and the
1550                         * HW sets it now, an FW reset will cause the seq num to
1551                         * start at 0 again, so the driver needs to update its
1552                         * internal copy as well to keep it in sync with the real value
1553                         */
1554                        tid_data->seq_number = 0;
1555                } else {
1556                        u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1557
1558                        cfg.tid = i;
1559                        cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1560                        cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1561                                         txq_id ==
1562                                         IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1563
1564                        IWL_DEBUG_TX_QUEUES(mvm,
1565                                            "Re-mapping sta %d tid %d to queue %d\n",
1566                                            mvm_sta->sta_id, i, txq_id);
1567
1568                        iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
1569                        mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1570                }
1571        }
1572}
1573
1574static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1575                                      struct iwl_mvm_int_sta *sta,
1576                                      const u8 *addr,
1577                                      u16 mac_id, u16 color)
1578{
1579        struct iwl_mvm_add_sta_cmd cmd;
1580        int ret;
1581        u32 status = ADD_STA_SUCCESS;
1582
1583        lockdep_assert_held(&mvm->mutex);
1584
1585        memset(&cmd, 0, sizeof(cmd));
1586        cmd.sta_id = sta->sta_id;
1587
1588        if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
1589            sta->type == IWL_STA_AUX_ACTIVITY)
1590                cmd.mac_id_n_color = cpu_to_le32(mac_id);
1591        else
1592                cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1593                                                                     color));
1594
1595        if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1596                cmd.station_type = sta->type;
1597
1598        if (!iwl_mvm_has_new_tx_api(mvm))
1599                cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1600        cmd.tid_disable_tx = cpu_to_le16(0xffff);
1601
1602        if (addr)
1603                memcpy(cmd.addr, addr, ETH_ALEN);
1604
1605        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1606                                          iwl_mvm_add_sta_cmd_size(mvm),
1607                                          &cmd, &status);
1608        if (ret)
1609                return ret;
1610
1611        switch (status & IWL_ADD_STA_STATUS_MASK) {
1612        case ADD_STA_SUCCESS:
1613                IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1614                return 0;
1615        default:
1616                ret = -EIO;
1617                IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1618                        status);
1619                break;
1620        }
1621        return ret;
1622}
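
/*
 * Editor's note: FW_CMD_ID_AND_COLOR() packs the MAC context id and its
 * color into a single 32-bit word - assuming the usual firmware API
 * layout of the id in the low byte and the color in the byte above it,
 * mac_id 3 with color 1 would encode as 0x103. The AUX-activity special
 * case above skips the packing and sends the lmac id as-is.
 */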
1623
1624int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1625                    struct ieee80211_vif *vif,
1626                    struct ieee80211_sta *sta)
1627{
1628        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1629        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1630        struct iwl_mvm_rxq_dup_data *dup_data;
1631        int i, ret, sta_id;
1632        bool sta_update = false;
1633        unsigned int sta_flags = 0;
1634
1635        lockdep_assert_held(&mvm->mutex);
1636
1637        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1638                sta_id = iwl_mvm_find_free_sta_id(mvm,
1639                                                  ieee80211_vif_type_p2p(vif));
1640        else
1641                sta_id = mvm_sta->sta_id;
1642
1643        if (sta_id == IWL_MVM_INVALID_STA)
1644                return -ENOSPC;
1645
1646        spin_lock_init(&mvm_sta->lock);
1647
1648        /* if this is a HW restart re-alloc existing queues */
1649        if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1650                struct iwl_mvm_int_sta tmp_sta = {
1651                        .sta_id = sta_id,
1652                        .type = mvm_sta->sta_type,
1653                };
1654
1655                /*
1656                 * First add an empty station since allocating
1657                 * a queue requires a valid station
1658                 */
1659                ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1660                                                 mvmvif->id, mvmvif->color);
1661                if (ret)
1662                        goto err;
1663
1664                iwl_mvm_realloc_queues_after_restart(mvm, sta);
1665                sta_update = true;
1666                sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1667                goto update_fw;
1668        }
1669
1670        mvm_sta->sta_id = sta_id;
1671        mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1672                                                      mvmvif->color);
1673        mvm_sta->vif = vif;
1674        if (!mvm->trans->trans_cfg->gen2)
1675                mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1676        else
1677                mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1678        mvm_sta->tx_protection = 0;
1679        mvm_sta->tt_tx_protection = false;
1680        mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1681
1682        /* HW restart, don't assume the memory has been zeroed */
1683        mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1684        mvm_sta->tfd_queue_msk = 0;
1685
1686        /* for HW restart - reset everything but the sequence number */
1687        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1688                u16 seq = mvm_sta->tid_data[i].seq_number;
1689                memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1690                mvm_sta->tid_data[i].seq_number = seq;
1691
1692                /*
1693                 * Mark all queues for this STA as unallocated and defer TX
1694                 * frames until the queue is allocated
1695                 */
1696                mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1697        }
1698
1699        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1700                struct iwl_mvm_txq *mvmtxq =
1701                        iwl_mvm_txq_from_mac80211(sta->txq[i]);
1702
1703                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1704                INIT_LIST_HEAD(&mvmtxq->list);
1705                atomic_set(&mvmtxq->tx_request, 0);
1706        }
1707
1708        mvm_sta->agg_tids = 0;
1709
1710        if (iwl_mvm_has_new_rx_api(mvm) &&
1711            !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1712                int q;
1713
1714                dup_data = kcalloc(mvm->trans->num_rx_queues,
1715                                   sizeof(*dup_data), GFP_KERNEL);
1716                if (!dup_data)
1717                        return -ENOMEM;
1718                /*
1719                 * Initialize all the last_seq values to 0xffff which can never
1720                 * compare equal to the frame's seq_ctrl in the check in
1721                 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1722                 * number and fragmented packets don't reach that function.
1723                 *
1724                 * This thus allows receiving a packet with seqno 0 and the
1725                 * retry bit set as the very first packet on a new TID.
1726                 */
1727                for (q = 0; q < mvm->trans->num_rx_queues; q++)
1728                        memset(dup_data[q].last_seq, 0xff,
1729                               sizeof(dup_data[q].last_seq));
1730                mvm_sta->dup_data = dup_data;
1731        }
1732
1733        if (!iwl_mvm_has_new_tx_api(mvm)) {
1734                ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1735                                                 ieee80211_vif_type_p2p(vif));
1736                if (ret)
1737                        goto err;
1738        }
1739
1740        /*
1741         * if rs is registered with mac80211, then "add station" will be handled
1742         * via the corresponding ops, otherwise need to notify rate scaling here
1743         */
1744        if (iwl_mvm_has_tlc_offload(mvm))
1745                iwl_mvm_rs_add_sta(mvm, mvm_sta);
1746        else
1747                spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1748
1749        iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1750
1751update_fw:
1752        ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1753        if (ret)
1754                goto err;
1755
1756        if (vif->type == NL80211_IFTYPE_STATION) {
1757                if (!sta->tdls) {
1758                        WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1759                        mvmvif->ap_sta_id = sta_id;
1760                } else {
1761                        WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1762                }
1763        }
1764
1765        rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1766
1767        return 0;
1768
1769err:
1770        return ret;
1771}
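
/*
 * Editor's sketch of the duplicate check that the 0xffff sentinel above
 * feeds: a condensed, hypothetical form of iwl_mvm_is_dup() (parameter
 * list and field use assumed):
 */
static inline bool example_is_dup(struct iwl_mvm_rxq_dup_data *dup_data,
				  u8 tid, __le16 seq_ctrl, bool retry)
{
	/*
	 * The 0xffff sentinel can never match: the low 4 bits of seq_ctrl
	 * carry the fragment number, and fragmented frames never reach
	 * this check.
	 */
	if (dup_data->last_seq[tid] == seq_ctrl && retry)
		return true;	/* retransmission of a frame already seen */

	dup_data->last_seq[tid] = seq_ctrl;
	return false;
}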
1772
1773int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1774                      bool drain)
1775{
1776        struct iwl_mvm_add_sta_cmd cmd = {};
1777        int ret;
1778        u32 status;
1779
1780        lockdep_assert_held(&mvm->mutex);
1781
1782        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1783        cmd.sta_id = mvmsta->sta_id;
1784        cmd.add_modify = STA_MODE_MODIFY;
1785        cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1786        cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1787
1788        status = ADD_STA_SUCCESS;
1789        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1790                                          iwl_mvm_add_sta_cmd_size(mvm),
1791                                          &cmd, &status);
1792        if (ret)
1793                return ret;
1794
1795        switch (status & IWL_ADD_STA_STATUS_MASK) {
1796        case ADD_STA_SUCCESS:
1797                IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
1798                               mvmsta->sta_id);
1799                break;
1800        default:
1801                ret = -EIO;
1802                IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1803                        mvmsta->sta_id);
1804                break;
1805        }
1806
1807        return ret;
1808}
1809
1810/*
1811 * Remove a station from the FW table. Before sending the command to remove
1812 * the station, validate that the station is indeed known to the driver
1813 * (sanity check only).
1814 */
1815static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1816{
1817        struct ieee80211_sta *sta;
1818        struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1819                .sta_id = sta_id,
1820        };
1821        int ret;
1822
1823        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1824                                        lockdep_is_held(&mvm->mutex));
1825
1826        /* Note: internal stations are marked as error values */
1827        if (!sta) {
1828                IWL_ERR(mvm, "Invalid station id\n");
1829                return -EINVAL;
1830        }
1831
1832        ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1833                                   sizeof(rm_sta_cmd), &rm_sta_cmd);
1834        if (ret) {
1835                IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1836                return ret;
1837        }
1838
1839        return 0;
1840}
1841
1842static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1843                                       struct ieee80211_vif *vif,
1844                                       struct ieee80211_sta *sta)
1845{
1846        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1847        int i;
1848
1849        lockdep_assert_held(&mvm->mutex);
1850
1851        for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1852                if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1853                        continue;
1854
1855                iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
1856                mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1857        }
1858
1859        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1860                struct iwl_mvm_txq *mvmtxq =
1861                        iwl_mvm_txq_from_mac80211(sta->txq[i]);
1862
1863                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1864                list_del_init(&mvmtxq->list);
1865        }
1866}
1867
1868int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1869                                  struct iwl_mvm_sta *mvm_sta)
1870{
1871        int i;
1872
1873        for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1874                u16 txq_id;
1875                int ret;
1876
1877                spin_lock_bh(&mvm_sta->lock);
1878                txq_id = mvm_sta->tid_data[i].txq_id;
1879                spin_unlock_bh(&mvm_sta->lock);
1880
1881                if (txq_id == IWL_MVM_INVALID_QUEUE)
1882                        continue;
1883
1884                ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1885                if (ret)
1886                        return ret;
1887        }
1888
1889        return 0;
1890}
1891
1892int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1893                   struct ieee80211_vif *vif,
1894                   struct ieee80211_sta *sta)
1895{
1896        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1897        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1898        u8 sta_id = mvm_sta->sta_id;
1899        int ret;
1900
1901        lockdep_assert_held(&mvm->mutex);
1902
1903        if (iwl_mvm_has_new_rx_api(mvm))
1904                kfree(mvm_sta->dup_data);
1905
1906        ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1907        if (ret)
1908                return ret;
1909
1910        /* flush its queues here since we are freeing mvm_sta */
1911        ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
1912        if (ret)
1913                return ret;
1914        if (iwl_mvm_has_new_tx_api(mvm)) {
1915                ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1916        } else {
1917                u32 q_mask = mvm_sta->tfd_queue_msk;
1918
1919                ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1920                                                     q_mask);
1921        }
1922        if (ret)
1923                return ret;
1924
1925        ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1926
1927        iwl_mvm_disable_sta_queues(mvm, vif, sta);
1928
1929        /* If there is a TXQ still marked as reserved - free it */
1930        if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1931                u8 reserved_txq = mvm_sta->reserved_queue;
1932                enum iwl_mvm_queue_status *status;
1933
1934                /*
1935                 * If no traffic has gone through the reserved TXQ - it
1936                 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1937                 * should be manually marked as free again
1938                 */
1939                status = &mvm->queue_info[reserved_txq].status;
1940                if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1941                         (*status != IWL_MVM_QUEUE_FREE),
1942                         "sta_id %d reserved txq %d status %d",
1943                         sta_id, reserved_txq, *status))
1944                        return -EINVAL;
1945
1946                *status = IWL_MVM_QUEUE_FREE;
1947        }
1948
1949        if (vif->type == NL80211_IFTYPE_STATION &&
1950            mvmvif->ap_sta_id == sta_id) {
1951                /* if associated - we can't remove the AP STA now */
1952                if (vif->cfg.assoc)
1953                        return ret;
1954
1955                /* unassoc - go ahead - remove the AP STA now */
1956                mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1957        }
1958
1959        /*
1960         * This shouldn't happen - the TDLS channel switch should be canceled
1961         * before the STA is removed.
1962         */
1963        if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1964                mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1965                cancel_delayed_work(&mvm->tdls_cs.dwork);
1966        }
1967
1968        /*
1969         * Make sure that the tx response code sees the station as -EBUSY and
1970         * calls the drain worker.
1971         */
1972        spin_lock_bh(&mvm_sta->lock);
1973        spin_unlock_bh(&mvm_sta->lock);
1974
1975        ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1976        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1977
1978        return ret;
1979}
1980
1981int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1982                      struct ieee80211_vif *vif,
1983                      u8 sta_id)
1984{
1985        int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1986
1987        lockdep_assert_held(&mvm->mutex);
1988
1989        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1990        return ret;
1991}
1992
1993int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1994                             struct iwl_mvm_int_sta *sta,
1995                             u32 qmask, enum nl80211_iftype iftype,
1996                             enum iwl_sta_type type)
1997{
1998        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1999            sta->sta_id == IWL_MVM_INVALID_STA) {
2000                sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
2001                if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
2002                        return -ENOSPC;
2003        }
2004
2005        sta->tfd_queue_msk = qmask;
2006        sta->type = type;
2007
2008        /* put a non-NULL value so iterating over the stations won't stop */
2009        rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2010        return 0;
2011}
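
/*
 * Editor's sketch: because internal stations are stored as
 * ERR_PTR(-EINVAL) above, code walking fw_id_to_mac_id must treat error
 * values as "slot in use, but not a real mac80211 station" (helper name
 * hypothetical; assumes mvm->mutex is held):
 */
static inline bool example_slot_is_real_sta(struct iwl_mvm *mvm, int sta_id)
{
	struct ieee80211_sta *sta =
		rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					  lockdep_is_held(&mvm->mutex));

	return sta && !IS_ERR(sta);	/* NULL = free, ERR_PTR = internal */
}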
2012
2013void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2014{
2015        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
2016        memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
2017        sta->sta_id = IWL_MVM_INVALID_STA;
2018}
2019
2020static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
2021                                          u8 sta_id, u8 fifo)
2022{
2023        unsigned int wdg_timeout =
2024                mvm->trans->trans_cfg->base_params->wd_timeout;
2025        struct iwl_trans_txq_scd_cfg cfg = {
2026                .fifo = fifo,
2027                .sta_id = sta_id,
2028                .tid = IWL_MAX_TID_COUNT,
2029                .aggregate = false,
2030                .frame_limit = IWL_FRAME_LIMIT,
2031        };
2032
2033        WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2034
2035        iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2036}
2037
2038static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
2039{
2040        unsigned int wdg_timeout =
2041                mvm->trans->trans_cfg->base_params->wd_timeout;
2042
2043        WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2044
2045        return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
2046                                       wdg_timeout);
2047}
2048
2049static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2050                                          int maccolor, u8 *addr,
2051                                          struct iwl_mvm_int_sta *sta,
2052                                          u16 *queue, int fifo)
2053{
2054        int ret;
2055
2056        /* Map queue to fifo - needs to happen before adding station */
2057        if (!iwl_mvm_has_new_tx_api(mvm))
2058                iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2059
2060        ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2061        if (ret) {
2062                if (!iwl_mvm_has_new_tx_api(mvm))
2063                        iwl_mvm_disable_txq(mvm, NULL, queue,
2064                                            IWL_MAX_TID_COUNT);
2065                return ret;
2066        }
2067
2068        /*
2069         * For 22000 firmware and on we cannot add a queue to a station unknown
2070         * to the firmware, so enable the queue here - after the station was added
2071         */
2072        if (iwl_mvm_has_new_tx_api(mvm)) {
2073                int txq;
2074
2075                txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2076                if (txq < 0) {
2077                        iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2078                        return txq;
2079                }
2080
2081                *queue = txq;
2082        }
2083
2084        return 0;
2085}
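
/*
 * Editor's note on the ordering above: on pre-22000 devices
 * (!iwl_mvm_has_new_tx_api()) the queue-to-fifo mapping must exist
 * before the station is added, so the queue is enabled first and torn
 * down again if ADD_STA fails. On 22000+ the firmware rejects queue
 * configuration for an unknown station, so ADD_STA goes first and the
 * TVQM queue is allocated afterwards, removing the station again if
 * the allocation fails.
 */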
2086
2087int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2088{
2089        int ret;
2090
2091        lockdep_assert_held(&mvm->mutex);
2092
2093        /* Allocate the aux station and assign the aux queue to it */
2094        ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2095                                       NL80211_IFTYPE_UNSPECIFIED,
2096                                       IWL_STA_AUX_ACTIVITY);
2097        if (ret)
2098                return ret;
2099
2100        /*
2101         * In CDB NICs we need to specify which lmac to use for aux activity;
2102         * the mac_id argument slot is reused to pass lmac_id to the function
2103         */
2104        ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2105                                             &mvm->aux_sta, &mvm->aux_queue,
2106                                             IWL_MVM_TX_FIFO_MCAST);
2107        if (ret) {
2108                iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2109                return ret;
2110        }
2111
2112        return 0;
2113}
2114
2115int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2116{
2117        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2118
2119        lockdep_assert_held(&mvm->mutex);
2120
2121        return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2122                                              NULL, &mvm->snif_sta,
2123                                              &mvm->snif_queue,
2124                                              IWL_MVM_TX_FIFO_BE);
2125}
2126
2127int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2128{
2129        int ret;
2130
2131        lockdep_assert_held(&mvm->mutex);
2132
2133        if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2134                return -EINVAL;
2135
2136        iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
2137        ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2138        if (ret)
2139                IWL_WARN(mvm, "Failed sending remove station\n");
2140
2141        return ret;
2142}
2143
2144int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2145{
2146        int ret;
2147
2148        lockdep_assert_held(&mvm->mutex);
2149
2150        if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2151                return -EINVAL;
2152
2153        iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
2154        ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2155        if (ret)
2156                IWL_WARN(mvm, "Failed sending remove station\n");
2157        iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2158
2159        return ret;
2160}
2161
2162void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2163{
2164        iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2165}
2166
2167/*
2168 * Send the add station command for the vif's broadcast station.
2169 * Assumes that the station was already allocated.
2170 *
2171 * @mvm: the mvm component
2172 * @vif: the interface to which the broadcast station is added
2173 * The broadcast station itself is taken from the vif's bcast_sta.
2174 */
2175int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2176{
2177        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2178        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2179        static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2180        const u8 *baddr = _baddr;
2181        int queue;
2182        int ret;
2183        unsigned int wdg_timeout =
2184                iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2185        struct iwl_trans_txq_scd_cfg cfg = {
2186                .fifo = IWL_MVM_TX_FIFO_VO,
2187                .sta_id = mvmvif->bcast_sta.sta_id,
2188                .tid = IWL_MAX_TID_COUNT,
2189                .aggregate = false,
2190                .frame_limit = IWL_FRAME_LIMIT,
2191        };
2192
2193        lockdep_assert_held(&mvm->mutex);
2194
2195        if (!iwl_mvm_has_new_tx_api(mvm)) {
2196                if (vif->type == NL80211_IFTYPE_AP ||
2197                    vif->type == NL80211_IFTYPE_ADHOC) {
2198                        queue = mvm->probe_queue;
2199                } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2200                        queue = mvm->p2p_dev_queue;
2201                } else {
2202                        WARN(1, "Missing required TXQ for adding bcast STA\n");
2203                        return -EINVAL;
2204                }
2205
2206                bsta->tfd_queue_msk |= BIT(queue);
2207
2208                iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2209        }
2210
2211        if (vif->type == NL80211_IFTYPE_ADHOC)
2212                baddr = vif->bss_conf.bssid;
2213
2214        if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2215                return -ENOSPC;
2216
2217        ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2218                                         mvmvif->id, mvmvif->color);
2219        if (ret)
2220                return ret;
2221
2222        /*
2223         * For 22000 firmware and on we cannot add a queue to a station unknown
2224         * to the firmware, so enable the queue here - after the station was added
2225         */
2226        if (iwl_mvm_has_new_tx_api(mvm)) {
2227                queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2228                                                IWL_MAX_TID_COUNT,
2229                                                wdg_timeout);
2230                if (queue < 0) {
2231                        iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2232                        return queue;
2233                }
2234
2235                if (vif->type == NL80211_IFTYPE_AP ||
2236                    vif->type == NL80211_IFTYPE_ADHOC)
2237                        mvm->probe_queue = queue;
2238                else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2239                        mvm->p2p_dev_queue = queue;
2240        }
2241
2242        return 0;
2243}
2244
2245static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2246                                          struct ieee80211_vif *vif)
2247{
2248        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2249        u16 *queueptr, queue;
2250
2251        lockdep_assert_held(&mvm->mutex);
2252
2253        iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
2254
2255        switch (vif->type) {
2256        case NL80211_IFTYPE_AP:
2257        case NL80211_IFTYPE_ADHOC:
2258                queueptr = &mvm->probe_queue;
2259                break;
2260        case NL80211_IFTYPE_P2P_DEVICE:
2261                queueptr = &mvm->p2p_dev_queue;
2262                break;
2263        default:
2264                WARN(1, "Can't free bcast queue on vif type %d\n",
2265                     vif->type);
2266                return;
2267        }
2268
2269        queue = *queueptr;
2270        iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
2271        if (iwl_mvm_has_new_tx_api(mvm))
2272                return;
2273
2274        WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2275        mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2276}
2277
2278/* Send the FW a request to remove the station from its internal data
2279 * structures, but DO NOT remove the entry from the local data structures. */
2280int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2281{
2282        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2283        int ret;
2284
2285        lockdep_assert_held(&mvm->mutex);
2286
2287        iwl_mvm_free_bcast_sta_queues(mvm, vif);
2288
2289        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2290        if (ret)
2291                IWL_WARN(mvm, "Failed sending remove station\n");
2292        return ret;
2293}
2294
2295int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2296{
2297        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2298
2299        lockdep_assert_held(&mvm->mutex);
2300
2301        return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2302                                        ieee80211_vif_type_p2p(vif),
2303                                        IWL_STA_GENERAL_PURPOSE);
2304}
2305
2306/* Allocate a new station entry for the broadcast station to the given vif,
2307 * and send it to the FW.
2308 * Note that each P2P mac should have its own broadcast station.
2309 *
2310 * @mvm: the mvm component
2311 * @vif: the interface to which the broadcast station is added
2312 * The broadcast station itself is taken from the vif's bcast_sta. */
2313int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2314{
2315        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2316        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2317        int ret;
2318
2319        lockdep_assert_held(&mvm->mutex);
2320
2321        ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2322        if (ret)
2323                return ret;
2324
2325        ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2326
2327        if (ret)
2328                iwl_mvm_dealloc_int_sta(mvm, bsta);
2329
2330        return ret;
2331}
2332
2333void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2334{
2335        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2336
2337        iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2338}
2339
2340/*
2341 * Send the FW a request to remove the station from its internal data
2342 * structures, and in addition remove it from the local data structure.
2343 */
2344int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2345{
2346        int ret;
2347
2348        lockdep_assert_held(&mvm->mutex);
2349
2350        ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2351
2352        iwl_mvm_dealloc_bcast_sta(mvm, vif);
2353
2354        return ret;
2355}
2356
2357/*
2358 * Allocate a new station entry for the multicast station to the given vif,
2359 * and send it to the FW.
2360 * Note that each AP/GO mac should have its own multicast station.
2361 *
2362 * @mvm: the mvm component
2363 * @vif: the interface to which the multicast station is added
2364 */
2365int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2366{
2367        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2368        struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2369        static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2370        const u8 *maddr = _maddr;
2371        struct iwl_trans_txq_scd_cfg cfg = {
2372                .fifo = vif->type == NL80211_IFTYPE_AP ?
2373                        IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2374                .sta_id = msta->sta_id,
2375                .tid = 0,
2376                .aggregate = false,
2377                .frame_limit = IWL_FRAME_LIMIT,
2378        };
2379        unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2380        int ret;
2381
2382        lockdep_assert_held(&mvm->mutex);
2383
2384        if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2385                    vif->type != NL80211_IFTYPE_ADHOC))
2386                return -ENOTSUPP;
2387
2388        /*
2389         * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2390         * invalid, so make sure we use the queue we want.
2391         * Note that this is done here as we want to avoid making DQA
2392         * changes in the mac80211 layer.
2393         */
2394        if (vif->type == NL80211_IFTYPE_ADHOC)
2395                mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2396
2397        /*
2398         * While in previous FWs we had to exclude cab queue from TFD queue
2399         * mask, now it is needed like any other queue.
2400         */
2401        if (!iwl_mvm_has_new_tx_api(mvm) &&
2402            fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2403                iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2404                                   timeout);
2405                msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2406        }
2407        ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2408                                         mvmvif->id, mvmvif->color);
2409        if (ret)
2410                goto err;
2411
2412        /*
2413         * Enable cab queue after the ADD_STA command is sent.
2414         * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2415         * command with unknown station id, and for FW that doesn't support
2416         * station API since the cab queue is not included in the
2417         * tfd_queue_mask.
2418         */
2419        if (iwl_mvm_has_new_tx_api(mvm)) {
2420                int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2421                                                    0,
2422                                                    timeout);
2423                if (queue < 0) {
2424                        ret = queue;
2425                        goto err;
2426                }
2427                mvmvif->cab_queue = queue;
2428        } else if (!fw_has_api(&mvm->fw->ucode_capa,
2429                               IWL_UCODE_TLV_API_STA_TYPE))
2430                iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2431                                   timeout);
2432
2433        return 0;
2434err:
2435        iwl_mvm_dealloc_int_sta(mvm, msta);
2436        return ret;
2437}
2438
2439static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2440                                    struct ieee80211_key_conf *keyconf,
2441                                    bool mcast)
2442{
2443        union {
2444                struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2445                struct iwl_mvm_add_sta_key_cmd cmd;
2446        } u = {};
2447        bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2448                                  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2449        __le16 key_flags;
2450        int ret, size;
2451        u32 status;
2452
2453        /* This is a valid situation for GTK removal */
2454        if (sta_id == IWL_MVM_INVALID_STA)
2455                return 0;
2456
2457        key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2458                                 STA_KEY_FLG_KEYID_MSK);
2459        key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2460        key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2461
2462        if (mcast)
2463                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2464
2465        /*
2466         * The fields assigned here are in the same location at the start
2467         * of the command, so we can do this union trick.
2468         */
2469        u.cmd.common.key_flags = key_flags;
2470        u.cmd.common.key_offset = keyconf->hw_key_idx;
2471        u.cmd.common.sta_id = sta_id;
2472
2473        size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2474
2475        status = ADD_STA_SUCCESS;
2476        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2477                                          &status);
2478
2479        switch (status) {
2480        case ADD_STA_SUCCESS:
2481                IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2482                break;
2483        default:
2484                ret = -EIO;
2485                IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2486                break;
2487        }
2488
2489        return ret;
2490}
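
/*
 * Editor's sketch: the union trick above is only safe because both
 * command versions start with the same common block. A build-time check
 * along these lines would make that explicit (hypothetical; assumes
 * both structs expose a leading 'common' member):
 */
static inline void example_sta_key_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct iwl_mvm_add_sta_key_cmd, common) !=
		     offsetof(struct iwl_mvm_add_sta_key_cmd_v1, common));
}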
2491
2492/*
2493 * Send the FW a request to remove the station from its internal data
2494 * structures, and in addition remove it from the local data structure.
2495 */
2496int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2497{
2498        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2499        int ret;
2500
2501        lockdep_assert_held(&mvm->mutex);
2502
2503        iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
2504
2505        iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
2506
2507        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2508        if (ret)
2509                IWL_WARN(mvm, "Failed sending remove station\n");
2510
2511        return ret;
2512}
2513
2514static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2515{
2516        struct iwl_mvm_delba_data notif = {
2517                .baid = baid,
2518        };
2519
2520        iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2521                                        &notif, sizeof(notif));
2522}
2523
2524static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2525                                 struct iwl_mvm_baid_data *data)
2526{
2527        int i;
2528
2529        iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2530
2531        for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2532                int j;
2533                struct iwl_mvm_reorder_buffer *reorder_buf =
2534                        &data->reorder_buf[i];
2535                struct iwl_mvm_reorder_buf_entry *entries =
2536                        &data->entries[i * data->entries_per_queue];
2537
2538                spin_lock_bh(&reorder_buf->lock);
2539                if (likely(!reorder_buf->num_stored)) {
2540                        spin_unlock_bh(&reorder_buf->lock);
2541                        continue;
2542                }
2543
2544                /*
2545                 * This shouldn't happen in regular DELBA since the internal
2546                 * delBA notification should trigger a release of all frames in
2547                 * the reorder buffer.
2548                 */
2549                WARN_ON(1);
2550
2551                for (j = 0; j < reorder_buf->buf_size; j++)
2552                        __skb_queue_purge(&entries[j].e.frames);
2553                /*
2554                 * Prevent timer re-arm. This prevents a very far-fetched case
2555                 * where we timed out on the notification. There may be prior
2556                 * RX frames pending in the RX queue before the notification
2557                 * that might get processed between now and the actual deletion
2558                 * and we would re-arm the timer although we are deleting the
2559                 * reorder buffer.
2560                 */
2561                reorder_buf->removed = true;
2562                spin_unlock_bh(&reorder_buf->lock);
2563                del_timer_sync(&reorder_buf->reorder_timer);
2564        }
2565}
2566
2567static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2568                                        struct iwl_mvm_baid_data *data,
2569                                        u16 ssn, u16 buf_size)
2570{
2571        int i;
2572
2573        for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2574                struct iwl_mvm_reorder_buffer *reorder_buf =
2575                        &data->reorder_buf[i];
2576                struct iwl_mvm_reorder_buf_entry *entries =
2577                        &data->entries[i * data->entries_per_queue];
2578                int j;
2579
2580                reorder_buf->num_stored = 0;
2581                reorder_buf->head_sn = ssn;
2582                reorder_buf->buf_size = buf_size;
2583                /* rx reorder timer */
2584                timer_setup(&reorder_buf->reorder_timer,
2585                            iwl_mvm_reorder_timer_expired, 0);
2586                spin_lock_init(&reorder_buf->lock);
2587                reorder_buf->mvm = mvm;
2588                reorder_buf->queue = i;
2589                reorder_buf->valid = false;
2590                for (j = 0; j < reorder_buf->buf_size; j++)
2591                        __skb_queue_head_init(&entries[j].e.frames);
2592        }
2593}
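
/*
 * Editor's note: the flat 'entries' array is carved up per RX queue;
 * queue i owns entries[i * entries_per_queue] through
 * entries[i * entries_per_queue + buf_size - 1]. entries_per_queue can
 * exceed buf_size because of the cache-line rounding done in
 * iwl_mvm_sta_rx_agg() below; the extra tail entries are never used.
 */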
2594
2595static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
2596                                  struct iwl_mvm_sta *mvm_sta,
2597                                  bool start, int tid, u16 ssn,
2598                                  u16 buf_size)
2599{
2600        struct iwl_mvm_add_sta_cmd cmd = {
2601                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2602                .sta_id = mvm_sta->sta_id,
2603                .add_modify = STA_MODE_MODIFY,
2604        };
2605        u32 status;
2606        int ret;
2607
2608        if (start) {
2609                cmd.add_immediate_ba_tid = tid;
2610                cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2611                cmd.rx_ba_window = cpu_to_le16(buf_size);
2612                cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2613        } else {
2614                cmd.remove_immediate_ba_tid = tid;
2615                cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2616        }
2617
2618        status = ADD_STA_SUCCESS;
2619        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2620                                          iwl_mvm_add_sta_cmd_size(mvm),
2621                                          &cmd, &status);
2622        if (ret)
2623                return ret;
2624
2625        switch (status & IWL_ADD_STA_STATUS_MASK) {
2626        case ADD_STA_SUCCESS:
2627                IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2628                             start ? "start" : "stopp");
2629                if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2630                            !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2631                        return -EINVAL;
2632                return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2633        case ADD_STA_IMMEDIATE_BA_FAILURE:
2634                IWL_WARN(mvm, "RX BA Session refused by fw\n");
2635                return -ENOSPC;
2636        default:
2637                IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2638                        start ? "start" : "stopp", status);
2639                return -EIO;
2640        }
2641}
2642
2643static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
2644                                  struct iwl_mvm_sta *mvm_sta,
2645                                  bool start, int tid, u16 ssn,
2646                                  u16 buf_size, int baid)
2647{
2648        struct iwl_rx_baid_cfg_cmd cmd = {
2649                .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
2650                                  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
2651        };
2652        u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
2653        int ret;
2654
2655        BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
2656
2657        if (start) {
2658                cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2659                cmd.alloc.tid = tid;
2660                cmd.alloc.ssn = cpu_to_le16(ssn);
2661                cmd.alloc.win_size = cpu_to_le16(buf_size);
2662                baid = -EIO;
2663        } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
2664                cmd.remove_v1.baid = cpu_to_le32(baid);
2665                BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
2666        } else {
2667                cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2668                cmd.remove.tid = cpu_to_le32(tid);
2669        }
2670
2671        ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
2672                                          &cmd, &baid);
2673        if (ret)
2674                return ret;
2675
2676        if (!start) {
2677                /* ignore firmware baid on remove */
2678                baid = 0;
2679        }
2680
2681        IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2682                     start ? "start" : "stopp");
2683
2684        if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
2685                return -EINVAL;
2686
2687        return baid;
2688}
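
/*
 * Editor's note: iwl_mvm_send_cmd_pdu_status() writes the firmware's
 * 32-bit response into the variable passed via the status pointer, so
 * 'baid' above doubles as the command status and, on a successful add,
 * as the allocated BAID. The BUILD_BUG_ON() ties this to the size of
 * struct iwl_rx_baid_cfg_resp so the reuse can't silently break.
 */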
2689
2690static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
2691                              bool start, int tid, u16 ssn, u16 buf_size,
2692                              int baid)
2693{
2694        if (fw_has_capa(&mvm->fw->ucode_capa,
2695                        IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
2696                return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
2697                                              tid, ssn, buf_size, baid);
2698
2699        return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
2700                                      tid, ssn, buf_size);
2701}
2702
2703int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2704                       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2705{
2706        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2707        struct iwl_mvm_baid_data *baid_data = NULL;
2708        int ret, baid;
2709        u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
2710                                                               IWL_MAX_BAID_OLD;
2711
2712        lockdep_assert_held(&mvm->mutex);
2713
2714        if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
2715                IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2716                return -ENOSPC;
2717        }
2718
2719        if (iwl_mvm_has_new_rx_api(mvm) && start) {
2720                u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2721
2722                /* sparse doesn't like the __align() so don't check */
2723#ifndef __CHECKER__
2724                /*
2725                 * The division below will be OK if either the cache line size
2726                 * can be divided by the entry size (ALIGN will round up) or if
2727                 * the entry size can be divided by the cache line size, in
2728                 * which case the ALIGN() will do nothing.
2729                 */
2730                BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2731                             sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2732#endif
2733
2734                /*
2735                 * Upward align the reorder buffer size to fill an entire cache
2736                 * line for each queue, to avoid sharing cache lines between
2737                 * different queues.
2738                 */
2739                reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2740
2741                /*
2742                 * Allocate here so if allocation fails we can bail out early
2743                 * before starting the BA session in the firmware
2744                 */
2745                baid_data = kzalloc(sizeof(*baid_data) +
2746                                    mvm->trans->num_rx_queues *
2747                                    reorder_buf_size,
2748                                    GFP_KERNEL);
2749                if (!baid_data)
2750                        return -ENOMEM;
2751
2752                /*
2753                 * This division is why we need the above BUILD_BUG_ON();
2754                 * if that doesn't hold, this will not be right.
2755                 */
2756                baid_data->entries_per_queue =
2757                        reorder_buf_size / sizeof(baid_data->entries[0]);
2758        }
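
        /*
         * Editor's worked example (hypothetical sizes): with
         * SMP_CACHE_BYTES == 64 and sizeof(entries[0]) == 16, a buf_size
         * of 25 gives 25 * 16 = 400 bytes, ALIGN(400, 64) = 448 bytes,
         * and entries_per_queue = 448 / 16 = 28.
         */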
2759
2760        if (iwl_mvm_has_new_rx_api(mvm) && !start) {
2761                baid = mvm_sta->tid_to_baid[tid];
2762        } else {
2763                /* we don't really need it in this case */
2764                baid = -1;
2765        }
2766
2767        /* Don't send command to remove (start=0) BAID during restart */
2768        if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2769                baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
2770                                          baid);
2771
2772        if (baid < 0) {
2773                ret = baid;
2774                goto out_free;
2775        }
2776
2777        if (start) {
2778                mvm->rx_ba_sessions++;
2779
2780                if (!iwl_mvm_has_new_rx_api(mvm))
2781                        return 0;
2782
2783                baid_data->baid = baid;
2784                baid_data->timeout = timeout;
2785                baid_data->last_rx = jiffies;
2786                baid_data->rcu_ptr = &mvm->baid_map[baid];
2787                timer_setup(&baid_data->session_timer,
2788                            iwl_mvm_rx_agg_session_expired, 0);
2789                baid_data->mvm = mvm;
2790                baid_data->tid = tid;
2791                baid_data->sta_id = mvm_sta->sta_id;
2792
2793                mvm_sta->tid_to_baid[tid] = baid;
2794                if (timeout)
2795                        mod_timer(&baid_data->session_timer,
2796                                  TU_TO_EXP_TIME(timeout * 2));
2797
2798                iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2799                /*
2800                 * protect the BA data with RCU to cover a case where our
2801                 * internal RX sync mechanism times out (not that it's
2802                 * supposed to happen) and we free the session data while
2803                 * RX is still being processed in parallel
2804                 */
2805                IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2806                             mvm_sta->sta_id, tid, baid);
2807                WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2808                rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2809        } else {
2810                baid = mvm_sta->tid_to_baid[tid];
2811
2812                if (mvm->rx_ba_sessions > 0)
2813                        /* check that restart flow didn't zero the counter */
2814                        mvm->rx_ba_sessions--;
2815                if (!iwl_mvm_has_new_rx_api(mvm))
2816                        return 0;
2817
2818                if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2819                        return -EINVAL;
2820
2821                baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2822                if (WARN_ON(!baid_data))
2823                        return -EINVAL;
2824
2825                /* synchronize all rx queues so we can safely delete */
2826                iwl_mvm_free_reorder(mvm, baid_data);
2827                del_timer_sync(&baid_data->session_timer);
2828                RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2829                kfree_rcu(baid_data, rcu_head);
2830                IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2831
2832                /*
2833                 * After we've deleted it, do another queue sync
2834                 * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
2835                 * running it won't find a new session in the old
2836                 * BAID. It can find the NULL pointer for the BAID,
2837                 * but we must not have it find a different session.
2838                 */
2839                iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
2840                                                true, NULL, 0);
2841        }
2842        return 0;
2843
2844out_free:
2845        kfree(baid_data);
2846        return ret;
2847}
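
/*
 * Worked example (standalone sketch, not driver code) of the reorder
 * buffer sizing done in iwl_mvm_sta_rx_agg() above: the per-queue
 * buffer is rounded up to a whole number of cache lines so queues
 * never share a cache line, and entries_per_queue is derived from the
 * aligned size. Cache-line size, entry size and window size below are
 * assumed values for illustration only.
 */
#include <stdio.h>

#define EX_CACHE_BYTES	64	/* assumed SMP_CACHE_BYTES */
#define EX_ENTRY_SIZE	16	/* assumed sizeof(entries[0]) */
#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int buf_size = 64;	/* assumed BA window size */
	unsigned int reorder_buf_size = buf_size * EX_ENTRY_SIZE;

	/* fill whole cache lines, as the driver's ALIGN() does */
	reorder_buf_size = EX_ALIGN(reorder_buf_size, EX_CACHE_BYTES);

	/* valid because EX_CACHE_BYTES % EX_ENTRY_SIZE == 0 here */
	printf("entries_per_queue = %u\n",
	       reorder_buf_size / EX_ENTRY_SIZE);
	return 0;
}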
2848
2849int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2850                       int tid, u8 queue, bool start)
2851{
2852        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2853        struct iwl_mvm_add_sta_cmd cmd = {};
2854        int ret;
2855        u32 status;
2856
2857        lockdep_assert_held(&mvm->mutex);
2858
2859        if (start) {
2860                mvm_sta->tfd_queue_msk |= BIT(queue);
2861                mvm_sta->tid_disable_agg &= ~BIT(tid);
2862        } else {
2863                /* In DQA-mode the queue isn't removed on agg termination */
2864                mvm_sta->tid_disable_agg |= BIT(tid);
2865        }
2866
2867        cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2868        cmd.sta_id = mvm_sta->sta_id;
2869        cmd.add_modify = STA_MODE_MODIFY;
2870        if (!iwl_mvm_has_new_tx_api(mvm))
2871                cmd.modify_mask = STA_MODIFY_QUEUES;
2872        cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2873        cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2874        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2875
2876        status = ADD_STA_SUCCESS;
2877        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2878                                          iwl_mvm_add_sta_cmd_size(mvm),
2879                                          &cmd, &status);
2880        if (ret)
2881                return ret;
2882
2883        switch (status & IWL_ADD_STA_STATUS_MASK) {
2884        case ADD_STA_SUCCESS:
2885                break;
2886        default:
2887                ret = -EIO;
2888                IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2889                        start ? "start" : "stopp", status);
2890                break;
2891        }
2892
2893        return ret;
2894}
2895
2896const u8 tid_to_mac80211_ac[] = {
2897        IEEE80211_AC_BE,
2898        IEEE80211_AC_BK,
2899        IEEE80211_AC_BK,
2900        IEEE80211_AC_BE,
2901        IEEE80211_AC_VI,
2902        IEEE80211_AC_VI,
2903        IEEE80211_AC_VO,
2904        IEEE80211_AC_VO,
2905        IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2906};
2907
2908static const u8 tid_to_ucode_ac[] = {
2909        AC_BE,
2910        AC_BK,
2911        AC_BK,
2912        AC_BE,
2913        AC_VI,
2914        AC_VI,
2915        AC_VO,
2916        AC_VO,
2917};
2918
2919int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2920                             struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2921{
2922        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2923        struct iwl_mvm_tid_data *tid_data;
2924        u16 normalized_ssn;
2925        u16 txq_id;
2926        int ret;
2927
2928        if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2929                return -EINVAL;
2930
2931        if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2932            mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2933                IWL_ERR(mvm,
2934                        "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2935                        mvmsta->tid_data[tid].state);
2936                return -ENXIO;
2937        }
2938
2939        lockdep_assert_held(&mvm->mutex);
2940
2941        if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2942            iwl_mvm_has_new_tx_api(mvm)) {
2943                u8 ac = tid_to_mac80211_ac[tid];
2944
2945                ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2946                if (ret)
2947                        return ret;
2948        }
2949
2950        spin_lock_bh(&mvmsta->lock);
2951
2952        /*
2953         * Note the possible cases:
2954         *  1. An enabled TXQ - TXQ needs to become agg'ed
2955         *  2. The TXQ hasn't yet been enabled, so find a free one and mark
2956         *      it as reserved
2957         */
2958        txq_id = mvmsta->tid_data[tid].txq_id;
2959        if (txq_id == IWL_MVM_INVALID_QUEUE) {
2960                ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2961                                              IWL_MVM_DQA_MIN_DATA_QUEUE,
2962                                              IWL_MVM_DQA_MAX_DATA_QUEUE);
2963                if (ret < 0) {
2964                        IWL_ERR(mvm, "Failed to allocate agg queue\n");
2965                        goto out;
2966                }
2967
2968                txq_id = ret;
2969
2970                /* TXQ hasn't yet been enabled, so mark it only as reserved */
2971                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2972        } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2973                ret = -ENXIO;
2974                IWL_ERR(mvm, "txq_id %d out of range (0, %d)!\n",
2975                        txq_id, IWL_MAX_HW_QUEUES - 1);
2976                goto out;
2977
2978        } else if (unlikely(mvm->queue_info[txq_id].status ==
2979                            IWL_MVM_QUEUE_SHARED)) {
2980                ret = -ENXIO;
2981                IWL_DEBUG_TX_QUEUES(mvm,
2982                                    "Can't start tid %d agg on shared queue!\n",
2983                                    tid);
2984                goto out;
2985        }
2986
2987        IWL_DEBUG_TX_QUEUES(mvm,
2988                            "AGG for tid %d will be on queue #%d\n",
2989                            tid, txq_id);
2990
2991        tid_data = &mvmsta->tid_data[tid];
2992        tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2993        tid_data->txq_id = txq_id;
2994        *ssn = tid_data->ssn;
2995
2996        IWL_DEBUG_TX_QUEUES(mvm,
2997                            "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2998                            mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2999                            tid_data->next_reclaimed);
3000
3001        /*
3002         * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3003         * to align the wrap around of ssn so we compare relevant values.
3004         */
3005        normalized_ssn = tid_data->ssn;
3006        if (mvm->trans->trans_cfg->gen2)
3007                normalized_ssn &= 0xff;
3008
3009        if (normalized_ssn == tid_data->next_reclaimed) {
3010                tid_data->state = IWL_AGG_STARTING;
3011                ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
3012        } else {
3013                tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3014                ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
3015        }
3016
3017out:
3018        spin_unlock_bh(&mvmsta->lock);
3019
3020        return ret;
3021}
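
/*
 * Worked example (standalone sketch, not driver code) of the SSN
 * normalization in iwl_mvm_sta_tx_agg_start() above: on gen2 devices
 * next_reclaimed is only 8 bits wide, so the SSN is masked to 8 bits
 * before deciding whether the queue has drained and the session may
 * start immediately. The values are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

static bool ex_can_start_immediately(unsigned short ssn,
				     unsigned short next_reclaimed,
				     bool gen2)
{
	unsigned short normalized_ssn = ssn;

	if (gen2)
		normalized_ssn &= 0xff;	/* align the wrap-around */

	return normalized_ssn == next_reclaimed;
}

int main(void)
{
	/* 0x312 and 0x12 agree once reduced to their low 8 bits */
	printf("%d\n", ex_can_start_immediately(0x312, 0x12, true));
	return 0;
}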
3022
3023int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3024                            struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3025                            bool amsdu)
3026{
3027        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3028        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3029        unsigned int wdg_timeout =
3030                iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
3031        int queue, ret;
3032        bool alloc_queue = true;
3033        enum iwl_mvm_queue_status queue_status;
3034        u16 ssn;
3035
3036        struct iwl_trans_txq_scd_cfg cfg = {
3037                .sta_id = mvmsta->sta_id,
3038                .tid = tid,
3039                .frame_limit = buf_size,
3040                .aggregate = true,
3041        };
3042
3043        /*
3044         * When the FW supports TLC_OFFLOAD, it also implements the Tx
3045         * aggregation manager, so this function should never be called.
3046         */
3047        if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
3048                return -EINVAL;
3049
3050        BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3051                     != IWL_MAX_TID_COUNT);
3052
3053        spin_lock_bh(&mvmsta->lock);
3054        ssn = tid_data->ssn;
3055        queue = tid_data->txq_id;
3056        tid_data->state = IWL_AGG_ON;
3057        mvmsta->agg_tids |= BIT(tid);
3058        tid_data->ssn = 0xffff;
3059        tid_data->amsdu_in_ampdu_allowed = amsdu;
3060        spin_unlock_bh(&mvmsta->lock);
3061
3062        if (iwl_mvm_has_new_tx_api(mvm)) {
3063                /*
3064                 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3065                 * would have failed, so if we are here there is no need to
3066                 * allocate a queue.
3067                 * However, if the aggregation size is different from the
3068                 * default size, the scheduler should be reconfigured.
3069                 * We cannot do this with the new TX API, so return unsupported
3070                 * for now, until it is offloaded to firmware.
3071                 * Note that if the SCD default value changes, this condition
3072                 * should be updated as well.
3073                 */
3074                if (buf_size < IWL_FRAME_LIMIT)
3075                        return -ENOTSUPP;
3076
3077                ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3078                if (ret)
3079                        return -EIO;
3080                goto out;
3081        }
3082
3083        cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3084
3085        queue_status = mvm->queue_info[queue].status;
3086
3087        /* Maybe there is no need to even alloc a queue... */
3088        if (queue_status == IWL_MVM_QUEUE_READY)
3089                alloc_queue = false;
3090
3091        /*
3092         * Only reconfigure the SCD for the queue if the window size has
3093         * become smaller than the current one
3094         */
3095        if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3096                /*
3097                 * If reconfiguring an existing queue, it first must be
3098                 * drained
3099                 */
3100                ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3101                                                     BIT(queue));
3102                if (ret) {
3103                        IWL_ERR(mvm,
3104                                "Error draining queue before reconfig\n");
3105                        return ret;
3106                }
3107
3108                ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3109                                           mvmsta->sta_id, tid,
3110                                           buf_size, ssn);
3111                if (ret) {
3112                        IWL_ERR(mvm,
3113                                "Error reconfiguring TXQ #%d\n", queue);
3114                        return ret;
3115                }
3116        }
3117
3118        if (alloc_queue)
3119                iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3120                                   &cfg, wdg_timeout);
3121
3122        /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3123        if (queue_status != IWL_MVM_QUEUE_SHARED) {
3124                ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3125                if (ret)
3126                        return -EIO;
3127        }
3128
3129        /* No need to mark as reserved */
3130        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3131
3132out:
3133        /*
3134         * Even though in theory the peer could have different
3135         * aggregation reorder buffer sizes for different sessions,
3136         * our ucode doesn't allow for that and has a global limit
3137         * for each station. Therefore, use the minimum of all the
3138         * aggregation sessions and our default value.
3139         */
3140        mvmsta->max_agg_bufsize =
3141                min(mvmsta->max_agg_bufsize, buf_size);
3142        mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3143
3144        IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3145                     sta->addr, tid);
3146
3147        return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
3148}
3149
3150static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3151                                        struct iwl_mvm_sta *mvmsta,
3152                                        struct iwl_mvm_tid_data *tid_data)
3153{
3154        u16 txq_id = tid_data->txq_id;
3155
3156        lockdep_assert_held(&mvm->mutex);
3157
3158        if (iwl_mvm_has_new_tx_api(mvm))
3159                return;
3160
3161        /*
3162         * The TXQ is marked as reserved only if no traffic came through yet.
3163         * This means no traffic has been sent on this TID (agg'd or not), so
3164         * we no longer have use for the queue. It hasn't even been
3165         * allocated through iwl_mvm_enable_txq, so we can just mark it back
3166         * as free.
3167         */
3168        if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3169                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3170                tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3171        }
3172}
3173
3174int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3175                            struct ieee80211_sta *sta, u16 tid)
3176{
3177        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3178        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3179        u16 txq_id;
3180        int err;
3181
3182        /*
3183         * If mac80211 is cleaning its state, then say that we finished since
3184         * our state has been cleared anyway.
3185         */
3186        if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3187                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3188                return 0;
3189        }
3190
3191        spin_lock_bh(&mvmsta->lock);
3192
3193        txq_id = tid_data->txq_id;
3194
3195        IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3196                            mvmsta->sta_id, tid, txq_id, tid_data->state);
3197
3198        mvmsta->agg_tids &= ~BIT(tid);
3199
3200        iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3201
3202        switch (tid_data->state) {
3203        case IWL_AGG_ON:
3204                tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3205
3206                IWL_DEBUG_TX_QUEUES(mvm,
3207                                    "ssn = %d, next_recl = %d\n",
3208                                    tid_data->ssn, tid_data->next_reclaimed);
3209
3210                tid_data->ssn = 0xffff;
3211                tid_data->state = IWL_AGG_OFF;
3212                spin_unlock_bh(&mvmsta->lock);
3213
3214                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3215
3216                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3217                return 0;
3218        case IWL_AGG_STARTING:
3219        case IWL_EMPTYING_HW_QUEUE_ADDBA:
3220                /*
3221                 * The agg session has been stopped before it was set up. This
3222                 * can happen when the AddBA timer times out for example.
3223                 */
3224
3225                /* No barriers since we are under mutex */
3226                lockdep_assert_held(&mvm->mutex);
3227
3228                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3229                tid_data->state = IWL_AGG_OFF;
3230                err = 0;
3231                break;
3232        default:
3233                IWL_ERR(mvm,
3234                        "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3235                        mvmsta->sta_id, tid, tid_data->state);
3236                IWL_ERR(mvm,
3237                        "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3238                err = -EINVAL;
3239        }
3240
3241        spin_unlock_bh(&mvmsta->lock);
3242
3243        return err;
3244}
3245
3246int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3247                            struct ieee80211_sta *sta, u16 tid)
3248{
3249        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3250        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3251        u16 txq_id;
3252        enum iwl_mvm_agg_state old_state;
3253
3254        /*
3255         * First set the agg state to OFF to avoid calling
3256         * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3257         */
3258        spin_lock_bh(&mvmsta->lock);
3259        txq_id = tid_data->txq_id;
3260        IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3261                            mvmsta->sta_id, tid, txq_id, tid_data->state);
3262        old_state = tid_data->state;
3263        tid_data->state = IWL_AGG_OFF;
3264        mvmsta->agg_tids &= ~BIT(tid);
3265        spin_unlock_bh(&mvmsta->lock);
3266
3267        iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3268
3269        if (old_state >= IWL_AGG_ON) {
3270                iwl_mvm_drain_sta(mvm, mvmsta, true);
3271
3272                if (iwl_mvm_has_new_tx_api(mvm)) {
3273                        if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3274                                                   BIT(tid)))
3275                                IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3276                        iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3277                } else {
3278                        if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3279                                IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3280                        iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3281                }
3282
3283                iwl_mvm_drain_sta(mvm, mvmsta, false);
3284
3285                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3286        }
3287
3288        return 0;
3289}
3290
3291static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3292{
3293        int i, max = -1, max_offs = -1;
3294
3295        lockdep_assert_held(&mvm->mutex);
3296
3297        /* Pick the unused key offset with the highest 'deleted'
3298         * counter. Every time a key is deleted, all the counters
3299         * are incremented and the one that was just deleted is
3300         * reset to zero. Thus, the highest counter is the one
3301         * that was deleted longest ago. Pick that one.
3302         */
3303        for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3304                if (test_bit(i, mvm->fw_key_table))
3305                        continue;
3306                if (mvm->fw_key_deleted[i] > max) {
3307                        max = mvm->fw_key_deleted[i];
3308                        max_offs = i;
3309                }
3310        }
3311
3312        if (max_offs < 0)
3313                return STA_KEY_IDX_INVALID;
3314
3315        return max_offs;
3316}
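
/*
 * Worked example (standalone sketch, not driver code) of the key-slot
 * aging scheme used by iwl_mvm_set_fw_key_idx() above: on every
 * deletion all counters grow and the freed slot resets to zero, so the
 * free slot with the highest counter is the one deleted longest ago.
 * The 4-slot table is an assumption for illustration.
 */
#include <stdio.h>

#define EX_KEYS 4

static unsigned char ex_deleted[EX_KEYS];
static unsigned char ex_used[EX_KEYS];

static void ex_delete_key(int idx)
{
	int i;

	for (i = 0; i < EX_KEYS; i++)
		if (ex_deleted[i] < 255)
			ex_deleted[i]++;
	ex_deleted[idx] = 0;
	ex_used[idx] = 0;
}

static int ex_pick_offset(void)
{
	int i, max = -1, max_offs = -1;

	for (i = 0; i < EX_KEYS; i++) {
		if (ex_used[i])
			continue;
		if (ex_deleted[i] > max) {
			max = ex_deleted[i];
			max_offs = i;
		}
	}
	return max_offs;
}

int main(void)
{
	int i;

	for (i = 0; i < EX_KEYS; i++)
		ex_used[i] = 1;		/* all slots in use */
	ex_delete_key(0);		/* slot 0 freed first... */
	ex_delete_key(1);		/* ...then slot 1 */
	printf("reuse slot %d first\n", ex_pick_offset());	/* 0 */
	return 0;
}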
3317
3318static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3319                                               struct ieee80211_vif *vif,
3320                                               struct ieee80211_sta *sta)
3321{
3322        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3323
3324        if (sta)
3325                return iwl_mvm_sta_from_mac80211(sta);
3326
3327        /*
3328         * The device expects GTKs for station interfaces to be
3329         * installed as GTKs for the AP station. If we have no
3330         * station ID, then use AP's station ID.
3331         */
3332        if (vif->type == NL80211_IFTYPE_STATION &&
3333            mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3334                u8 sta_id = mvmvif->ap_sta_id;
3335
3336                sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3337                                            lockdep_is_held(&mvm->mutex));
3338
3339                /*
3340                 * It is possible that the 'sta' parameter is NULL,
3341                 * for example when a GTK is removed - the sta_id will then
3342                 * be the AP ID, and no station was passed by mac80211.
3343                 */
3344                if (IS_ERR_OR_NULL(sta))
3345                        return NULL;
3346
3347                return iwl_mvm_sta_from_mac80211(sta);
3348        }
3349
3350        return NULL;
3351}
3352
3353static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3354{
3355        int i;
3356
3357        for (i = len - 1; i >= 0; i--) {
3358                if (pn1[i] > pn2[i])
3359                        return 1;
3360                if (pn1[i] < pn2[i])
3361                        return -1;
3362        }
3363
3364        return 0;
3365}
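
/*
 * Worked example (standalone sketch, not driver code) of the byte-wise
 * PN comparison in iwl_mvm_pn_cmp() above: the byte at the highest
 * index is the most significant, so the scan runs from the end of the
 * array toward index 0 and stops at the first difference.
 */
#include <stdio.h>

static int ex_pn_cmp(const unsigned char *pn1, const unsigned char *pn2,
		     int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (pn1[i] > pn2[i])
			return 1;
		if (pn1[i] < pn2[i])
			return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char a[6] = { 0xff, 0x00, 0x00, 0x00, 0x00, 0x01 };
	unsigned char b[6] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0x00 };

	/* byte 5 (most significant) decides: a > b, prints 1 */
	printf("%d\n", ex_pn_cmp(a, b, 6));
	return 0;
}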
3366
3367static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3368                                u32 sta_id,
3369                                struct ieee80211_key_conf *key, bool mcast,
3370                                u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3371                                u8 key_offset, bool mfp)
3372{
3373        union {
3374                struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3375                struct iwl_mvm_add_sta_key_cmd cmd;
3376        } u = {};
3377        __le16 key_flags;
3378        int ret;
3379        u32 status;
3380        u16 keyidx;
3381        u64 pn = 0;
3382        int i, size;
3383        bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3384                                  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3385        int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3386                                            new_api ? 2 : 1);
3387
3388        if (sta_id == IWL_MVM_INVALID_STA)
3389                return -EINVAL;
3390
3391        keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3392                 STA_KEY_FLG_KEYID_MSK;
3393        key_flags = cpu_to_le16(keyidx);
3394        key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3395
3396        switch (key->cipher) {
3397        case WLAN_CIPHER_SUITE_TKIP:
3398                key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3399                if (api_ver >= 2) {
3400                        memcpy((void *)&u.cmd.tx_mic_key,
3401                               &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3402                               IWL_MIC_KEY_SIZE);
3403
3404                        memcpy((void *)&u.cmd.rx_mic_key,
3405                               &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3406                               IWL_MIC_KEY_SIZE);
3407                        pn = atomic64_read(&key->tx_pn);
3408
3409                } else {
3410                        u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3411                        for (i = 0; i < 5; i++)
3412                                u.cmd_v1.tkip_rx_ttak[i] =
3413                                        cpu_to_le16(tkip_p1k[i]);
3414                }
3415                memcpy(u.cmd.common.key, key->key, key->keylen);
3416                break;
3417        case WLAN_CIPHER_SUITE_CCMP:
3418                key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3419                memcpy(u.cmd.common.key, key->key, key->keylen);
3420                if (api_ver >= 2)
3421                        pn = atomic64_read(&key->tx_pn);
3422                break;
3423        case WLAN_CIPHER_SUITE_WEP104:
3424                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3425                fallthrough;
3426        case WLAN_CIPHER_SUITE_WEP40:
3427                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3428                memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3429                break;
3430        case WLAN_CIPHER_SUITE_GCMP_256:
3431                key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3432                fallthrough;
3433        case WLAN_CIPHER_SUITE_GCMP:
3434                key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3435                memcpy(u.cmd.common.key, key->key, key->keylen);
3436                if (api_ver >= 2)
3437                        pn = atomic64_read(&key->tx_pn);
3438                break;
3439        default:
3440                key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3441                memcpy(u.cmd.common.key, key->key, key->keylen);
3442        }
3443
3444        if (mcast)
3445                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3446        if (mfp)
3447                key_flags |= cpu_to_le16(STA_KEY_MFP);
3448
3449        u.cmd.common.key_offset = key_offset;
3450        u.cmd.common.key_flags = key_flags;
3451        u.cmd.common.sta_id = sta_id;
3452
3453        if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3454                i = 0;
3455        else
3456                i = -1;
3457
3458        for (; i < IEEE80211_NUM_TIDS; i++) {
3459                struct ieee80211_key_seq seq = {};
3460                u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3461                int rx_pn_len = 8;
3462                /* there's a hole at 2/3 in FW format depending on version */
3463                int hole = api_ver >= 3 ? 0 : 2;
3464
3465                ieee80211_get_key_rx_seq(key, i, &seq);
3466
3467                if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3468                        rx_pn[0] = seq.tkip.iv16;
3469                        rx_pn[1] = seq.tkip.iv16 >> 8;
3470                        rx_pn[2 + hole] = seq.tkip.iv32;
3471                        rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3472                        rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3473                        rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3474                } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3475                        rx_pn = seq.hw.seq;
3476                        rx_pn_len = seq.hw.seq_len;
3477                } else {
3478                        rx_pn[0] = seq.ccmp.pn[0];
3479                        rx_pn[1] = seq.ccmp.pn[1];
3480                        rx_pn[2 + hole] = seq.ccmp.pn[2];
3481                        rx_pn[3 + hole] = seq.ccmp.pn[3];
3482                        rx_pn[4 + hole] = seq.ccmp.pn[4];
3483                        rx_pn[5 + hole] = seq.ccmp.pn[5];
3484                }
3485
3486                if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3487                                   rx_pn_len) > 0)
3488                        memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3489                               rx_pn_len);
3490        }
3491
3492        if (api_ver >= 2) {
3493                u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3494                size = sizeof(u.cmd);
3495        } else {
3496                size = sizeof(u.cmd_v1);
3497        }
3498
3499        status = ADD_STA_SUCCESS;
3500        if (cmd_flags & CMD_ASYNC)
3501                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3502                                           &u.cmd);
3503        else
3504                ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3505                                                  &u.cmd, &status);
3506
3507        switch (status) {
3508        case ADD_STA_SUCCESS:
3509                IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3510                break;
3511        default:
3512                ret = -EIO;
3513                IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3514                break;
3515        }
3516
3517        return ret;
3518}
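
/*
 * Worked example (standalone sketch, not driver code) of the RX PN
 * packing in iwl_mvm_send_sta_key() above: with api_ver < 3 the FW
 * layout has a 2-byte hole after the first two PN bytes, so bytes 2..5
 * of a 6-byte CCMP PN land at offsets 4..7. The PN value is made up
 * for illustration.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char pn[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	unsigned char fw[8];
	int api_ver = 2;		/* assumed old API */
	int hole = api_ver >= 3 ? 0 : 2;
	int i;

	memset(fw, 0, sizeof(fw));
	fw[0] = pn[0];
	fw[1] = pn[1];
	fw[2 + hole] = pn[2];
	fw[3 + hole] = pn[3];
	fw[4 + hole] = pn[4];
	fw[5 + hole] = pn[5];

	for (i = 0; i < 8; i++)		/* 11 22 00 00 33 44 55 66 */
		printf("%02x ", fw[i]);
	printf("\n");
	return 0;
}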
3519
3520static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3521                                 struct ieee80211_key_conf *keyconf,
3522                                 u8 sta_id, bool remove_key)
3523{
3524        struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3525
3526        /* verify the key details match the required command's expectations */
3527        if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3528                    (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3529                     keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3530                    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3531                     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3532                     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3533                return -EINVAL;
3534
3535        if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3536                    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3537                return -EINVAL;
3538
3539        igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3540        igtk_cmd.sta_id = cpu_to_le32(sta_id);
3541
3542        if (remove_key) {
3543                /* This is a valid situation for IGTK */
3544                if (sta_id == IWL_MVM_INVALID_STA)
3545                        return 0;
3546
3547                igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3548        } else {
3549                struct ieee80211_key_seq seq;
3550                const u8 *pn;
3551
3552                switch (keyconf->cipher) {
3553                case WLAN_CIPHER_SUITE_AES_CMAC:
3554                        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3555                        break;
3556                case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3557                case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3558                        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3559                        break;
3560                default:
3561                        return -EINVAL;
3562                }
3563
3564                memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3565                if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3566                        igtk_cmd.ctrl_flags |=
3567                                cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3568                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3569                pn = seq.aes_cmac.pn;
3570                igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3571                                                       ((u64) pn[4] << 8) |
3572                                                       ((u64) pn[3] << 16) |
3573                                                       ((u64) pn[2] << 24) |
3574                                                       ((u64) pn[1] << 32) |
3575                                                       ((u64) pn[0] << 40));
3576        }
3577
3578        IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3579                       remove_key ? "removing" : "installing",
3580                       keyconf->keyidx >= 6 ? "B" : "",
3581                       keyconf->keyidx, igtk_cmd.sta_id);
3582
3583        if (!iwl_mvm_has_new_rx_api(mvm)) {
3584                struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3585                        .ctrl_flags = igtk_cmd.ctrl_flags,
3586                        .key_id = igtk_cmd.key_id,
3587                        .sta_id = igtk_cmd.sta_id,
3588                        .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3589                };
3590
3591                memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3592                       ARRAY_SIZE(igtk_cmd_v1.igtk));
3593                return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3594                                            sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3595        }
3596        return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3597                                    sizeof(igtk_cmd), &igtk_cmd);
3598}
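
/*
 * Worked example (standalone sketch, not driver code) of the
 * receive_seq_cnt assembly in iwl_mvm_send_sta_igtk() above: the
 * 48-bit counter is built with pn[5] in the low byte and pn[0] in bits
 * 40..47, i.e. the PN array is treated as most-significant byte first.
 * The PN value is made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };
	uint64_t cnt = ((uint64_t)pn[5] << 0) |
		       ((uint64_t)pn[4] << 8) |
		       ((uint64_t)pn[3] << 16) |
		       ((uint64_t)pn[2] << 24) |
		       ((uint64_t)pn[1] << 32) |
		       ((uint64_t)pn[0] << 40);

	printf("0x%llx\n", (unsigned long long)cnt);	/* 0x102 */
	return 0;
}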
3599
3600
3601static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3602                                       struct ieee80211_vif *vif,
3603                                       struct ieee80211_sta *sta)
3604{
3605        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3606
3607        if (sta)
3608                return sta->addr;
3609
3610        if (vif->type == NL80211_IFTYPE_STATION &&
3611            mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3612                u8 sta_id = mvmvif->ap_sta_id;
3613                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3614                                                lockdep_is_held(&mvm->mutex));
3615                return sta->addr;
3616        }
3617
3618
3619        return NULL;
3620}
3621
3622static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3623                                 struct ieee80211_vif *vif,
3624                                 struct ieee80211_sta *sta,
3625                                 struct ieee80211_key_conf *keyconf,
3626                                 u8 key_offset,
3627                                 bool mcast)
3628{
3629        const u8 *addr;
3630        struct ieee80211_key_seq seq;
3631        u16 p1k[5];
3632        u32 sta_id;
3633        bool mfp = false;
3634
3635        if (sta) {
3636                struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3637
3638                sta_id = mvm_sta->sta_id;
3639                mfp = sta->mfp;
3640        } else if (vif->type == NL80211_IFTYPE_AP &&
3641                   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3642                struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3643
3644                sta_id = mvmvif->mcast_sta.sta_id;
3645        } else {
3646                IWL_ERR(mvm, "Failed to find station id\n");
3647                return -EINVAL;
3648        }
3649
3650        if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3651                addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3652                /* get phase 1 key from mac80211 */
3653                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3654                ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3655
3656                return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3657                                            seq.tkip.iv32, p1k, 0, key_offset,
3658                                            mfp);
3659        }
3660
3661        return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3662                                    0, NULL, 0, key_offset, mfp);
3663}
3664
3665int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3666                        struct ieee80211_vif *vif,
3667                        struct ieee80211_sta *sta,
3668                        struct ieee80211_key_conf *keyconf,
3669                        u8 key_offset)
3670{
3671        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3672        struct iwl_mvm_sta *mvm_sta;
3673        u8 sta_id = IWL_MVM_INVALID_STA;
3674        int ret;
3675        static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3676
3677        lockdep_assert_held(&mvm->mutex);
3678
3679        if (vif->type != NL80211_IFTYPE_AP ||
3680            keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3681                /* Get the station id from the mvm local station table */
3682                mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3683                if (!mvm_sta) {
3684                        IWL_ERR(mvm, "Failed to find station\n");
3685                        return -EINVAL;
3686                }
3687                sta_id = mvm_sta->sta_id;
3688
3689                /*
3690                 * It is possible that the 'sta' parameter is NULL, in which
3691                 * case we need to retrieve the sta from the local station
3692                 * table.
3693                 */
3694                if (!sta) {
3695                        sta = rcu_dereference_protected(
3696                                mvm->fw_id_to_mac_id[sta_id],
3697                                lockdep_is_held(&mvm->mutex));
3698                        if (IS_ERR_OR_NULL(sta)) {
3699                                IWL_ERR(mvm, "Invalid station id\n");
3700                                return -EINVAL;
3701                        }
3702                }
3703
3704                if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3705                        return -EINVAL;
3706        } else {
3707                struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3708
3709                sta_id = mvmvif->mcast_sta.sta_id;
3710        }
3711
3712        if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3713            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3714            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3715                ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3716                goto end;
3717        }
3718
3719        /* If the key_offset is not pre-assigned, we need to find a
3720         * new offset to use.  In normal cases, the offset is not
3721         * pre-assigned, but during HW_RESTART we want to reuse the
3722         * same indices, so we pass them when this function is called.
3723         *
3724         * In D3 entry, we need to hardcode the indices (because the
3725         * firmware hardcodes the PTK offset to 0).  In this case, we
3726         * need to make sure we don't overwrite the hw_key_idx in the
3727         * keyconf structure, because otherwise we cannot configure
3728         * the original ones back when resuming.
3729         */
3730        if (key_offset == STA_KEY_IDX_INVALID) {
3731                key_offset = iwl_mvm_set_fw_key_idx(mvm);
3732                if (key_offset == STA_KEY_IDX_INVALID)
3733                        return -ENOSPC;
3734                keyconf->hw_key_idx = key_offset;
3735        }
3736
3737        ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3738        if (ret)
3739                goto end;
3740
3741        /*
3742         * For WEP, the same key is used for multicast and unicast. Upload it
3743         * again, using the same key offset, and now pointing the other one
3744         * to the same key slot (offset).
3745         * If this fails, remove the original as well.
3746         */
3747        if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3748             keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3749            sta) {
3750                ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3751                                            key_offset, !mcast);
3752                if (ret) {
3753                        __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3754                        goto end;
3755                }
3756        }
3757
3758        __set_bit(key_offset, mvm->fw_key_table);
3759
3760end:
3761        IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3762                      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3763                      sta ? sta->addr : zero_addr, ret);
3764        return ret;
3765}
3766
3767int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3768                           struct ieee80211_vif *vif,
3769                           struct ieee80211_sta *sta,
3770                           struct ieee80211_key_conf *keyconf)
3771{
3772        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3773        struct iwl_mvm_sta *mvm_sta;
3774        u8 sta_id = IWL_MVM_INVALID_STA;
3775        int ret, i;
3776
3777        lockdep_assert_held(&mvm->mutex);
3778
3779        /* Get the station from the mvm local station table */
3780        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3781        if (mvm_sta)
3782                sta_id = mvm_sta->sta_id;
3783        else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3784                sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3785
3786
3787        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3788                      keyconf->keyidx, sta_id);
3789
3790        if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3791            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3792            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3793                return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3794
3795        if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3796                IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3797                        keyconf->hw_key_idx);
3798                return -ENOENT;
3799        }
3800
3801        /* track which key was deleted last */
3802        for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3803                if (mvm->fw_key_deleted[i] < U8_MAX)
3804                        mvm->fw_key_deleted[i]++;
3805        }
3806        mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3807
3808        if (sta && !mvm_sta) {
3809                IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3810                return 0;
3811        }
3812
3813        ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3814        if (ret)
3815                return ret;
3816
3817        /* delete WEP key twice to get rid of (now useless) offset */
3818        if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3819            keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3820                ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3821
3822        return ret;
3823}
3824
3825void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3826                             struct ieee80211_vif *vif,
3827                             struct ieee80211_key_conf *keyconf,
3828                             struct ieee80211_sta *sta, u32 iv32,
3829                             u16 *phase1key)
3830{
3831        struct iwl_mvm_sta *mvm_sta;
3832        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3833        bool mfp = sta ? sta->mfp : false;
3834
3835        rcu_read_lock();
3836
3837        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3838        if (WARN_ON_ONCE(!mvm_sta))
3839                goto unlock;
3840        iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3841                             iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3842                             mfp);
3843
3844 unlock:
3845        rcu_read_unlock();
3846}
3847
3848void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3849                                struct ieee80211_sta *sta)
3850{
3851        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3852        struct iwl_mvm_add_sta_cmd cmd = {
3853                .add_modify = STA_MODE_MODIFY,
3854                .sta_id = mvmsta->sta_id,
3855                .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3856                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3857        };
3858        int ret;
3859
3860        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3861                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3862        if (ret)
3863                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3864}
3865
3866void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3867                                       struct ieee80211_sta *sta,
3868                                       enum ieee80211_frame_release_type reason,
3869                                       u16 cnt, u16 tids, bool more_data,
3870                                       bool single_sta_queue)
3871{
3872        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3873        struct iwl_mvm_add_sta_cmd cmd = {
3874                .add_modify = STA_MODE_MODIFY,
3875                .sta_id = mvmsta->sta_id,
3876                .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3877                .sleep_tx_count = cpu_to_le16(cnt),
3878                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3879        };
3880        int tid, ret;
3881        unsigned long _tids = tids;
3882
3883        /* convert TIDs to ACs - we don't support TSPEC so that's OK
3884         * Note that this field is reserved and unused by firmware not
3885         * supporting GO uAPSD, so it's safe to always do this.
3886         */
3887        for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3888                cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3889
3890        /* If we're releasing frames from aggregation or dqa queues then check
3891         * if all the queues that we're releasing frames from, combined, have:
3892         *  - more frames than the service period, in which case more_data
3893         *    needs to be set
3894         *  - fewer than 'cnt' frames, in which case we need to adjust the
3895         *    firmware command (but do that unconditionally)
3896         */
3897        if (single_sta_queue) {
3898                int remaining = cnt;
3899                int sleep_tx_count;
3900
3901                spin_lock_bh(&mvmsta->lock);
3902                for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3903                        struct iwl_mvm_tid_data *tid_data;
3904                        u16 n_queued;
3905
3906                        tid_data = &mvmsta->tid_data[tid];
3907
3908                        n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3909                        if (n_queued > remaining) {
3910                                more_data = true;
3911                                remaining = 0;
3912                                break;
3913                        }
3914                        remaining -= n_queued;
3915                }
3916                sleep_tx_count = cnt - remaining;
3917                if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3918                        mvmsta->sleep_tx_count = sleep_tx_count;
3919                spin_unlock_bh(&mvmsta->lock);
3920
3921                cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3922                if (WARN_ON(cnt - remaining == 0)) {
3923                        ieee80211_sta_eosp(sta);
3924                        return;
3925                }
3926        }
3927
3928        /* Note: this is ignored by firmware not supporting GO uAPSD */
3929        if (more_data)
3930                cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3931
3932        if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3933                mvmsta->next_status_eosp = true;
3934                cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3935        } else {
3936                cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3937        }
3938
3939        /* block the Tx queues until the FW has updated the sleep Tx count */
3940        iwl_trans_block_txq_ptrs(mvm->trans, true);
3941
3942        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3943                                   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3944                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3945        if (ret)
3946                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3947}
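
/*
 * Worked example (standalone sketch, not driver code) of the
 * sleep_tx_count accounting in the function above: queued frames are
 * subtracted per TID from the requested release count; if one TID
 * alone holds more than what remains, more_data is set and the release
 * is capped at what was consumed so far. Queue depths are assumptions
 * for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int queued[3] = { 2, 1, 9 };	/* frames per TID */
	int cnt = 4, remaining = cnt;
	bool more_data = false;
	int i;

	for (i = 0; i < 3; i++) {
		if (queued[i] > (unsigned int)remaining) {
			more_data = true;
			remaining = 0;
			break;
		}
		remaining -= queued[i];
	}

	/* prints: sleep_tx_count=4 more_data=1 */
	printf("sleep_tx_count=%d more_data=%d\n",
	       cnt - remaining, more_data);
	return 0;
}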
3948
3949void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3950                           struct iwl_rx_cmd_buffer *rxb)
3951{
3952        struct iwl_rx_packet *pkt = rxb_addr(rxb);
3953        struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3954        struct ieee80211_sta *sta;
3955        u32 sta_id = le32_to_cpu(notif->sta_id);
3956
3957        if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
3958                return;
3959
3960        rcu_read_lock();
3961        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3962        if (!IS_ERR_OR_NULL(sta))
3963                ieee80211_sta_eosp(sta);
3964        rcu_read_unlock();
3965}
3966
3967void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3968                                   struct iwl_mvm_sta *mvmsta, bool disable)
3969{
3970        struct iwl_mvm_add_sta_cmd cmd = {
3971                .add_modify = STA_MODE_MODIFY,
3972                .sta_id = mvmsta->sta_id,
3973                .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3974                .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3975                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3976        };
3977        int ret;
3978
3979        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3980                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3981        if (ret)
3982                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3983}
3984
3985void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3986                                      struct ieee80211_sta *sta,
3987                                      bool disable)
3988{
3989        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3990
3991        spin_lock_bh(&mvm_sta->lock);
3992
3993        if (mvm_sta->disable_tx == disable) {
3994                spin_unlock_bh(&mvm_sta->lock);
3995                return;
3996        }
3997
3998        mvm_sta->disable_tx = disable;
3999
4000        /*
4001         * If sta PS state is handled by mac80211, tell it to start/stop
4002         * queuing tx for this station.
4003         */
4004        if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
4005                ieee80211_sta_block_awake(mvm->hw, sta, disable);
4006
4007        iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
4008
4009        spin_unlock_bh(&mvm_sta->lock);
4010}
4011
4012static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
4013                                              struct iwl_mvm_vif *mvmvif,
4014                                              struct iwl_mvm_int_sta *sta,
4015                                              bool disable)
4016{
4017        u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
4018        struct iwl_mvm_add_sta_cmd cmd = {
4019                .add_modify = STA_MODE_MODIFY,
4020                .sta_id = sta->sta_id,
4021                .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4022                .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4023                .mac_id_n_color = cpu_to_le32(id),
4024        };
4025        int ret;
4026
4027        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4028                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4029        if (ret)
4030                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4031}
4032
4033void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
4034                                       struct iwl_mvm_vif *mvmvif,
4035                                       bool disable)
4036{
4037        struct ieee80211_sta *sta;
4038        struct iwl_mvm_sta *mvm_sta;
4039        int i;
4040
4041        rcu_read_lock();
4042
4043        /* Block/unblock all the stations of the given mvmvif */
4044        for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
4045                sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
4046                if (IS_ERR_OR_NULL(sta))
4047                        continue;
4048
4049                mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4050                if (mvm_sta->mac_id_n_color !=
4051                    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
4052                        continue;
4053
4054                iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
4055        }
4056
4057        rcu_read_unlock();
4058
4059        if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
4060                return;
4061
4062        /* Need to block/unblock the multicast station as well */
4063        if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
4064                iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4065                                                  &mvmvif->mcast_sta, disable);
4066
4067        /*
4068         * Only unblock the broadcast station (FW blocks it for immediate
4069         * quiet, not the driver)
4070         */
4071        if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
4072                iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4073                                                  &mvmvif->bcast_sta, disable);
4074}
4075
4076void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4077{
4078        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4079        struct iwl_mvm_sta *mvmsta;
4080
4081        rcu_read_lock();
4082
4083        mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
4084
4085        if (mvmsta)
4086                iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4087
4088        rcu_read_unlock();
4089}
4090
4091u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4092{
4093        u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4094
4095        /*
4096         * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
4097         * to align the wrap around of ssn so we compare relevant values.
4098         */
4099        if (mvm->trans->trans_cfg->gen2)
4100                sn &= 0xff;
4101
4102        return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
4103}
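
/*
 * Worked example (standalone sketch, not driver code) of the
 * wrap-around arithmetic in iwl_mvm_tid_queued() above, using a local
 * stand-in for ieee80211_sn_sub(): subtraction is done modulo the
 * 12-bit sequence-number space, so it stays correct across the wrap.
 * The values are made up for illustration.
 */
#include <stdio.h>

#define EX_SEQ_MASK 0xfff	/* 12-bit IEEE 802.11 SN space */

static unsigned short ex_sn_sub(unsigned short a, unsigned short b)
{
	return (a - b) & EX_SEQ_MASK;
}

int main(void)
{
	unsigned short sn = 0x005, next_reclaimed = 0xffd;

	/* wraps across the 12-bit boundary: 8 frames still queued */
	printf("%u\n", ex_sn_sub(sn, next_reclaimed));
	return 0;
}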
4104
4105int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4106                         struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
4107                         u8 *key, u32 key_len)
4108{
4109        int ret;
4110        u16 queue;
4111        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4112        struct ieee80211_key_conf *keyconf;
4113
4114        ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
4115                                       NL80211_IFTYPE_UNSPECIFIED,
4116                                       IWL_STA_LINK);
4117        if (ret)
4118                return ret;
4119
4120        ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
4121                                             addr, sta, &queue,
4122                                             IWL_MVM_TX_FIFO_BE);
4123        if (ret)
4124                goto out;
4125
4126        keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
4127        if (!keyconf) {
4128                ret = -ENOBUFS;
4129                goto out;
4130        }
4131
4132        keyconf->cipher = cipher;
4133        memcpy(keyconf->key, key, key_len);
4134        keyconf->keylen = key_len;
4135
4136        ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4137                                   0, NULL, 0, 0, true);
4138        kfree(keyconf);
4139        return ret;
4140out:
4141        iwl_mvm_dealloc_int_sta(mvm, sta);
4142        return ret;
4143}
4144
4145void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4146                                   struct ieee80211_vif *vif,
4147                                   u32 mac_id)
4148{
4149        struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4150                .mac_id = cpu_to_le32(mac_id),
4151        };
4152        int ret;
4153
4154        ret = iwl_mvm_send_cmd_pdu(mvm,
4155                                   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4156                                   CMD_ASYNC,
4157                                   sizeof(cancel_channel_switch_cmd),
4158                                   &cancel_channel_switch_cmd);
4159        if (ret)
4160                IWL_ERR(mvm, "Failed to cancel the channel switch\n");
4161}
4162