9#include <linux/module.h>
10#include <linux/firmware.h>
11#include <linux/etherdevice.h>
12#include <linux/vmalloc.h>
13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/pm_runtime.h>
16#include <linux/pm_wakeirq.h>
17
18#include "wlcore.h"
19#include "debug.h"
20#include "wl12xx_80211.h"
21#include "io.h"
22#include "tx.h"
23#include "ps.h"
24#include "init.h"
25#include "debugfs.h"
26#include "testmode.h"
27#include "vendor_cmd.h"
28#include "scan.h"
29#include "hw_ops.h"
30#include "sysfs.h"
31
32#define WL1271_BOOT_RETRIES 3
33#define WL1271_WAKEUP_TIMEOUT 500
34
35static char *fwlog_param;
36static int fwlog_mem_blocks = -1;
37static int bug_on_recovery = -1;
38static int no_recovery = -1;
39
40static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 struct ieee80211_vif *vif,
42 bool reset_tx_queues);
43static void wlcore_op_stop_locked(struct wl1271 *wl);
44static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45
46static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
47{
48 int ret;
49
50 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
51 return -EINVAL;
52
53 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
54 return 0;
55
56 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
57 return 0;
58
59 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
60 if (ret < 0)
61 return ret;
62
63 wl1271_info("Association completed.");
64 return 0;
65}
66
67static void wl1271_reg_notify(struct wiphy *wiphy,
68 struct regulatory_request *request)
69{
70 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 struct wl1271 *wl = hw->priv;
72
73
74 if (request)
75 wl->dfs_region = request->dfs_region;
76
77 wlcore_regdomain_config(wl);
78}
79
80static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
81 bool enable)
82{
83 int ret = 0;
84
85
86 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
87 if (ret < 0)
88 goto out;
89
90 if (enable)
91 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
92 else
93 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
94out:
95 return ret;
96}
97
98
99
100
101
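/*
 * Re-evaluate rx streaming: called when the rx_streaming interval has
 * changed or when rx streaming may need to be disabled.
 */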
102int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
103{
104 int ret = 0;
105 int period = wl->conf.rx_streaming.interval;
106
107
108 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
109 goto out;
110
111
112 if (period &&
113 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 (wl->conf.rx_streaming.always ||
115 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 ret = wl1271_set_rx_streaming(wl, wlvif, true);
117 else {
118 ret = wl1271_set_rx_streaming(wl, wlvif, false);
119
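 /* stop the duration timer so it won't queue the disable work again */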
120 del_timer_sync(&wlvif->rx_streaming_timer);
121 }
122out:
123 return ret;
124}
125
126static void wl1271_rx_streaming_enable_work(struct work_struct *work)
127{
128 int ret;
129 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
130 rx_streaming_enable_work);
131 struct wl1271 *wl = wlvif->wl;
132
133 mutex_lock(&wl->mutex);
134
135 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
136 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
137 (!wl->conf.rx_streaming.always &&
138 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
139 goto out;
140
141 if (!wl->conf.rx_streaming.interval)
142 goto out;
143
144 ret = pm_runtime_get_sync(wl->dev);
145 if (ret < 0) {
146 pm_runtime_put_noidle(wl->dev);
147 goto out;
148 }
149
150 ret = wl1271_set_rx_streaming(wl, wlvif, true);
151 if (ret < 0)
152 goto out_sleep;
153
154
155 mod_timer(&wlvif->rx_streaming_timer,
156 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
157
158out_sleep:
159 pm_runtime_mark_last_busy(wl->dev);
160 pm_runtime_put_autosuspend(wl->dev);
161out:
162 mutex_unlock(&wl->mutex);
163}
164
165static void wl1271_rx_streaming_disable_work(struct work_struct *work)
166{
167 int ret;
168 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
169 rx_streaming_disable_work);
170 struct wl1271 *wl = wlvif->wl;
171
172 mutex_lock(&wl->mutex);
173
174 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
175 goto out;
176
177 ret = pm_runtime_get_sync(wl->dev);
178 if (ret < 0) {
179 pm_runtime_put_noidle(wl->dev);
180 goto out;
181 }
182
183 ret = wl1271_set_rx_streaming(wl, wlvif, false);
184 if (ret)
185 goto out_sleep;
186
187out_sleep:
188 pm_runtime_mark_last_busy(wl->dev);
189 pm_runtime_put_autosuspend(wl->dev);
190out:
191 mutex_unlock(&wl->mutex);
192}
193
194static void wl1271_rx_streaming_timer(struct timer_list *t)
195{
196 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
197 struct wl1271 *wl = wlvif->wl;
198 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
199}
200
201
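/* caller must hold wl->mutex (hence the _locked suffix) */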
202void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
203{
204
205 if (wl->tx_allocated_blocks == 0)
206 return;
207
208 cancel_delayed_work(&wl->tx_watchdog_work);
209 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
210 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
211}
212
213static void wlcore_rc_update_work(struct work_struct *work)
214{
215 int ret;
216 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
217 rc_update_work);
218 struct wl1271 *wl = wlvif->wl;
219 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
220
221 mutex_lock(&wl->mutex);
222
223 if (unlikely(wl->state != WLCORE_STATE_ON))
224 goto out;
225
226 ret = pm_runtime_get_sync(wl->dev);
227 if (ret < 0) {
228 pm_runtime_put_noidle(wl->dev);
229 goto out;
230 }
231
232 if (ieee80211_vif_is_mesh(vif)) {
233 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
234 true, wlvif->sta.hlid);
235 if (ret < 0)
236 goto out_sleep;
237 } else {
238 wlcore_hw_sta_rc_update(wl, wlvif);
239 }
240
241out_sleep:
242 pm_runtime_mark_last_busy(wl->dev);
243 pm_runtime_put_autosuspend(wl->dev);
244out:
245 mutex_unlock(&wl->mutex);
246}
247
248static void wl12xx_tx_watchdog_work(struct work_struct *work)
249{
250 struct delayed_work *dwork;
251 struct wl1271 *wl;
252
253 dwork = to_delayed_work(work);
254 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
255
256 mutex_lock(&wl->mutex);
257
258 if (unlikely(wl->state != WLCORE_STATE_ON))
259 goto out;
260
261
262 if (unlikely(wl->tx_allocated_blocks == 0))
263 goto out;
264
265
266
267
268
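 /*
 * A remain-on-channel (ROC) in progress can legitimately stall Tx
 * (e.g. pending Tx on the non-ROC channels), so just re-arm.
 */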
269 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
270 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
271 wl->conf.tx.tx_watchdog_timeout);
272 wl12xx_rearm_tx_watchdog_locked(wl);
273 goto out;
274 }
275
276
277
278
279
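 /*
 * Likewise, an ongoing scan can keep Tx quiet for a long time,
 * so don't treat it as a stuck queue.
 */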
280 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
281 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
282 wl->conf.tx.tx_watchdog_timeout);
283 wl12xx_rearm_tx_watchdog_locked(wl);
284 goto out;
285 }
286
287
288
289
290
291
292
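 /*
 * The AP might keep frames buffered for sleeping stations for a
 * long time, so re-arm as long as any AP interface has stations.
 */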
293 if (wl->active_sta_count) {
 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has %d stations",
296 wl->conf.tx.tx_watchdog_timeout,
297 wl->active_sta_count);
298 wl12xx_rearm_tx_watchdog_locked(wl);
299 goto out;
300 }
301
302 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
303 wl->conf.tx.tx_watchdog_timeout);
304 wl12xx_queue_recovery_work(wl);
305
306out:
307 mutex_unlock(&wl->mutex);
308}
309
310static void wlcore_adjust_conf(struct wl1271 *wl)
311{
312
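 /* apply optional module parameters on top of the default conf */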
313 if (fwlog_param) {
314 if (!strcmp(fwlog_param, "continuous")) {
315 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
317 } else if (!strcmp(fwlog_param, "dbgpins")) {
318 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
319 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
320 } else if (!strcmp(fwlog_param, "disable")) {
321 wl->conf.fwlog.mem_blocks = 0;
322 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
323 } else {
324 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
325 }
326 }
327
328 if (bug_on_recovery != -1)
329 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
330
331 if (no_recovery != -1)
332 wl->conf.recovery.no_recovery = (u8) no_recovery;
333}
334
335static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
336 struct wl12xx_vif *wlvif,
337 u8 hlid, u8 tx_pkts)
338{
339 bool fw_ps;
340
341 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
342
343
344
345
346
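 /*
 * Wake up from high-level PS if the STA is asleep with too few
 * packets in the FW, or if the STA is awake.
 */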
347 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
348 wl12xx_ps_link_end(wl, wlvif, hlid);
349
350
351
352
353
354
355
356
357
358
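 /*
 * Start high-level PS if the STA is asleep with enough packets
 * queued in the FW, unless this is the only connected link (in
 * which case FW memory pressure is less of a concern).
 */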
359 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
360 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
361 wl12xx_ps_link_start(wl, wlvif, hlid, true);
362}
363
364static void wl12xx_irq_update_links_status(struct wl1271 *wl,
365 struct wl12xx_vif *wlvif,
366 struct wl_fw_status *status)
367{
368 unsigned long cur_fw_ps_map;
369 u8 hlid;
370
371 cur_fw_ps_map = status->link_ps_bitmap;
372 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
373 wl1271_debug(DEBUG_PSM,
374 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
375 wl->ap_fw_ps_map, cur_fw_ps_map,
376 wl->ap_fw_ps_map ^ cur_fw_ps_map);
377
378 wl->ap_fw_ps_map = cur_fw_ps_map;
379 }
380
381 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
382 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
383 wl->links[hlid].allocated_pkts);
384}
385
386static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
387{
388 struct wl12xx_vif *wlvif;
389 u32 old_tx_blk_count = wl->tx_blocks_available;
390 int avail, freed_blocks;
391 int i;
392 int ret;
393 struct wl1271_link *lnk;
394
395 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
396 wl->raw_fw_status,
397 wl->fw_status_len, false);
398 if (ret < 0)
399 return ret;
400
401 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
402
403 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
404 "drv_rx_counter = %d, tx_results_counter = %d)",
405 status->intr,
406 status->fw_rx_counter,
407 status->drv_rx_counter,
408 status->tx_results_counter);
409
410 for (i = 0; i < NUM_TX_QUEUES; i++) {
411
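 /* the FW per-queue counter is 8-bit; mask to handle wrap-around */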
412 wl->tx_allocated_pkts[i] -=
413 (status->counters.tx_released_pkts[i] -
414 wl->tx_pkts_freed[i]) & 0xff;
415
416 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
417 }
418
419
420 for_each_set_bit(i, wl->links_map, wl->num_links) {
421 u8 diff;
422 lnk = &wl->links[i];
423
424
425 diff = (status->counters.tx_lnk_free_pkts[i] -
426 lnk->prev_freed_pkts) & 0xff;
427
428 if (diff == 0)
429 continue;
430
431 lnk->allocated_pkts -= diff;
432 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
433
434
435 lnk->total_freed_pkts += diff;
436 }
437
438
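 /* handle wrap-around of the 32-bit total released blocks counter */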
439 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
440 freed_blocks = status->total_released_blks -
441 wl->tx_blocks_freed;
442 else
443 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
444 status->total_released_blks;
445
446 wl->tx_blocks_freed = status->total_released_blks;
447
448 wl->tx_allocated_blocks -= freed_blocks;
449
450
451
452
453
454
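 /*
 * If the FW freed some blocks: re-arm the Tx watchdog if blocks are
 * still allocated (Tx is progressing), otherwise cancel it (no Tx
 * in flight).
 */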
455 if (freed_blocks) {
456 if (wl->tx_allocated_blocks)
457 wl12xx_rearm_tx_watchdog_locked(wl);
458 else
459 cancel_delayed_work(&wl->tx_watchdog_work);
460 }
461
462 avail = status->tx_total - wl->tx_allocated_blocks;
463
464
465
466
467
468
469
470
471
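 /*
 * The FW may change the total number of Tx memory blocks before we
 * are told about released blocks, which could make 'avail'
 * momentarily lower than reality; never let tx_blocks_available
 * decrease here.
 */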
472 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
473 avail);
474
475
476 if (wl->tx_blocks_available > old_tx_blk_count)
477 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
478
479
480 wl12xx_for_each_wlvif_ap(wl, wlvif) {
481 wl12xx_irq_update_links_status(wl, wlvif, status);
482 }
483
484
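 /* update the host-chipset time offset (ns >> 10, roughly microseconds) */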
485 wl->time_offset = (ktime_get_boottime_ns() >> 10) -
486 (s64)(status->fw_localtime);
487
488 wl->fw_fast_lnk_map = status->link_fast_bitmap;
489
490 return 0;
491}
492
493static void wl1271_flush_deferred_work(struct wl1271 *wl)
494{
495 struct sk_buff *skb;
496
497
498 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
499 ieee80211_rx_ni(wl->hw, skb);
500
501
502 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
503 ieee80211_tx_status_ni(wl->hw, skb);
504}
505
506static void wl1271_netstack_work(struct work_struct *work)
507{
508 struct wl1271 *wl =
509 container_of(work, struct wl1271, netstack_work);
510
511 do {
512 wl1271_flush_deferred_work(wl);
513 } while (skb_queue_len(&wl->deferred_rx_queue));
514}
515
516#define WL1271_IRQ_MAX_LOOPS 256
517
518static int wlcore_irq_locked(struct wl1271 *wl)
519{
520 int ret = 0;
521 u32 intr;
522 int loopcount = WL1271_IRQ_MAX_LOOPS;
523 bool run_tx_queue = true;
524 bool done = false;
525 unsigned int defer_count;
526 unsigned long flags;
527
528
529
530
531
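 /*
 * When an edge-triggered interrupt is used we cannot iterate more
 * than once without racing against the hardirq.
 */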
532 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
533 loopcount = 1;
534
535 wl1271_debug(DEBUG_IRQ, "IRQ work");
536
537 if (unlikely(wl->state != WLCORE_STATE_ON))
538 goto out;
539
540 ret = pm_runtime_get_sync(wl->dev);
541 if (ret < 0) {
542 pm_runtime_put_noidle(wl->dev);
543 goto out;
544 }
545
546 while (!done && loopcount--) {
547 smp_mb__after_atomic();
548
549 ret = wlcore_fw_status(wl, wl->fw_status);
550 if (ret < 0)
551 goto err_ret;
552
553 wlcore_hw_tx_immediate_compl(wl);
554
555 intr = wl->fw_status->intr;
556 intr &= WLCORE_ALL_INTR_MASK;
557 if (!intr) {
558 done = true;
559 continue;
560 }
561
562 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 wl1271_error("HW watchdog interrupt received! starting recovery.");
564 wl->watchdog_recovery = true;
565 ret = -EIO;
566
567
568 goto err_ret;
569 }
570
 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
 wl1271_error("SW watchdog interrupt received! starting recovery.");
574 wl->watchdog_recovery = true;
575 ret = -EIO;
576
577
578 goto err_ret;
579 }
580
581 if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583
584 ret = wlcore_rx(wl, wl->fw_status);
585 if (ret < 0)
586 goto err_ret;
587
588
589 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
590 if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
591 if (!wl1271_tx_total_queue_count(wl))
592 run_tx_queue = false;
593 spin_unlock_irqrestore(&wl->wl_lock, flags);
594 }
595
596
597
598
599
600 if (run_tx_queue) {
601 ret = wlcore_tx_work_locked(wl);
602 if (ret < 0)
603 goto err_ret;
604 }
605 }
606
607
608 ret = wlcore_hw_tx_delayed_compl(wl);
609 if (ret < 0)
610 goto err_ret;
611
612
613 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
614 skb_queue_len(&wl->deferred_rx_queue);
615 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
616 wl1271_flush_deferred_work(wl);
617 }
618
619 if (intr & WL1271_ACX_INTR_EVENT_A) {
620 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
621 ret = wl1271_event_handle(wl, 0);
622 if (ret < 0)
623 goto err_ret;
624 }
625
626 if (intr & WL1271_ACX_INTR_EVENT_B) {
627 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
628 ret = wl1271_event_handle(wl, 1);
629 if (ret < 0)
630 goto err_ret;
631 }
632
633 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
634 wl1271_debug(DEBUG_IRQ,
635 "WL1271_ACX_INTR_INIT_COMPLETE");
636
637 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
638 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
639 }
640
641err_ret:
642 pm_runtime_mark_last_busy(wl->dev);
643 pm_runtime_put_autosuspend(wl->dev);
644
645out:
646 return ret;
647}
648
649static irqreturn_t wlcore_irq(int irq, void *cookie)
650{
651 int ret;
652 unsigned long flags;
653 struct wl1271 *wl = cookie;
654 bool queue_tx_work = true;
655
656 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
657
658
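 /* complete the ELP wakeup, if the chip was sleeping */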
659 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
660 spin_lock_irqsave(&wl->wl_lock, flags);
661 if (wl->elp_compl)
662 complete(wl->elp_compl);
663 spin_unlock_irqrestore(&wl->wl_lock, flags);
664 }
665
666 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
667
668 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
669 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
670 spin_lock_irqsave(&wl->wl_lock, flags);
671 disable_irq_nosync(wl->irq);
672 pm_wakeup_event(wl->dev, 0);
673 spin_unlock_irqrestore(&wl->wl_lock, flags);
674 goto out_handled;
675 }
676
677
678 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
679 cancel_work_sync(&wl->tx_work);
680
681 mutex_lock(&wl->mutex);
682
683 ret = wlcore_irq_locked(wl);
684 if (ret)
685 wl12xx_queue_recovery_work(wl);
686
687
688 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
690 if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
691 if (!wl1271_tx_total_queue_count(wl))
692 queue_tx_work = false;
693 spin_unlock_irqrestore(&wl->wl_lock, flags);
694 }
695 if (queue_tx_work)
696 ieee80211_queue_work(wl->hw, &wl->tx_work);
697 }
698
699 mutex_unlock(&wl->mutex);
700
701out_handled:
702 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
703
704 return IRQ_HANDLED;
705}
706
707struct vif_counter_data {
708 u8 counter;
709
710 struct ieee80211_vif *cur_vif;
711 bool cur_vif_running;
712};
713
714static void wl12xx_vif_count_iter(void *data, u8 *mac,
715 struct ieee80211_vif *vif)
716{
717 struct vif_counter_data *counter = data;
718
719 counter->counter++;
720 if (counter->cur_vif == vif)
721 counter->cur_vif_running = true;
722}
723
724
725static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
726 struct ieee80211_vif *cur_vif,
727 struct vif_counter_data *data)
728{
729 memset(data, 0, sizeof(*data));
730 data->cur_vif = cur_vif;
731
732 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
733 wl12xx_vif_count_iter, data);
734}
735
736static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
737{
738 const struct firmware *fw;
739 const char *fw_name;
740 enum wl12xx_fw_type fw_type;
741 int ret;
742
743 if (plt) {
744 fw_type = WL12XX_FW_TYPE_PLT;
745 fw_name = wl->plt_fw_name;
746 } else {
747
748
749
750
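 /*
 * wl12xx_get_vif_count() cannot be called here (wl->mutex is held),
 * so rely on the cached last_vif_count.
 */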
751 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
752 fw_type = WL12XX_FW_TYPE_MULTI;
753 fw_name = wl->mr_fw_name;
754 } else {
755 fw_type = WL12XX_FW_TYPE_NORMAL;
756 fw_name = wl->sr_fw_name;
757 }
758 }
759
760 if (wl->fw_type == fw_type)
761 return 0;
762
763 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
764
765 ret = request_firmware(&fw, fw_name, wl->dev);
766
767 if (ret < 0) {
768 wl1271_error("could not get firmware %s: %d", fw_name, ret);
769 return ret;
770 }
771
772 if (fw->size % 4) {
773 wl1271_error("firmware size is not multiple of 32 bits: %zu",
774 fw->size);
775 ret = -EILSEQ;
776 goto out;
777 }
778
779 vfree(wl->fw);
780 wl->fw_type = WL12XX_FW_TYPE_NONE;
781 wl->fw_len = fw->size;
782 wl->fw = vmalloc(wl->fw_len);
783
784 if (!wl->fw) {
785 wl1271_error("could not allocate memory for the firmware");
786 ret = -ENOMEM;
787 goto out;
788 }
789
790 memcpy(wl->fw, fw->data, wl->fw_len);
791 ret = 0;
792 wl->fw_type = fw_type;
793out:
794 release_firmware(fw);
795
796 return ret;
797}
798
799void wl12xx_queue_recovery_work(struct wl1271 *wl)
800{
801
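 /* don't queue a new recovery if one is already in progress (state != ON) */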
802 if (wl->state == WLCORE_STATE_ON) {
803 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 &wl->flags));
805
806 wl->state = WLCORE_STATE_RESTARTING;
807 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
808 ieee80211_queue_work(wl->hw, &wl->recovery_work);
809 }
810}
811
812size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
813{
814 size_t len;
815
816
817 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
818
819
820 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 wl->fwlog_size += len;
822
823 return len;
824}
825
826static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
827{
828 u32 end_of_log = 0;
829 int error;
830
831 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
832 return;
833
834 wl1271_info("Reading FW panic log");
835
836
837
838
839
840
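 /*
 * Make sure the chip is awake; don't send a stop-fwlog command if the
 * FW is hanged (watchdog recovery) or if the log goes to debug pins.
 */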
841 error = pm_runtime_get_sync(wl->dev);
842 if (error < 0) {
843 pm_runtime_put_noidle(wl->dev);
844 return;
845 }
846 if (!wl->watchdog_recovery &&
847 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
848 wl12xx_cmd_stop_fwlog(wl);
849
850
851 do {
852 end_of_log = wlcore_event_fw_logger(wl);
853 if (end_of_log == 0) {
854 msleep(100);
855 end_of_log = wlcore_event_fw_logger(wl);
856 }
857 } while (end_of_log != 0);
858}
859
860static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
861 u8 hlid, struct ieee80211_sta *sta)
862{
863 struct wl1271_station *wl_sta;
864 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
865
866 wl_sta = (void *)sta->drv_priv;
867 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
868
869
870
871
872
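 /*
 * Pad the initial sequence number on recovery to account for frames
 * that were transmitted but whose status never came back from the
 * FW; GEM ciphers need a larger padding.
 */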
873 if (wlvif->encryption_type == KEY_GEM)
874 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
875
876 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
877 wl_sta->total_freed_pkts += sqn_recovery_padding;
878}
879
880static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
881 struct wl12xx_vif *wlvif,
882 u8 hlid, const u8 *addr)
883{
884 struct ieee80211_sta *sta;
885 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
886
887 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
888 is_zero_ether_addr(addr)))
889 return;
890
891 rcu_read_lock();
892 sta = ieee80211_find_sta(vif, addr);
893 if (sta)
894 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
895 rcu_read_unlock();
896}
897
898static void wlcore_print_recovery(struct wl1271 *wl)
899{
900 u32 pc = 0;
901 u32 hint_sts = 0;
902 int ret;
903
904 wl1271_info("Hardware recovery in progress. FW ver: %s",
905 wl->chip.fw_ver_str);
906
907
908 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
909 if (ret < 0)
910 return;
911
912 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
913 if (ret < 0)
914 return;
915
916 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
917 if (ret < 0)
918 return;
919
920 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
921 pc, hint_sts, ++wl->recovery_count);
922
923 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
924}
925
926
927static void wl1271_recovery_work(struct work_struct *work)
928{
929 struct wl1271 *wl =
930 container_of(work, struct wl1271, recovery_work);
931 struct wl12xx_vif *wlvif;
932 struct ieee80211_vif *vif;
933 int error;
934
935 mutex_lock(&wl->mutex);
936
937 if (wl->state == WLCORE_STATE_OFF || wl->plt)
938 goto out_unlock;
939
940 error = pm_runtime_get_sync(wl->dev);
941 if (error < 0) {
942 wl1271_warning("Enable for recovery failed");
943 pm_runtime_put_noidle(wl->dev);
944 }
945 wlcore_disable_interrupts_nosync(wl);
946
947 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
948 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
949 wl12xx_read_fwlog_panic(wl);
950 wlcore_print_recovery(wl);
951 }
952
953 BUG_ON(wl->conf.recovery.bug_on_recovery &&
954 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
955
956 clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
957
958 if (wl->conf.recovery.no_recovery) {
959 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
960 goto out_unlock;
961 }
962
963
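 /* prevent spurious Tx while the FW is being restarted */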
964 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
965
966
967 while (!list_empty(&wl->wlvif_list)) {
968 wlvif = list_first_entry(&wl->wlvif_list,
969 struct wl12xx_vif, list);
970 vif = wl12xx_wlvif_to_vif(wlvif);
971
972 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
973 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
974 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
975 vif->bss_conf.bssid);
976 }
977
978 __wl1271_op_remove_interface(wl, vif, false);
979 }
980
981 wlcore_op_stop_locked(wl);
982 pm_runtime_mark_last_busy(wl->dev);
983 pm_runtime_put_autosuspend(wl->dev);
984
985 ieee80211_restart_hw(wl->hw);
986
987
988
989
990
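 /*
 * It is safe to wake the queues here: mac80211 has been asked to
 * restart the HW, and the queues were stopped with the FW_RESTART
 * reason above.
 */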
991 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
992
993out_unlock:
994 wl->watchdog_recovery = false;
995 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
996 mutex_unlock(&wl->mutex);
997}
998
999static int wlcore_fw_wakeup(struct wl1271 *wl)
1000{
1001 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1002}
1003
1004static int wl1271_setup(struct wl1271 *wl)
1005{
1006 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1007 if (!wl->raw_fw_status)
1008 goto err;
1009
1010 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1011 if (!wl->fw_status)
1012 goto err;
1013
1014 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1015 if (!wl->tx_res_if)
1016 goto err;
1017
1018 return 0;
1019err:
1020 kfree(wl->fw_status);
1021 kfree(wl->raw_fw_status);
1022 return -ENOMEM;
1023}
1024
1025static int wl12xx_set_power_on(struct wl1271 *wl)
1026{
1027 int ret;
1028
1029 msleep(WL1271_PRE_POWER_ON_SLEEP);
1030 ret = wl1271_power_on(wl);
1031 if (ret < 0)
1032 goto out;
1033 msleep(WL1271_POWER_ON_SLEEP);
1034 wl1271_io_reset(wl);
1035 wl1271_io_init(wl);
1036
1037 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1038 if (ret < 0)
1039 goto fail;
1040
1041
1042 ret = wlcore_fw_wakeup(wl);
1043 if (ret < 0)
1044 goto fail;
1045
1046out:
1047 return ret;
1048
1049fail:
1050 wl1271_power_off(wl);
1051 return ret;
1052}
1053
1054static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1055{
1056 int ret = 0;
1057
1058 ret = wl12xx_set_power_on(wl);
1059 if (ret < 0)
1060 goto out;
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
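 /*
 * Check whether the bus supports block-size alignment; if it does
 * not, drop the corresponding quirk so Tx lengths are not aligned.
 */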
1073 if (!wl1271_set_block_size(wl))
1074 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1075
1076
1077
1078 ret = wl1271_setup(wl);
1079 if (ret < 0)
1080 goto out;
1081
1082 ret = wl12xx_fetch_firmware(wl, plt);
1083 if (ret < 0) {
1084 kfree(wl->fw_status);
1085 kfree(wl->raw_fw_status);
1086 kfree(wl->tx_res_if);
1087 }
1088
1089out:
1090 return ret;
1091}
1092
1093int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1094{
1095 int retries = WL1271_BOOT_RETRIES;
1096 struct wiphy *wiphy = wl->hw->wiphy;
1097
1098 static const char* const PLT_MODE[] = {
1099 "PLT_OFF",
1100 "PLT_ON",
1101 "PLT_FEM_DETECT",
1102 "PLT_CHIP_AWAKE"
1103 };
1104
1105 int ret;
1106
1107 mutex_lock(&wl->mutex);
1108
1109 wl1271_notice("power up");
1110
1111 if (wl->state != WLCORE_STATE_OFF) {
1112 wl1271_error("cannot go into PLT state because not "
1113 "in off state: %d", wl->state);
1114 ret = -EBUSY;
1115 goto out;
1116 }
1117
1118
1119 wl->plt = true;
1120 wl->plt_mode = plt_mode;
1121
1122 while (retries) {
1123 retries--;
1124 ret = wl12xx_chip_wakeup(wl, true);
1125 if (ret < 0)
1126 goto power_off;
1127
1128 if (plt_mode != PLT_CHIP_AWAKE) {
1129 ret = wl->ops->plt_init(wl);
1130 if (ret < 0)
1131 goto power_off;
1132 }
1133
1134 wl->state = WLCORE_STATE_ON;
1135 wl1271_notice("firmware booted in PLT mode %s (%s)",
1136 PLT_MODE[plt_mode],
1137 wl->chip.fw_ver_str);
1138
1139
1140 wiphy->hw_version = wl->chip.id;
1141 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1142 sizeof(wiphy->fw_version));
1143
1144 goto out;
1145
1146power_off:
1147 wl1271_power_off(wl);
1148 }
1149
1150 wl->plt = false;
1151 wl->plt_mode = PLT_OFF;
1152
1153 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1154 WL1271_BOOT_RETRIES);
1155out:
1156 mutex_unlock(&wl->mutex);
1157
1158 return ret;
1159}
1160
1161int wl1271_plt_stop(struct wl1271 *wl)
1162{
1163 int ret = 0;
1164
1165 wl1271_notice("power down");
1166
1167
1168
1169
1170
1171
1172 wlcore_disable_interrupts(wl);
1173 mutex_lock(&wl->mutex);
1174 if (!wl->plt) {
1175 mutex_unlock(&wl->mutex);
1176
1177
1178
1179
1180
1181
1182 wlcore_enable_interrupts(wl);
1183
1184 wl1271_error("cannot power down because not in PLT "
1185 "state: %d", wl->state);
1186 ret = -EBUSY;
1187 goto out;
1188 }
1189
1190 mutex_unlock(&wl->mutex);
1191
1192 wl1271_flush_deferred_work(wl);
1193 cancel_work_sync(&wl->netstack_work);
1194 cancel_work_sync(&wl->recovery_work);
1195 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1196
1197 mutex_lock(&wl->mutex);
1198 wl1271_power_off(wl);
1199 wl->flags = 0;
1200 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1201 wl->state = WLCORE_STATE_OFF;
1202 wl->plt = false;
1203 wl->plt_mode = PLT_OFF;
1204 wl->rx_counter = 0;
1205 mutex_unlock(&wl->mutex);
1206
1207out:
1208 return ret;
1209}
1210
1211static void wl1271_op_tx(struct ieee80211_hw *hw,
1212 struct ieee80211_tx_control *control,
1213 struct sk_buff *skb)
1214{
1215 struct wl1271 *wl = hw->priv;
1216 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1217 struct ieee80211_vif *vif = info->control.vif;
1218 struct wl12xx_vif *wlvif = NULL;
1219 unsigned long flags;
1220 int q, mapping;
1221 u8 hlid;
1222
1223 if (!vif) {
1224 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1225 ieee80211_free_txskb(hw, skb);
1226 return;
1227 }
1228
1229 wlvif = wl12xx_vif_to_data(vif);
1230 mapping = skb_get_queue_mapping(skb);
1231 q = wl1271_tx_get_queue(mapping);
1232
1233 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1234
1235 spin_lock_irqsave(&wl->wl_lock, flags);
1236
1237
1238
1239
1240
1241
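 /*
 * Drop the packet if the link is invalid or if the queue is stopped
 * for any reason other than the Tx watermark: the watermark is a
 * "soft" stop, so those frames are still accepted.
 */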
1242 if (hlid == WL12XX_INVALID_LINK_ID ||
1243 (!test_bit(hlid, wlvif->links_map)) ||
1244 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1245 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1246 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1247 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1248 ieee80211_free_txskb(hw, skb);
1249 goto out;
1250 }
1251
1252 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1253 hlid, q, skb->len);
1254 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1255
1256 wl->tx_queue_count[q]++;
1257 wlvif->tx_queue_count[q]++;
1258
1259
1260
1261
1262
1263 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1264 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1265 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1266 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1267 wlcore_stop_queue_locked(wl, wlvif, q,
1268 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1269 }
1270
1271
1272
1273
1274
1275
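 /*
 * Kick the Tx work unless the FW has no memory for Tx right now or
 * the threaded IRQ handler is about to handle Tx itself.
 */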
1276 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1277 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1278 ieee80211_queue_work(wl->hw, &wl->tx_work);
1279
1280out:
1281 spin_unlock_irqrestore(&wl->wl_lock, flags);
1282}
1283
1284int wl1271_tx_dummy_packet(struct wl1271 *wl)
1285{
1286 unsigned long flags;
1287 int q;
1288
1289
1290 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1291 return 0;
1292
1293 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1294
1295 spin_lock_irqsave(&wl->wl_lock, flags);
1296 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1297 wl->tx_queue_count[q]++;
1298 spin_unlock_irqrestore(&wl->wl_lock, flags);
1299
1300
1301 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1302 return wlcore_tx_work_locked(wl);
1303
1304
1305
1306
1307
1308 return 0;
1309}
1310
1311
1312
1313
1314
1315
1316#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1317
1318static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1319{
1320 struct sk_buff *skb;
1321 struct ieee80211_hdr_3addr *hdr;
1322 unsigned int dummy_packet_size;
1323
1324 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1325 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1326
1327 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1328 if (!skb) {
1329 wl1271_warning("Failed to allocate a dummy packet skb");
1330 return NULL;
1331 }
1332
1333 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1334
1335 hdr = skb_put_zero(skb, sizeof(*hdr));
1336 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1337 IEEE80211_STYPE_NULLFUNC |
1338 IEEE80211_FCTL_TODS);
1339
1340 skb_put_zero(skb, dummy_packet_size);
1341
1342
1343 skb->priority = WL1271_TID_MGMT;
1344
1345
1346 skb_set_queue_mapping(skb, 0);
1347 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1348
1349 return skb;
1350}
1351
1352
1353static int
1354wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1355{
1356 int num_fields = 0, in_field = 0, fields_size = 0;
1357 int i, pattern_len = 0;
1358
1359 if (!p->mask) {
1360 wl1271_warning("No mask in WoWLAN pattern");
1361 return -EINVAL;
1362 }
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
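 /*
 * The pattern is broken up into segments of bytes at different
 * offsets that need to be checked by the FW filter. Each segment is
 * called a field in the FW API. Verify that the number of fields and
 * their total size stay within the FW limits, noting that a segment
 * crossing the Ethernet/IP header boundary needs an extra field.
 */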
1373 for (i = 0; i < p->pattern_len; i++) {
1374 if (test_bit(i, (unsigned long *)p->mask)) {
1375 if (!in_field) {
1376 in_field = 1;
1377 pattern_len = 1;
1378 } else {
1379 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1380 num_fields++;
1381 fields_size += pattern_len +
1382 RX_FILTER_FIELD_OVERHEAD;
1383 pattern_len = 1;
1384 } else
1385 pattern_len++;
1386 }
1387 } else {
1388 if (in_field) {
1389 in_field = 0;
1390 fields_size += pattern_len +
1391 RX_FILTER_FIELD_OVERHEAD;
1392 num_fields++;
1393 }
1394 }
1395 }
1396
1397 if (in_field) {
1398 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1399 num_fields++;
1400 }
1401
1402 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1403 wl1271_warning("RX Filter too complex. Too many segments");
1404 return -EINVAL;
1405 }
1406
1407 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1408 wl1271_warning("RX filter pattern is too big");
1409 return -E2BIG;
1410 }
1411
1412 return 0;
1413}
1414
1415struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1416{
1417 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1418}
1419
1420void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1421{
1422 int i;
1423
1424 if (filter == NULL)
1425 return;
1426
1427 for (i = 0; i < filter->num_fields; i++)
1428 kfree(filter->fields[i].pattern);
1429
1430 kfree(filter);
1431}
1432
1433int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1434 u16 offset, u8 flags,
1435 const u8 *pattern, u8 len)
1436{
1437 struct wl12xx_rx_filter_field *field;
1438
1439 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1440 wl1271_warning("Max fields per RX filter. can't alloc another");
1441 return -EINVAL;
1442 }
1443
1444 field = &filter->fields[filter->num_fields];
1445
1446 field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1447 if (!field->pattern) {
1448 wl1271_warning("Failed to allocate RX filter pattern");
1449 return -ENOMEM;
1450 }
1451
1452 filter->num_fields++;
1453
1454 field->offset = cpu_to_le16(offset);
1455 field->flags = flags;
1456 field->len = len;
1457
1458 return 0;
1459}
1460
1461int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1462{
1463 int i, fields_size = 0;
1464
1465 for (i = 0; i < filter->num_fields; i++)
1466 fields_size += filter->fields[i].len +
1467 sizeof(struct wl12xx_rx_filter_field) -
1468 sizeof(u8 *);
1469
1470 return fields_size;
1471}
1472
1473void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1474 u8 *buf)
1475{
1476 int i;
1477 struct wl12xx_rx_filter_field *field;
1478
1479 for (i = 0; i < filter->num_fields; i++) {
1480 field = (struct wl12xx_rx_filter_field *)buf;
1481
1482 field->offset = filter->fields[i].offset;
1483 field->flags = filter->fields[i].flags;
1484 field->len = filter->fields[i].len;
1485
1486 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1487 buf += sizeof(struct wl12xx_rx_filter_field) -
1488 sizeof(u8 *) + field->len;
1489 }
1490}
1491
1492
1493
1494
1495
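/*
 * Convert a WoWLAN pattern into an RX filter; the filter returned via
 * *f must be freed with wl1271_rx_filter_free().
 */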
1496static int
1497wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1498 struct wl12xx_rx_filter **f)
1499{
1500 int i, j, ret = 0;
1501 struct wl12xx_rx_filter *filter;
1502 u16 offset;
1503 u8 flags, len;
1504
1505 filter = wl1271_rx_filter_alloc();
1506 if (!filter) {
1507 wl1271_warning("Failed to alloc rx filter");
1508 ret = -ENOMEM;
1509 goto err;
1510 }
1511
1512 i = 0;
1513 while (i < p->pattern_len) {
1514 if (!test_bit(i, (unsigned long *)p->mask)) {
1515 i++;
1516 continue;
1517 }
1518
1519 for (j = i; j < p->pattern_len; j++) {
1520 if (!test_bit(j, (unsigned long *)p->mask))
1521 break;
1522
1523 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1524 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1525 break;
1526 }
1527
1528 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1529 offset = i;
1530 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1531 } else {
1532 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1533 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1534 }
1535
1536 len = j - i;
1537
1538 ret = wl1271_rx_filter_alloc_field(filter,
1539 offset,
1540 flags,
1541 &p->pattern[i], len);
1542 if (ret)
1543 goto err;
1544
1545 i = j;
1546 }
1547
1548 filter->action = FILTER_SIGNAL;
1549
1550 *f = filter;
1551 return 0;
1552
1553err:
1554 wl1271_rx_filter_free(filter);
1555 *f = NULL;
1556
1557 return ret;
1558}
1559
1560static int wl1271_configure_wowlan(struct wl1271 *wl,
1561 struct cfg80211_wowlan *wow)
1562{
1563 int i, ret;
1564
1565 if (!wow || wow->any || !wow->n_patterns) {
1566 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1567 FILTER_SIGNAL);
1568 if (ret)
1569 goto out;
1570
1571 ret = wl1271_rx_filter_clear_all(wl);
1572 if (ret)
1573 goto out;
1574
1575 return 0;
1576 }
1577
1578 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1579 return -EINVAL;
1580
1581
1582 for (i = 0; i < wow->n_patterns; i++) {
1583 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1584 if (ret) {
1585 wl1271_warning("Bad wowlan pattern %d", i);
1586 return ret;
1587 }
1588 }
1589
1590 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1591 if (ret)
1592 goto out;
1593
1594 ret = wl1271_rx_filter_clear_all(wl);
1595 if (ret)
1596 goto out;
1597
1598
1599 for (i = 0; i < wow->n_patterns; i++) {
1600 struct cfg80211_pkt_pattern *p;
1601 struct wl12xx_rx_filter *filter = NULL;
1602
1603 p = &wow->patterns[i];
1604
1605 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1606 if (ret) {
1607 wl1271_warning("Failed to create an RX filter from "
1608 "wowlan pattern %d", i);
1609 goto out;
1610 }
1611
1612 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1613
1614 wl1271_rx_filter_free(filter);
1615 if (ret)
1616 goto out;
1617 }
1618
1619 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1620
1621out:
1622 return ret;
1623}
1624
1625static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1626 struct wl12xx_vif *wlvif,
1627 struct cfg80211_wowlan *wow)
1628{
1629 int ret = 0;
1630
1631 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1632 goto out;
1633
1634 ret = wl1271_configure_wowlan(wl, wow);
1635 if (ret < 0)
1636 goto out;
1637
1638 if ((wl->conf.conn.suspend_wake_up_event ==
1639 wl->conf.conn.wake_up_event) &&
1640 (wl->conf.conn.suspend_listen_interval ==
1641 wl->conf.conn.listen_interval))
1642 goto out;
1643
1644 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1645 wl->conf.conn.suspend_wake_up_event,
1646 wl->conf.conn.suspend_listen_interval);
1647
1648 if (ret < 0)
1649 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1650out:
1651 return ret;
1652
1653}
1654
1655static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1656 struct wl12xx_vif *wlvif,
1657 struct cfg80211_wowlan *wow)
1658{
1659 int ret = 0;
1660
1661 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1662 goto out;
1663
1664 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1665 if (ret < 0)
1666 goto out;
1667
1668 ret = wl1271_configure_wowlan(wl, wow);
1669 if (ret < 0)
1670 goto out;
1671
1672out:
1673 return ret;
1674
1675}
1676
1677static int wl1271_configure_suspend(struct wl1271 *wl,
1678 struct wl12xx_vif *wlvif,
1679 struct cfg80211_wowlan *wow)
1680{
1681 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1682 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1683 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1684 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1685 return 0;
1686}
1687
1688static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1689{
1690 int ret = 0;
1691 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1692 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1693
1694 if ((!is_ap) && (!is_sta))
1695 return;
1696
1697 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1698 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1699 return;
1700
1701 wl1271_configure_wowlan(wl, NULL);
1702
1703 if (is_sta) {
1704 if ((wl->conf.conn.suspend_wake_up_event ==
1705 wl->conf.conn.wake_up_event) &&
1706 (wl->conf.conn.suspend_listen_interval ==
1707 wl->conf.conn.listen_interval))
1708 return;
1709
1710 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1711 wl->conf.conn.wake_up_event,
1712 wl->conf.conn.listen_interval);
1713
1714 if (ret < 0)
1715 wl1271_error("resume: wake up conditions failed: %d",
1716 ret);
1717
1718 } else if (is_ap) {
1719 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1720 }
1721}
1722
1723static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1724 struct cfg80211_wowlan *wow)
1725{
1726 struct wl1271 *wl = hw->priv;
1727 struct wl12xx_vif *wlvif;
1728 unsigned long flags;
1729 int ret;
1730
1731 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1732 WARN_ON(!wow);
1733
1734
1735 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1736 wl1271_warning("postponing suspend to perform recovery");
1737 return -EBUSY;
1738 }
1739
1740 wl1271_tx_flush(wl);
1741
1742 mutex_lock(&wl->mutex);
1743
1744 ret = pm_runtime_get_sync(wl->dev);
1745 if (ret < 0) {
1746 pm_runtime_put_noidle(wl->dev);
1747 mutex_unlock(&wl->mutex);
1748 return ret;
1749 }
1750
1751 wl->wow_enabled = true;
1752 wl12xx_for_each_wlvif(wl, wlvif) {
1753 if (wlcore_is_p2p_mgmt(wlvif))
1754 continue;
1755
1756 ret = wl1271_configure_suspend(wl, wlvif, wow);
 if (ret < 0)
 goto out_sleep;
1760 }
1761
1762
1763 ret = wlcore_hw_interrupt_notify(wl, false);
1764 if (ret < 0)
1765 goto out_sleep;
1766
1767
1768 ret = wlcore_hw_rx_ba_filter(wl,
1769 !!wl->conf.conn.suspend_rx_ba_activity);
1770 if (ret < 0)
1771 goto out_sleep;
1772
1773out_sleep:
1774 pm_runtime_put_noidle(wl->dev);
1775 mutex_unlock(&wl->mutex);
1776
1777 if (ret < 0) {
1778 wl1271_warning("couldn't prepare device to suspend");
1779 return ret;
1780 }
1781
1782
1783 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1784
1785 flush_work(&wl->tx_work);
1786
1787
1788
1789
1790
1791 cancel_delayed_work(&wl->tx_watchdog_work);
1792
1793
1794
1795
1796
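 /*
 * Set the suspended flag so the threaded IRQ handler does not queue
 * new work while we are suspended.
 */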
1797 spin_lock_irqsave(&wl->wl_lock, flags);
1798 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1799 spin_unlock_irqrestore(&wl->wl_lock, flags);
1800
1801 return pm_runtime_force_suspend(wl->dev);
1802}
1803
1804static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1805{
1806 struct wl1271 *wl = hw->priv;
1807 struct wl12xx_vif *wlvif;
1808 unsigned long flags;
1809 bool run_irq_work = false, pending_recovery;
1810 int ret;
1811
1812 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1813 wl->wow_enabled);
1814 WARN_ON(!wl->wow_enabled);
1815
1816 ret = pm_runtime_force_resume(wl->dev);
1817 if (ret < 0) {
1818 wl1271_error("ELP wakeup failure!");
1819 goto out_sleep;
1820 }
1821
1822
1823
1824
1825
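 /*
 * Re-enable irq_work enqueueing and run any work that was postponed
 * while we were suspended.
 */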
1826 spin_lock_irqsave(&wl->wl_lock, flags);
1827 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1828 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1829 run_irq_work = true;
1830 spin_unlock_irqrestore(&wl->wl_lock, flags);
1831
1832 mutex_lock(&wl->mutex);
1833
1834
1835 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1836 &wl->flags);
1837
1838 if (run_irq_work) {
1839 wl1271_debug(DEBUG_MAC80211,
1840 "run postponed irq_work directly");
1841
1842
1843 if (!pending_recovery) {
1844 ret = wlcore_irq_locked(wl);
1845 if (ret)
1846 wl12xx_queue_recovery_work(wl);
1847 }
1848
1849 wlcore_enable_interrupts(wl);
1850 }
1851
1852 if (pending_recovery) {
1853 wl1271_warning("queuing forgotten recovery on resume");
1854 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1855 goto out_sleep;
1856 }
1857
1858 ret = pm_runtime_get_sync(wl->dev);
1859 if (ret < 0) {
1860 pm_runtime_put_noidle(wl->dev);
1861 goto out;
1862 }
1863
1864 wl12xx_for_each_wlvif(wl, wlvif) {
1865 if (wlcore_is_p2p_mgmt(wlvif))
1866 continue;
1867
1868 wl1271_configure_resume(wl, wlvif);
1869 }
1870
1871 ret = wlcore_hw_interrupt_notify(wl, true);
1872 if (ret < 0)
1873 goto out_sleep;
1874
1875
1876 ret = wlcore_hw_rx_ba_filter(wl, false);
1877 if (ret < 0)
1878 goto out_sleep;
1879
1880out_sleep:
1881 pm_runtime_mark_last_busy(wl->dev);
1882 pm_runtime_put_autosuspend(wl->dev);
1883
1884out:
1885 wl->wow_enabled = false;
1886
1887
1888
1889
1890
1891
1892 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1893 mutex_unlock(&wl->mutex);
1894
1895 return 0;
1896}
1897
1898static int wl1271_op_start(struct ieee80211_hw *hw)
1899{
1900 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
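 /*
 * Booting the hardware is deferred until the first interface is
 * added: the local MAC address must be known before the firmware is
 * downloaded and initialized, and it is only known once the
 * corresponding interface exists.
 */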
1913 return 0;
1914}
1915
1916static void wlcore_op_stop_locked(struct wl1271 *wl)
1917{
1918 int i;
1919
1920 if (wl->state == WLCORE_STATE_OFF) {
1921 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1922 &wl->flags))
1923 wlcore_enable_interrupts(wl);
1924
1925 return;
1926 }
1927
1928
1929
1930
1931
1932 wl->state = WLCORE_STATE_OFF;
1933
1934
1935
1936
1937
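 /*
 * Use the nosync variant here so interrupts can be disabled without
 * dropping the mutex (the sync variant could deadlock against the
 * threaded handler, which takes wl->mutex).
 */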
1938 wlcore_disable_interrupts_nosync(wl);
1939
1940 mutex_unlock(&wl->mutex);
1941
1942 wlcore_synchronize_interrupts(wl);
1943 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1944 cancel_work_sync(&wl->recovery_work);
1945 wl1271_flush_deferred_work(wl);
1946 cancel_delayed_work_sync(&wl->scan_complete_work);
1947 cancel_work_sync(&wl->netstack_work);
1948 cancel_work_sync(&wl->tx_work);
1949 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1950
1951
1952 mutex_lock(&wl->mutex);
1953 wl12xx_tx_reset(wl);
1954
1955 wl1271_power_off(wl);
1956
1957
1958
1959
1960
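 /*
 * If a recovery was scheduled, interrupts were left disabled to avoid
 * an interrupt storm; now that the chip is powered off it is safe to
 * re-enable them and balance the disable depth.
 */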
1961 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1962 wlcore_enable_interrupts(wl);
1963
1964 wl->band = NL80211_BAND_2GHZ;
1965
1966 wl->rx_counter = 0;
1967 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1968 wl->channel_type = NL80211_CHAN_NO_HT;
1969 wl->tx_blocks_available = 0;
1970 wl->tx_allocated_blocks = 0;
1971 wl->tx_results_count = 0;
1972 wl->tx_packets_count = 0;
1973 wl->time_offset = 0;
1974 wl->ap_fw_ps_map = 0;
1975 wl->ap_ps_map = 0;
1976 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1977 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1978 memset(wl->links_map, 0, sizeof(wl->links_map));
1979 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1980 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1981 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1982 wl->active_sta_count = 0;
1983 wl->active_link_count = 0;
1984
1985
1986 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1987 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1988 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1989
1990
1991
1992
1993
1994
1995 wl->flags = 0;
1996
1997 wl->tx_blocks_freed = 0;
1998
1999 for (i = 0; i < NUM_TX_QUEUES; i++) {
2000 wl->tx_pkts_freed[i] = 0;
2001 wl->tx_allocated_pkts[i] = 0;
2002 }
2003
2004 wl1271_debugfs_reset(wl);
2005
2006 kfree(wl->raw_fw_status);
2007 wl->raw_fw_status = NULL;
2008 kfree(wl->fw_status);
2009 wl->fw_status = NULL;
2010 kfree(wl->tx_res_if);
2011 wl->tx_res_if = NULL;
2012 kfree(wl->target_mem_map);
2013 wl->target_mem_map = NULL;
2014
2015
2016
2017
2018
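 /*
 * Remember the last regulatory channel configuration as pending so it
 * is re-applied after the FW comes back, then clear the "last" copy.
 */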
2019 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2020 sizeof(wl->reg_ch_conf_pending));
2021 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2022}
2023
2024static void wlcore_op_stop(struct ieee80211_hw *hw)
2025{
2026 struct wl1271 *wl = hw->priv;
2027
2028 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2029
2030 mutex_lock(&wl->mutex);
2031
2032 wlcore_op_stop_locked(wl);
2033
2034 mutex_unlock(&wl->mutex);
2035}
2036
2037static void wlcore_channel_switch_work(struct work_struct *work)
2038{
2039 struct delayed_work *dwork;
2040 struct wl1271 *wl;
2041 struct ieee80211_vif *vif;
2042 struct wl12xx_vif *wlvif;
2043 int ret;
2044
2045 dwork = to_delayed_work(work);
2046 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2047 wl = wlvif->wl;
2048
2049 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2050
2051 mutex_lock(&wl->mutex);
2052
2053 if (unlikely(wl->state != WLCORE_STATE_ON))
2054 goto out;
2055
2056
2057 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2058 goto out;
2059
2060 vif = wl12xx_wlvif_to_vif(wlvif);
2061 ieee80211_chswitch_done(vif, false);
2062
2063 ret = pm_runtime_get_sync(wl->dev);
2064 if (ret < 0) {
2065 pm_runtime_put_noidle(wl->dev);
2066 goto out;
2067 }
2068
2069 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2070
2071 pm_runtime_mark_last_busy(wl->dev);
2072 pm_runtime_put_autosuspend(wl->dev);
2073out:
2074 mutex_unlock(&wl->mutex);
2075}
2076
2077static void wlcore_connection_loss_work(struct work_struct *work)
2078{
2079 struct delayed_work *dwork;
2080 struct wl1271 *wl;
2081 struct ieee80211_vif *vif;
2082 struct wl12xx_vif *wlvif;
2083
2084 dwork = to_delayed_work(work);
2085 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2086 wl = wlvif->wl;
2087
2088 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2089
2090 mutex_lock(&wl->mutex);
2091
2092 if (unlikely(wl->state != WLCORE_STATE_ON))
2093 goto out;
2094
2095
2096 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2097 goto out;
2098
2099 vif = wl12xx_wlvif_to_vif(wlvif);
2100 ieee80211_connection_loss(vif);
2101out:
2102 mutex_unlock(&wl->mutex);
2103}
2104
2105static void wlcore_pending_auth_complete_work(struct work_struct *work)
2106{
2107 struct delayed_work *dwork;
2108 struct wl1271 *wl;
2109 struct wl12xx_vif *wlvif;
2110 unsigned long time_spare;
2111 int ret;
2112
2113 dwork = to_delayed_work(work);
2114 wlvif = container_of(dwork, struct wl12xx_vif,
2115 pending_auth_complete_work);
2116 wl = wlvif->wl;
2117
2118 mutex_lock(&wl->mutex);
2119
2120 if (unlikely(wl->state != WLCORE_STATE_ON))
2121 goto out;
2122
2123
2124
2125
2126
2127
2128
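 /*
 * Only time out the pending-auth ROC if (almost) the full timeout has
 * really elapsed since the last auth reply; a new reply may have
 * arrived while this work was waiting on the mutex.
 */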
2129 time_spare = jiffies +
2130 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2131 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2132 goto out;
2133
2134 ret = pm_runtime_get_sync(wl->dev);
2135 if (ret < 0) {
2136 pm_runtime_put_noidle(wl->dev);
2137 goto out;
2138 }
2139
2140
2141 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2142
2143 pm_runtime_mark_last_busy(wl->dev);
2144 pm_runtime_put_autosuspend(wl->dev);
2145out:
2146 mutex_unlock(&wl->mutex);
2147}
2148
2149static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2150{
2151 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2152 WL12XX_MAX_RATE_POLICIES);
2153 if (policy >= WL12XX_MAX_RATE_POLICIES)
2154 return -EBUSY;
2155
2156 __set_bit(policy, wl->rate_policies_map);
2157 *idx = policy;
2158 return 0;
2159}
2160
2161static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2162{
2163 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2164 return;
2165
2166 __clear_bit(*idx, wl->rate_policies_map);
2167 *idx = WL12XX_MAX_RATE_POLICIES;
2168}
2169
2170static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2171{
2172 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2173 WLCORE_MAX_KLV_TEMPLATES);
2174 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2175 return -EBUSY;
2176
2177 __set_bit(policy, wl->klv_templates_map);
2178 *idx = policy;
2179 return 0;
2180}
2181
2182static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2183{
2184 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2185 return;
2186
2187 __clear_bit(*idx, wl->klv_templates_map);
2188 *idx = WLCORE_MAX_KLV_TEMPLATES;
2189}
2190
2191static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2192{
2193 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2194
2195 switch (wlvif->bss_type) {
2196 case BSS_TYPE_AP_BSS:
2197 if (wlvif->p2p)
2198 return WL1271_ROLE_P2P_GO;
2199 else if (ieee80211_vif_is_mesh(vif))
2200 return WL1271_ROLE_MESH_POINT;
2201 else
2202 return WL1271_ROLE_AP;
2203
2204 case BSS_TYPE_STA_BSS:
2205 if (wlvif->p2p)
2206 return WL1271_ROLE_P2P_CL;
2207 else
2208 return WL1271_ROLE_STA;
2209
2210 case BSS_TYPE_IBSS:
2211 return WL1271_ROLE_IBSS;
2212
2213 default:
2214 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2215 }
2216 return WL12XX_INVALID_ROLE_TYPE;
2217}
2218
2219static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2220{
2221 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2222 int i;
2223
2224
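 /* clear everything except the persistent data */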
2225 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2226
2227 switch (ieee80211_vif_type_p2p(vif)) {
2228 case NL80211_IFTYPE_P2P_CLIENT:
2229 wlvif->p2p = 1;
2230 fallthrough;
2231 case NL80211_IFTYPE_STATION:
2232 case NL80211_IFTYPE_P2P_DEVICE:
2233 wlvif->bss_type = BSS_TYPE_STA_BSS;
2234 break;
2235 case NL80211_IFTYPE_ADHOC:
2236 wlvif->bss_type = BSS_TYPE_IBSS;
2237 break;
2238 case NL80211_IFTYPE_P2P_GO:
2239 wlvif->p2p = 1;
2240 fallthrough;
2241 case NL80211_IFTYPE_AP:
2242 case NL80211_IFTYPE_MESH_POINT:
2243 wlvif->bss_type = BSS_TYPE_AP_BSS;
2244 break;
2245 default:
2246 wlvif->bss_type = MAX_BSS_TYPE;
2247 return -EOPNOTSUPP;
2248 }
2249
2250 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2251 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2252 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2253
2254 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2255 wlvif->bss_type == BSS_TYPE_IBSS) {
2256
2257 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2258 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2259 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2260 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2261 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2262 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2263 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2264 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2265 } else {
2266
2267 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2268 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2269 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2270 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2271 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2272 wl12xx_allocate_rate_policy(wl,
2273 &wlvif->ap.ucast_rate_idx[i]);
2274 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2275
2276
2277
2278
2279
2280 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2281
2282 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2283 }
2284
2285 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2286 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2287 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2288
2289
2290
2291
2292
2293 wlvif->band = wl->band;
2294 wlvif->channel = wl->channel;
2295 wlvif->power_level = wl->power_level;
2296 wlvif->channel_type = wl->channel_type;
2297
2298 INIT_WORK(&wlvif->rx_streaming_enable_work,
2299 wl1271_rx_streaming_enable_work);
2300 INIT_WORK(&wlvif->rx_streaming_disable_work,
2301 wl1271_rx_streaming_disable_work);
2302 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2303 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2304 wlcore_channel_switch_work);
2305 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2306 wlcore_connection_loss_work);
2307 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2308 wlcore_pending_auth_complete_work);
2309 INIT_LIST_HEAD(&wlvif->list);
2310
2311 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2312 return 0;
2313}
2314
2315static int wl12xx_init_fw(struct wl1271 *wl)
2316{
2317 int retries = WL1271_BOOT_RETRIES;
2318 bool booted = false;
2319 struct wiphy *wiphy = wl->hw->wiphy;
2320 int ret;
2321
2322 while (retries) {
2323 retries--;
2324 ret = wl12xx_chip_wakeup(wl, false);
2325 if (ret < 0)
2326 goto power_off;
2327
2328 ret = wl->ops->boot(wl);
2329 if (ret < 0)
2330 goto power_off;
2331
2332 ret = wl1271_hw_init(wl);
2333 if (ret < 0)
2334 goto irq_disable;
2335
2336 booted = true;
2337 break;
2338
2339irq_disable:
2340 mutex_unlock(&wl->mutex);
2341
2342
2343
2344
2345
2346
2347
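 /*
 * The mutex is dropped here so any pending IRQ work can drain; while
 * the state is still off the IRQ work will not do anything, and other
 * concurrent operations will fail on the state check.
 */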
2348 wlcore_disable_interrupts(wl);
2349 wl1271_flush_deferred_work(wl);
2350 cancel_work_sync(&wl->netstack_work);
2351 mutex_lock(&wl->mutex);
2352power_off:
2353 wl1271_power_off(wl);
2354 }
2355
2356 if (!booted) {
2357 wl1271_error("firmware boot failed despite %d retries",
2358 WL1271_BOOT_RETRIES);
2359 goto out;
2360 }
2361
2362 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2363
2364
2365 wiphy->hw_version = wl->chip.id;
2366 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2367 sizeof(wiphy->fw_version));
2368
2369
2370
2371
2372
2373 if (!wl->enable_11a)
2374 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2375
2376 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2377 wl->enable_11a ? "" : "not ");
2378
2379 wl->state = WLCORE_STATE_ON;
2380out:
2381 return ret;
2382}
2383
2384static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2385{
2386 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2387}
2388
2389
2390
2391
2392
2393
2394
2395
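/*
 * Decide whether a firmware switch (single-role <-> multi-role) is
 * needed. Also updates wl->last_vif_count, so this must be called
 * before loading a non-PLT firmware.
 */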
2396static bool wl12xx_need_fw_change(struct wl1271 *wl,
2397 struct vif_counter_data vif_counter_data,
2398 bool add)
2399{
2400 enum wl12xx_fw_type current_fw = wl->fw_type;
2401 u8 vif_count = vif_counter_data.counter;
2402
2403 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2404 return false;
2405
	/* the counter doesn't include the vif currently being added */
2407 if (add && !vif_counter_data.cur_vif_running)
2408 vif_count++;
2409
2410 wl->last_vif_count = vif_count;
2411
	/* no need for a fw change if the device is off */
2413 if (wl->state == WLCORE_STATE_OFF)
2414 return false;
2415
	/* no need for a fw change if only a single fw is available */
2417 if (!wl->mr_fw_name)
2418 return false;
2419
2420 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2421 return true;
2422 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2423 return true;
2424
2425 return false;
2426}
2427
/*
 * Put all station interfaces into PSM before an intended fw switch, so the
 * switch is less disruptive to existing connections.
 */
2432static void wl12xx_force_active_psm(struct wl1271 *wl)
2433{
2434 struct wl12xx_vif *wlvif;
2435
2436 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2437 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2438 }
2439}
2440
2441struct wlcore_hw_queue_iter_data {
2442 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* the vif being configured */
2444 struct ieee80211_vif *vif;
	/* whether the current vif is already running in mac80211 */
2446 bool cur_running;
2447};
2448
2449static void wlcore_hw_queue_iter(void *data, u8 *mac,
2450 struct ieee80211_vif *vif)
2451{
2452 struct wlcore_hw_queue_iter_data *iter_data = data;
2453
2454 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2455 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2456 return;
2457
2458 if (iter_data->cur_running || vif == iter_data->vif) {
2459 iter_data->cur_running = true;
2460 return;
2461 }
2462
2463 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2464}
2465
2466static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2467 struct wl12xx_vif *wlvif)
2468{
2469 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2470 struct wlcore_hw_queue_iter_data iter_data = {};
2471 int i, q_base;
2472
2473 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2474 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2475 return 0;
2476 }
2477
2478 iter_data.vif = vif;
2479
	/* mark all hw-queue blocks taken by the other active interfaces */
2481 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2482 IEEE80211_IFACE_ITER_RESUME_ALL,
2483 wlcore_hw_queue_iter, &iter_data);
2484
	/* the current vif is already running in mac80211 (resume/recovery) */
2486 if (iter_data.cur_running) {
2487 wlvif->hw_queue_base = vif->hw_queue[0];
2488 wl1271_debug(DEBUG_MAC80211,
2489 "using pre-allocated hw queue base %d",
2490 wlvif->hw_queue_base);
2491
		/* the interface type might have changed */
2493 goto adjust_cab_queue;
2494 }
2495
2496 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2497 WLCORE_NUM_MAC_ADDRESSES);
2498 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2499 return -EBUSY;
2500
2501 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2502 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2503 wlvif->hw_queue_base);
2504
2505 for (i = 0; i < NUM_TX_QUEUES; i++) {
2506 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register the hw queues in mac80211 */
2508 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2509 }
2510
2511adjust_cab_queue:
	/* the last queues are reserved for cab queues, one per interface */
2513 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2514 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2515 wlvif->hw_queue_base / NUM_TX_QUEUES;
2516 else
2517 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2518
2519 return 0;
2520}
2521
2522static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2523 struct ieee80211_vif *vif)
2524{
2525 struct wl1271 *wl = hw->priv;
2526 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2527 struct vif_counter_data vif_count;
2528 int ret = 0;
2529 u8 role_type;
2530
2531 if (wl->plt) {
2532 wl1271_error("Adding Interface not allowed while in PLT mode");
2533 return -EBUSY;
2534 }
2535
2536 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2537 IEEE80211_VIF_SUPPORTS_UAPSD |
2538 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2539
2540 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2541 ieee80211_vif_type_p2p(vif), vif->addr);
2542
2543 wl12xx_get_vif_count(hw, vif, &vif_count);
2544
2545 mutex_lock(&wl->mutex);
2546
	/*
	 * in some very corner case HW recovery scenarios it's possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
2552 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2553 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2554 ret = -EBUSY;
2555 goto out;
2556 }
2557
2558
2559 ret = wl12xx_init_vif_data(wl, vif);
2560 if (ret < 0)
2561 goto out;
2562
2563 wlvif->wl = wl;
2564 role_type = wl12xx_get_role_type(wl, wlvif);
2565 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2566 ret = -EINVAL;
2567 goto out;
2568 }
2569
2570 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2571 if (ret < 0)
2572 goto out;
2573
	/*
	 * TODO: after the nvs issue is solved, move this block to start()
	 * and make sure the driver is already ON here.
	 */
2578 if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
2583 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2584
2585 ret = wl12xx_init_fw(wl);
2586 if (ret < 0)
2587 goto out;
2588 }
2589
	/* keep the chip awake for the role/ACX commands below */
2594 ret = pm_runtime_get_sync(wl->dev);
2595 if (ret < 0) {
2596 pm_runtime_put_noidle(wl->dev);
2597 goto out_unlock;
2598 }
2599
2600 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2601 wl12xx_force_active_psm(wl);
2602 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2603 mutex_unlock(&wl->mutex);
2604 wl1271_recovery_work(&wl->recovery_work);
2605 return 0;
2606 }
2607
2608 if (!wlcore_is_p2p_mgmt(wlvif)) {
2609 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2610 role_type, &wlvif->role_id);
2611 if (ret < 0)
2612 goto out;
2613
2614 ret = wl1271_init_vif_specific(wl, vif);
2615 if (ret < 0)
2616 goto out;
2617
2618 } else {
2619 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2620 &wlvif->dev_role_id);
2621 if (ret < 0)
2622 goto out;
2623
		/* needed mainly for configuring the rate policies */
2625 ret = wl1271_sta_hw_init(wl, wlvif);
2626 if (ret < 0)
2627 goto out;
2628 }
2629
2630 list_add(&wlvif->list, &wl->wlvif_list);
2631 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2632
2633 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2634 wl->ap_count++;
2635 else
2636 wl->sta_count++;
2637out:
2638 pm_runtime_mark_last_busy(wl->dev);
2639 pm_runtime_put_autosuspend(wl->dev);
2640out_unlock:
2641 mutex_unlock(&wl->mutex);
2642
2643 return ret;
2644}
2645
2646static void __wl1271_op_remove_interface(struct wl1271 *wl,
2647 struct ieee80211_vif *vif,
2648 bool reset_tx_queues)
2649{
2650 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2651 int i, ret;
2652 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2653
2654 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2655
2656 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2657 return;
2658
	/* because of hardware recovery, we may get here twice */
2660 if (wl->state == WLCORE_STATE_OFF)
2661 return;
2662
2663 wl1271_info("down");
2664
2665 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2666 wl->scan_wlvif == wlvif) {
2667 struct cfg80211_scan_info info = {
2668 .aborted = true,
2669 };
2670
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
2675 wl12xx_rearm_tx_watchdog_locked(wl);
2676
2677 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2678 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2679 wl->scan_wlvif = NULL;
2680 wl->scan.req = NULL;
2681 ieee80211_scan_completed(wl->hw, &info);
2682 }
2683
2684 if (wl->sched_vif == wlvif)
2685 wl->sched_vif = NULL;
2686
2687 if (wl->roc_vif == vif) {
2688 wl->roc_vif = NULL;
2689 ieee80211_remain_on_channel_expired(wl->hw);
2690 }
2691
2692 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
2694 ret = pm_runtime_get_sync(wl->dev);
2695 if (ret < 0) {
2696 pm_runtime_put_noidle(wl->dev);
2697 goto deinit;
2698 }
2699
2700 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2701 wlvif->bss_type == BSS_TYPE_IBSS) {
2702 if (wl12xx_dev_role_started(wlvif))
2703 wl12xx_stop_dev(wl, wlvif);
2704 }
2705
2706 if (!wlcore_is_p2p_mgmt(wlvif)) {
2707 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2708 if (ret < 0) {
2709 pm_runtime_put_noidle(wl->dev);
2710 goto deinit;
2711 }
2712 } else {
2713 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2714 if (ret < 0) {
2715 pm_runtime_put_noidle(wl->dev);
2716 goto deinit;
2717 }
2718 }
2719
2720 pm_runtime_mark_last_busy(wl->dev);
2721 pm_runtime_put_autosuspend(wl->dev);
2722 }
2723deinit:
2724 wl12xx_tx_reset_wlvif(wl, wlvif);
2725
	/* clear all hlids (except system_hlid) */
2727 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2728
2729 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2730 wlvif->bss_type == BSS_TYPE_IBSS) {
2731 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2732 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2733 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2734 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2735 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2736 } else {
2737 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2738 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2739 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2740 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2741 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2742 wl12xx_free_rate_policy(wl,
2743 &wlvif->ap.ucast_rate_idx[i]);
2744 wl1271_free_ap_keys(wl, wlvif);
2745 }
2746
2747 dev_kfree_skb(wlvif->probereq);
2748 wlvif->probereq = NULL;
2749 if (wl->last_wlvif == wlvif)
2750 wl->last_wlvif = NULL;
2751 list_del(&wlvif->list);
2752 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2753 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2754 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2755
2756 if (is_ap)
2757 wl->ap_count--;
2758 else
2759 wl->sta_count--;
2760
	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
2765 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2766 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2767 goto unlock;
2768
2769 if (wl->ap_count == 0 && is_ap) {
		/* mask the ap events */
2771 wl->event_mask &= ~wl->ap_event_mask;
2772 wl1271_event_unmask(wl);
2773 }
2774
2775 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2776 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* configure sleep auth according to the debugfs setting */
2778 if (sta_auth != WL1271_PSM_ILLEGAL)
2779 wl1271_acx_sleep_auth(wl, sta_auth);
		/* otherwise fall back to ELP power saving */
2781 else
2782 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2783 }
2784
2785unlock:
2786 mutex_unlock(&wl->mutex);
2787
2788 del_timer_sync(&wlvif->rx_streaming_timer);
2789 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2790 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2791 cancel_work_sync(&wlvif->rc_update_work);
2792 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2793 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2794 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2795
2796 mutex_lock(&wl->mutex);
2797}
2798
2799static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2800 struct ieee80211_vif *vif)
2801{
2802 struct wl1271 *wl = hw->priv;
2803 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2804 struct wl12xx_vif *iter;
2805 struct vif_counter_data vif_count;
2806
2807 wl12xx_get_vif_count(hw, vif, &vif_count);
2808 mutex_lock(&wl->mutex);
2809
2810 if (wl->state == WLCORE_STATE_OFF ||
2811 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2812 goto out;
2813
	/*
	 * wl->vif can be null here if someone shoots down the interface
	 * just when hardware recovery has been started.
	 */
2818 wl12xx_for_each_wlvif(wl, iter) {
2819 if (iter != wlvif)
2820 continue;
2821
2822 __wl1271_op_remove_interface(wl, vif, true);
2823 break;
2824 }
2825 WARN_ON(iter != wlvif);
2826 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2827 wl12xx_force_active_psm(wl);
2828 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2829 wl12xx_queue_recovery_work(wl);
2830 }
2831out:
2832 mutex_unlock(&wl->mutex);
2833}
2834
2835static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2836 struct ieee80211_vif *vif,
2837 enum nl80211_iftype new_type, bool p2p)
2838{
2839 struct wl1271 *wl = hw->priv;
2840 int ret;
2841
2842 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2843 wl1271_op_remove_interface(hw, vif);
2844
2845 vif->type = new_type;
2846 vif->p2p = p2p;
2847 ret = wl1271_op_add_interface(hw, vif);
2848
2849 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2850 return ret;
2851}
2852
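/* start the STA or IBSS role in the firmware (ROLE_START) */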
2853static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2854{
2855 int ret;
2856 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2857
	/*
	 * One of the side effects of the JOIN command is that it clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
2867 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2868 wl1271_info("JOIN while associated.");
2869
	/* clear the encryption type */
2871 wlvif->encryption_type = KEY_NONE;
2872
2873 if (is_ibss)
2874 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2875 else
2876 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2877
2878 return ret;
2879}
2880
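/*
 * Find the SSID IE in the frame payload (starting at @offset) and copy
 * it into wlvif->ssid / wlvif->ssid_len.
 */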
2881static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2882 int offset)
2883{
2884 u8 ssid_len;
2885 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2886 skb->len - offset);
2887
2888 if (!ptr) {
2889 wl1271_error("No SSID in IEs!");
2890 return -ENOENT;
2891 }
2892
2893 ssid_len = ptr[1];
2894 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2895 wl1271_error("SSID is too long!");
2896 return -EINVAL;
2897 }
2898
2899 wlvif->ssid_len = ssid_len;
2900 memcpy(wlvif->ssid, ptr+2, ssid_len);
2901 return 0;
2902}
2903
2904static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2905{
2906 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2907 struct sk_buff *skb;
2908 int ieoffset;
2909
	/* we currently only support setting the ssid from the ap probe req */
2911 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2912 return -EINVAL;
2913
2914 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2915 if (!skb)
2916 return -EINVAL;
2917
2918 ieoffset = offsetof(struct ieee80211_mgmt,
2919 u.probe_req.variable);
2920 wl1271_ssid_set(wlvif, skb, ieoffset);
2921 dev_kfree_skb(skb);
2922
2923 return 0;
2924}
2925
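/*
 * Apply the association state to the firmware: set the AID, build the
 * ps-poll and connection-maintenance templates, enable connection
 * monitoring and keep-alive, and sync the rate policy with the AP's
 * supported rates.
 */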
2926static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2927 struct ieee80211_bss_conf *bss_conf,
2928 u32 sta_rate_set)
2929{
2930 int ieoffset;
2931 int ret;
2932
2933 wlvif->aid = bss_conf->aid;
2934 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2935 wlvif->beacon_int = bss_conf->beacon_int;
2936 wlvif->wmm_enabled = bss_conf->qos;
2937
2938 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2939
	/*
	 * with wl1271, we don't need to update the
	 * beacon_int and dtim_period, because the firmware
	 * updates it by itself when the first beacon is
	 * received after a join.
	 */
2946 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2947 if (ret < 0)
2948 return ret;
2949
	/*
	 * Get a template for hardware connection maintenance
	 */
2953 dev_kfree_skb(wlvif->probereq);
2954 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2955 wlvif,
2956 NULL);
2957 ieoffset = offsetof(struct ieee80211_mgmt,
2958 u.probe_req.variable);
2959 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2960
	/* enable the connection monitoring feature */
2962 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2963 if (ret < 0)
2964 return ret;
2965
	/*
	 * The join command disables the keep-alive mode, shuts down its
	 * process, and also clears the template config, so we need to reset
	 * it all after the join. The acx_aid starts the keep-alive process,
	 * and the order of the commands below is relevant.
	 */
2972 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2973 if (ret < 0)
2974 return ret;
2975
2976 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2977 if (ret < 0)
2978 return ret;
2979
2980 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2981 if (ret < 0)
2982 return ret;
2983
2984 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2985 wlvif->sta.klv_template_id,
2986 ACX_KEEP_ALIVE_TPL_VALID);
2987 if (ret < 0)
2988 return ret;
2989
	/*
	 * The default fw psm configuration is AUTO, while mac80211 default
	 * setting is off (ACTIVE), so sync the fw with the correct value.
	 */
2994 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2995 if (ret < 0)
2996 return ret;
2997
2998 if (sta_rate_set) {
2999 wlvif->rate_set =
3000 wl1271_tx_enabled_rates_get(wl,
3001 sta_rate_set,
3002 wlvif->band);
3003 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3004 if (ret < 0)
3005 return ret;
3006 }
3007
3008 return ret;
3009}
3010
3011static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3012{
3013 int ret;
3014 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3015
	/* make sure we are actually associated (sta) */
3017 if (sta &&
3018 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3019 return false;
3020
	/* make sure we are joined (ibss) */
3022 if (!sta &&
3023 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3024 return false;
3025
3026 if (sta) {
		/* use defaults when not associated */
3028 wlvif->aid = 0;
3029
		/* free the probe-request template */
3031 dev_kfree_skb(wlvif->probereq);
3032 wlvif->probereq = NULL;
3033
		/* disable the connection monitoring features */
3035 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3036 if (ret < 0)
3037 return ret;
3038
		/* disable the keep-alive feature */
3040 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3041 if (ret < 0)
3042 return ret;
3043
		/* disable beacon filtering */
3045 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3046 if (ret < 0)
3047 return ret;
3048 }
3049
3050 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3051 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3052
3053 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3054 ieee80211_chswitch_done(vif, false);
3055 cancel_delayed_work(&wlvif->channel_switch_work);
3056 }
3057
	/* invalidate the keep-alive template */
3059 wl1271_acx_keep_alive_config(wl, wlvif,
3060 wlvif->sta.klv_template_id,
3061 ACX_KEEP_ALIVE_TPL_INVALID);
3062
3063 return 0;
3064}
3065
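/* reset the rate sets to the per-band defaults configured at init time */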
3066static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3067{
3068 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3069 wlvif->rate_set = wlvif->basic_rate_set;
3070}
3071
3072static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3073 bool idle)
3074{
3075 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3076
3077 if (idle == cur_idle)
3078 return;
3079
3080 if (idle) {
3081 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3082 } else {
		/* the current firmware only supports sched_scan while idle */
3084 if (wl->sched_vif == wlvif)
3085 wl->ops->sched_scan_stop(wl, wlvif);
3086
3087 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3088 }
3089}
3090
3091static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3092 struct ieee80211_conf *conf, u32 changed)
3093{
3094 int ret;
3095
3096 if (wlcore_is_p2p_mgmt(wlvif))
3097 return 0;
3098
3099 if (conf->power_level != wlvif->power_level) {
3100 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3101 if (ret < 0)
3102 return ret;
3103
3104 wlvif->power_level = conf->power_level;
3105 }
3106
3107 return 0;
3108}
3109
3110static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3111{
3112 struct wl1271 *wl = hw->priv;
3113 struct wl12xx_vif *wlvif;
3114 struct ieee80211_conf *conf = &hw->conf;
3115 int ret = 0;
3116
3117 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3118 " changed 0x%x",
3119 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3120 conf->power_level,
3121 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3122 changed);
3123
3124 mutex_lock(&wl->mutex);
3125
3126 if (changed & IEEE80211_CONF_CHANGE_POWER)
3127 wl->power_level = conf->power_level;
3128
3129 if (unlikely(wl->state != WLCORE_STATE_ON))
3130 goto out;
3131
3132 ret = pm_runtime_get_sync(wl->dev);
3133 if (ret < 0) {
3134 pm_runtime_put_noidle(wl->dev);
3135 goto out;
3136 }
3137
	/* configure each interface */
3139 wl12xx_for_each_wlvif(wl, wlvif) {
3140 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3141 if (ret < 0)
3142 goto out_sleep;
3143 }
3144
3145out_sleep:
3146 pm_runtime_mark_last_busy(wl->dev);
3147 pm_runtime_put_autosuspend(wl->dev);
3148
3149out:
3150 mutex_unlock(&wl->mutex);
3151
3152 return ret;
3153}
3154
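/*
 * Multicast filter state, built in wl1271_op_prepare_multicast() and
 * consumed (then freed) in wl1271_op_configure_filter().
 */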
3155struct wl1271_filter_params {
3156 bool enabled;
3157 int mc_list_length;
3158 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3159};
3160
3161static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3162 struct netdev_hw_addr_list *mc_list)
3163{
3164 struct wl1271_filter_params *fp;
3165 struct netdev_hw_addr *ha;
3166
3167 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3168 if (!fp) {
3169 wl1271_error("Out of memory setting filters.");
3170 return 0;
3171 }
3172
	/* update the multicast filtering parameters */
3174 fp->mc_list_length = 0;
3175 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3176 fp->enabled = false;
3177 } else {
3178 fp->enabled = true;
3179 netdev_hw_addr_list_for_each(ha, mc_list) {
3180 memcpy(fp->mc_list[fp->mc_list_length],
3181 ha->addr, ETH_ALEN);
3182 fp->mc_list_length++;
3183 }
3184 }
3185
3186 return (u64)(unsigned long)fp;
3187}
3188
3189#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3190 FIF_FCSFAIL | \
3191 FIF_BCN_PRBRESP_PROMISC | \
3192 FIF_CONTROL | \
3193 FIF_OTHER_BSS)
3194
3195static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3196 unsigned int changed,
3197 unsigned int *total, u64 multicast)
3198{
3199 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3200 struct wl1271 *wl = hw->priv;
3201 struct wl12xx_vif *wlvif;
3202
3203 int ret;
3204
3205 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3206 " total %x", changed, *total);
3207
3208 mutex_lock(&wl->mutex);
3209
3210 *total &= WL1271_SUPPORTED_FILTERS;
3211 changed &= WL1271_SUPPORTED_FILTERS;
3212
3213 if (unlikely(wl->state != WLCORE_STATE_ON))
3214 goto out;
3215
3216 ret = pm_runtime_get_sync(wl->dev);
3217 if (ret < 0) {
3218 pm_runtime_put_noidle(wl->dev);
3219 goto out;
3220 }
3221
3222 wl12xx_for_each_wlvif(wl, wlvif) {
3223 if (wlcore_is_p2p_mgmt(wlvif))
3224 continue;
3225
3226 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3227 if (*total & FIF_ALLMULTI)
3228 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3229 false,
3230 NULL, 0);
3231 else if (fp)
3232 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3233 fp->enabled,
3234 fp->mc_list,
3235 fp->mc_list_length);
3236 if (ret < 0)
3237 goto out_sleep;
3238 }
3239
		/*
		 * If the interface is in AP mode and was created with
		 * allmulticast, disable the firmware filters so that all
		 * multicast packets are passed. This is needed by
		 * mDNS-based discovery protocols.
		 */
3245 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3246 if (*total & FIF_ALLMULTI) {
3247 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3248 false,
3249 NULL, 0);
3250 if (ret < 0)
3251 goto out_sleep;
3252 }
3253 }
3254 }
3255
	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */
3262out_sleep:
3263 pm_runtime_mark_last_busy(wl->dev);
3264 pm_runtime_put_autosuspend(wl->dev);
3265
3266out:
3267 mutex_unlock(&wl->mutex);
3268 kfree(fp);
3269}
3270
3271static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3272 u8 id, u8 key_type, u8 key_size,
3273 const u8 *key, u8 hlid, u32 tx_seq_32,
3274 u16 tx_seq_16, bool is_pairwise)
3275{
3276 struct wl1271_ap_key *ap_key;
3277 int i;
3278
3279 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3280
3281 if (key_size > MAX_KEY_SIZE)
3282 return -EINVAL;
3283
	/*
	 * Find the next free entry in ap_keys. Also check that we are not
	 * replacing an existing key.
	 */
3288 for (i = 0; i < MAX_NUM_KEYS; i++) {
3289 if (wlvif->ap.recorded_keys[i] == NULL)
3290 break;
3291
3292 if (wlvif->ap.recorded_keys[i]->id == id) {
3293 wl1271_warning("trying to record key replacement");
3294 return -EINVAL;
3295 }
3296 }
3297
3298 if (i == MAX_NUM_KEYS)
3299 return -EBUSY;
3300
3301 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3302 if (!ap_key)
3303 return -ENOMEM;
3304
3305 ap_key->id = id;
3306 ap_key->key_type = key_type;
3307 ap_key->key_size = key_size;
3308 memcpy(ap_key->key, key, key_size);
3309 ap_key->hlid = hlid;
3310 ap_key->tx_seq_32 = tx_seq_32;
3311 ap_key->tx_seq_16 = tx_seq_16;
3312 ap_key->is_pairwise = is_pairwise;
3313
3314 wlvif->ap.recorded_keys[i] = ap_key;
3315 return 0;
3316}
3317
3318static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3319{
3320 int i;
3321
3322 for (i = 0; i < MAX_NUM_KEYS; i++) {
3323 kfree(wlvif->ap.recorded_keys[i]);
3324 wlvif->ap.recorded_keys[i] = NULL;
3325 }
3326}
3327
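/*
 * Push all keys recorded before the AP was started into the firmware,
 * now that the AP role and its broadcast link exist.
 */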
3328static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3329{
3330 int i, ret = 0;
3331 struct wl1271_ap_key *key;
3332 bool wep_key_added = false;
3333
3334 for (i = 0; i < MAX_NUM_KEYS; i++) {
3335 u8 hlid;
3336 if (wlvif->ap.recorded_keys[i] == NULL)
3337 break;
3338
3339 key = wlvif->ap.recorded_keys[i];
3340 hlid = key->hlid;
3341 if (hlid == WL12XX_INVALID_LINK_ID)
3342 hlid = wlvif->ap.bcast_hlid;
3343
3344 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3345 key->id, key->key_type,
3346 key->key_size, key->key,
3347 hlid, key->tx_seq_32,
3348 key->tx_seq_16, key->is_pairwise);
3349 if (ret < 0)
3350 goto out;
3351
3352 if (key->key_type == KEY_WEP)
3353 wep_key_added = true;
3354 }
3355
3356 if (wep_key_added) {
3357 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3358 wlvif->ap.bcast_hlid);
3359 if (ret < 0)
3360 goto out;
3361 }
3362
3363out:
3364 wl1271_free_ap_keys(wl, wlvif);
3365 return ret;
3366}
3367
3368static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3369 u16 action, u8 id, u8 key_type,
3370 u8 key_size, const u8 *key, u32 tx_seq_32,
3371 u16 tx_seq_16, struct ieee80211_sta *sta,
3372 bool is_pairwise)
3373{
3374 int ret;
3375 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3376
3377 if (is_ap) {
3378 struct wl1271_station *wl_sta;
3379 u8 hlid;
3380
3381 if (sta) {
3382 wl_sta = (struct wl1271_station *)sta->drv_priv;
3383 hlid = wl_sta->hlid;
3384 } else {
3385 hlid = wlvif->ap.bcast_hlid;
3386 }
3387
3388 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
3393 if (action != KEY_ADD_OR_REPLACE)
3394 return 0;
3395
3396 ret = wl1271_record_ap_key(wl, wlvif, id,
3397 key_type, key_size,
3398 key, hlid, tx_seq_32,
3399 tx_seq_16, is_pairwise);
3400 } else {
3401 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3402 id, key_type, key_size,
3403 key, hlid, tx_seq_32,
3404 tx_seq_16, is_pairwise);
3405 }
3406
3407 if (ret < 0)
3408 return ret;
3409 } else {
3410 const u8 *addr;
3411 static const u8 bcast_addr[ETH_ALEN] = {
3412 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3413 };
3414
3415 addr = sta ? sta->addr : bcast_addr;
3416
3417 if (is_zero_ether_addr(addr)) {
			/* we don't support TX-only encryption */
3419 return -EOPNOTSUPP;
3420 }
3421
		/*
		 * The wl1271 does not allow to remove unicast keys - they
		 * will be cleared automatically on next CMD_JOIN. Ignore the
		 * request silently, as we don't want mac80211 to emit an
		 * error message.
		 */
3426 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3427 return 0;
3428
		/* don't remove the key if the hlid was already deleted */
3430 if (action == KEY_REMOVE &&
3431 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3432 return 0;
3433
3434 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3435 id, key_type, key_size,
3436 key, addr, tx_seq_32,
3437 tx_seq_16);
3438 if (ret < 0)
3439 return ret;
3440
3441 }
3442
3443 return 0;
3444}
3445
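/*
 * mac80211 set_key callback. GEM and TKIP keys change the FW spare-block
 * accounting, so the TX queues are stopped and flushed around the key
 * operation in that case.
 */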
3446static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3447 struct ieee80211_vif *vif,
3448 struct ieee80211_sta *sta,
3449 struct ieee80211_key_conf *key_conf)
3450{
3451 struct wl1271 *wl = hw->priv;
3452 int ret;
3453 bool might_change_spare =
3454 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3455 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3456
3457 if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
3462 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3463 wl1271_tx_flush(wl);
3464 }
3465
3466 mutex_lock(&wl->mutex);
3467
3468 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3469 ret = -EAGAIN;
3470 goto out_wake_queues;
3471 }
3472
3473 ret = pm_runtime_get_sync(wl->dev);
3474 if (ret < 0) {
3475 pm_runtime_put_noidle(wl->dev);
3476 goto out_wake_queues;
3477 }
3478
3479 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3480
3481 pm_runtime_mark_last_busy(wl->dev);
3482 pm_runtime_put_autosuspend(wl->dev);
3483
3484out_wake_queues:
3485 if (might_change_spare)
3486 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3487
3488 mutex_unlock(&wl->mutex);
3489
3490 return ret;
3491}
3492
3493int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3494 struct ieee80211_vif *vif,
3495 struct ieee80211_sta *sta,
3496 struct ieee80211_key_conf *key_conf)
3497{
3498 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3499 int ret;
3500 u32 tx_seq_32 = 0;
3501 u16 tx_seq_16 = 0;
3502 u8 key_type;
3503 u8 hlid;
3504 bool is_pairwise;
3505
3506 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3507
3508 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3509 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3510 key_conf->cipher, key_conf->keyidx,
3511 key_conf->keylen, key_conf->flags);
3512 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3513
3514 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3515 if (sta) {
3516 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3517 hlid = wl_sta->hlid;
3518 } else {
3519 hlid = wlvif->ap.bcast_hlid;
3520 }
3521 else
3522 hlid = wlvif->sta.hlid;
3523
3524 if (hlid != WL12XX_INVALID_LINK_ID) {
3525 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3526 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3527 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3528 }
3529
3530 switch (key_conf->cipher) {
3531 case WLAN_CIPHER_SUITE_WEP40:
3532 case WLAN_CIPHER_SUITE_WEP104:
3533 key_type = KEY_WEP;
3534
3535 key_conf->hw_key_idx = key_conf->keyidx;
3536 break;
3537 case WLAN_CIPHER_SUITE_TKIP:
3538 key_type = KEY_TKIP;
3539 key_conf->hw_key_idx = key_conf->keyidx;
3540 break;
3541 case WLAN_CIPHER_SUITE_CCMP:
3542 key_type = KEY_AES;
3543 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3544 break;
3545 case WL1271_CIPHER_SUITE_GEM:
3546 key_type = KEY_GEM;
3547 break;
3548 default:
3549 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3550
3551 return -EOPNOTSUPP;
3552 }
3553
3554 is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3555
3556 switch (cmd) {
3557 case SET_KEY:
3558 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3559 key_conf->keyidx, key_type,
3560 key_conf->keylen, key_conf->key,
3561 tx_seq_32, tx_seq_16, sta, is_pairwise);
3562 if (ret < 0) {
3563 wl1271_error("Could not add or replace key");
3564 return ret;
3565 }
3566
		/*
		 * reconfigure the arp response if the unicast (or common)
		 * encryption key type was changed
		 */
3571 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3572 (sta || key_type == KEY_WEP) &&
3573 wlvif->encryption_type != key_type) {
3574 wlvif->encryption_type = key_type;
3575 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3576 if (ret < 0) {
3577 wl1271_warning("build arp rsp failed: %d", ret);
3578 return ret;
3579 }
3580 }
3581 break;
3582
3583 case DISABLE_KEY:
3584 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3585 key_conf->keyidx, key_type,
3586 key_conf->keylen, key_conf->key,
3587 0, 0, sta, is_pairwise);
3588 if (ret < 0) {
3589 wl1271_error("Could not remove key");
3590 return ret;
3591 }
3592 break;
3593
3594 default:
3595 wl1271_error("Unsupported key cmd 0x%x", cmd);
3596 return -EOPNOTSUPP;
3597 }
3598
3599 return ret;
3600}
3601EXPORT_SYMBOL_GPL(wlcore_set_key);
3602
3603static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3604 struct ieee80211_vif *vif,
3605 int key_idx)
3606{
3607 struct wl1271 *wl = hw->priv;
3608 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3609 int ret;
3610
3611 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3612 key_idx);
3613
	/* we don't handle the unsetting of the default key */
3615 if (key_idx == -1)
3616 return;
3617
3618 mutex_lock(&wl->mutex);
3619
3620 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3621 ret = -EAGAIN;
3622 goto out_unlock;
3623 }
3624
3625 ret = pm_runtime_get_sync(wl->dev);
3626 if (ret < 0) {
3627 pm_runtime_put_noidle(wl->dev);
3628 goto out_unlock;
3629 }
3630
3631 wlvif->default_key = key_idx;
3632
	/* the default WEP key needs to be configured at least once */
3634 if (wlvif->encryption_type == KEY_WEP) {
3635 ret = wl12xx_cmd_set_default_wep_key(wl,
3636 key_idx,
3637 wlvif->sta.hlid);
3638 if (ret < 0)
3639 goto out_sleep;
3640 }
3641
3642out_sleep:
3643 pm_runtime_mark_last_busy(wl->dev);
3644 pm_runtime_put_autosuspend(wl->dev);
3645
3646out_unlock:
3647 mutex_unlock(&wl->mutex);
3648}
3649
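/*
 * Push the current regulatory domain configuration to the firmware.
 * Only needed on chips that set WLCORE_QUIRK_REGDOMAIN_CONF; a failure
 * here triggers a recovery.
 */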
3650void wlcore_regdomain_config(struct wl1271 *wl)
3651{
3652 int ret;
3653
3654 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3655 return;
3656
3657 mutex_lock(&wl->mutex);
3658
3659 if (unlikely(wl->state != WLCORE_STATE_ON))
3660 goto out;
3661
3662 ret = pm_runtime_get_sync(wl->dev);
3663 if (ret < 0) {
3664 pm_runtime_put_autosuspend(wl->dev);
3665 goto out;
3666 }
3667
3668 ret = wlcore_cmd_regdomain_config_locked(wl);
3669 if (ret < 0) {
3670 wl12xx_queue_recovery_work(wl);
3671 goto out;
3672 }
3673
3674 pm_runtime_mark_last_busy(wl->dev);
3675 pm_runtime_put_autosuspend(wl->dev);
3676out:
3677 mutex_unlock(&wl->mutex);
3678}
3679
3680static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3681 struct ieee80211_vif *vif,
3682 struct ieee80211_scan_request *hw_req)
3683{
3684 struct cfg80211_scan_request *req = &hw_req->req;
3685 struct wl1271 *wl = hw->priv;
3686 int ret;
3687 u8 *ssid = NULL;
3688 size_t len = 0;
3689
3690 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3691
3692 if (req->n_ssids) {
3693 ssid = req->ssids[0].ssid;
3694 len = req->ssids[0].ssid_len;
3695 }
3696
3697 mutex_lock(&wl->mutex);
3698
3699 if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
3705 ret = -EAGAIN;
3706 goto out;
3707 }
3708
3709 ret = pm_runtime_get_sync(wl->dev);
3710 if (ret < 0) {
3711 pm_runtime_put_noidle(wl->dev);
3712 goto out;
3713 }
3714
	/* fail if there is any role in ROC */
3716 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */
3718 ret = -EBUSY;
3719 goto out_sleep;
3720 }
3721
3722 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3723out_sleep:
3724 pm_runtime_mark_last_busy(wl->dev);
3725 pm_runtime_put_autosuspend(wl->dev);
3726out:
3727 mutex_unlock(&wl->mutex);
3728
3729 return ret;
3730}
3731
3732static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3733 struct ieee80211_vif *vif)
3734{
3735 struct wl1271 *wl = hw->priv;
3736 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3737 struct cfg80211_scan_info info = {
3738 .aborted = true,
3739 };
3740 int ret;
3741
3742 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3743
3744 mutex_lock(&wl->mutex);
3745
3746 if (unlikely(wl->state != WLCORE_STATE_ON))
3747 goto out;
3748
3749 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3750 goto out;
3751
3752 ret = pm_runtime_get_sync(wl->dev);
3753 if (ret < 0) {
3754 pm_runtime_put_noidle(wl->dev);
3755 goto out;
3756 }
3757
3758 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3759 ret = wl->ops->scan_stop(wl, wlvif);
3760 if (ret < 0)
3761 goto out_sleep;
3762 }
3763
	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
3768 wl12xx_rearm_tx_watchdog_locked(wl);
3769
3770 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3771 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3772 wl->scan_wlvif = NULL;
3773 wl->scan.req = NULL;
3774 ieee80211_scan_completed(wl->hw, &info);
3775
3776out_sleep:
3777 pm_runtime_mark_last_busy(wl->dev);
3778 pm_runtime_put_autosuspend(wl->dev);
3779out:
3780 mutex_unlock(&wl->mutex);
3781
3782 cancel_delayed_work_sync(&wl->scan_complete_work);
3783}
3784
3785static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3786 struct ieee80211_vif *vif,
3787 struct cfg80211_sched_scan_request *req,
3788 struct ieee80211_scan_ies *ies)
3789{
3790 struct wl1271 *wl = hw->priv;
3791 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3792 int ret;
3793
3794 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3795
3796 mutex_lock(&wl->mutex);
3797
3798 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3799 ret = -EAGAIN;
3800 goto out;
3801 }
3802
3803 ret = pm_runtime_get_sync(wl->dev);
3804 if (ret < 0) {
3805 pm_runtime_put_noidle(wl->dev);
3806 goto out;
3807 }
3808
3809 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3810 if (ret < 0)
3811 goto out_sleep;
3812
3813 wl->sched_vif = wlvif;
3814
3815out_sleep:
3816 pm_runtime_mark_last_busy(wl->dev);
3817 pm_runtime_put_autosuspend(wl->dev);
3818out:
3819 mutex_unlock(&wl->mutex);
3820 return ret;
3821}
3822
3823static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3824 struct ieee80211_vif *vif)
3825{
3826 struct wl1271 *wl = hw->priv;
3827 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3828 int ret;
3829
3830 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3831
3832 mutex_lock(&wl->mutex);
3833
3834 if (unlikely(wl->state != WLCORE_STATE_ON))
3835 goto out;
3836
3837 ret = pm_runtime_get_sync(wl->dev);
3838 if (ret < 0) {
3839 pm_runtime_put_noidle(wl->dev);
3840 goto out;
3841 }
3842
3843 wl->ops->sched_scan_stop(wl, wlvif);
3844
3845 pm_runtime_mark_last_busy(wl->dev);
3846 pm_runtime_put_autosuspend(wl->dev);
3847out:
3848 mutex_unlock(&wl->mutex);
3849
3850 return 0;
3851}
3852
3853static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3854{
3855 struct wl1271 *wl = hw->priv;
3856 int ret = 0;
3857
3858 mutex_lock(&wl->mutex);
3859
3860 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3861 ret = -EAGAIN;
3862 goto out;
3863 }
3864
3865 ret = pm_runtime_get_sync(wl->dev);
3866 if (ret < 0) {
3867 pm_runtime_put_noidle(wl->dev);
3868 goto out;
3869 }
3870
3871 ret = wl1271_acx_frag_threshold(wl, value);
3872 if (ret < 0)
3873 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3874
3875 pm_runtime_mark_last_busy(wl->dev);
3876 pm_runtime_put_autosuspend(wl->dev);
3877
3878out:
3879 mutex_unlock(&wl->mutex);
3880
3881 return ret;
3882}
3883
3884static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3885{
3886 struct wl1271 *wl = hw->priv;
3887 struct wl12xx_vif *wlvif;
3888 int ret = 0;
3889
3890 mutex_lock(&wl->mutex);
3891
3892 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3893 ret = -EAGAIN;
3894 goto out;
3895 }
3896
3897 ret = pm_runtime_get_sync(wl->dev);
3898 if (ret < 0) {
3899 pm_runtime_put_noidle(wl->dev);
3900 goto out;
3901 }
3902
3903 wl12xx_for_each_wlvif(wl, wlvif) {
3904 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3905 if (ret < 0)
3906 wl1271_warning("set rts threshold failed: %d", ret);
3907 }
3908 pm_runtime_mark_last_busy(wl->dev);
3909 pm_runtime_put_autosuspend(wl->dev);
3910
3911out:
3912 mutex_unlock(&wl->mutex);
3913
3914 return ret;
3915}
3916
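/* remove a single IE (identified by its element id) from an skb in place */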
3917static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3918{
3919 int len;
3920 const u8 *next, *end = skb->data + skb->len;
3921 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3922 skb->len - ieoffset);
3923 if (!ie)
3924 return;
3925 len = ie[1] + 2;
3926 next = ie + len;
3927 memmove(ie, next, end - next);
3928 skb_trim(skb, skb->len - len);
3929}
3930
3931static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3932 unsigned int oui, u8 oui_type,
3933 int ieoffset)
3934{
3935 int len;
3936 const u8 *next, *end = skb->data + skb->len;
3937 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3938 skb->data + ieoffset,
3939 skb->len - ieoffset);
3940 if (!ie)
3941 return;
3942 len = ie[1] + 2;
3943 next = ie + len;
3944 memmove(ie, next, end - next);
3945 skb_trim(skb, skb->len - len);
3946}
3947
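/* set the AP probe-response template from the frame generated by mac80211 */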
3948static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3949 struct ieee80211_vif *vif)
3950{
3951 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3952 struct sk_buff *skb;
3953 int ret;
3954
3955 skb = ieee80211_proberesp_get(wl->hw, vif);
3956 if (!skb)
3957 return -EOPNOTSUPP;
3958
3959 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3960 CMD_TEMPL_AP_PROBE_RESPONSE,
3961 skb->data,
3962 skb->len, 0,
3963 rates);
3964 dev_kfree_skb(skb);
3965
3966 if (ret < 0)
3967 goto out;
3968
3969 wl1271_debug(DEBUG_AP, "probe response updated");
3970 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3971
3972out:
3973 return ret;
3974}
3975
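/*
 * Legacy path for chips that derive the probe response from beacon data:
 * if wlvif has no SSID set, splice the SSID from bss_conf into the
 * template before setting it.
 */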
3976static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3977 struct ieee80211_vif *vif,
3978 u8 *probe_rsp_data,
3979 size_t probe_rsp_len,
3980 u32 rates)
3981{
3982 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3983 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3984 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3985 int ssid_ie_offset, ie_offset, templ_len;
3986 const u8 *ptr;
3987
	/* no need to change the probe response if the SSID is already set */
3989 if (wlvif->ssid_len > 0)
3990 return wl1271_cmd_template_set(wl, wlvif->role_id,
3991 CMD_TEMPL_AP_PROBE_RESPONSE,
3992 probe_rsp_data,
3993 probe_rsp_len, 0,
3994 rates);
3995
3996 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3997 wl1271_error("probe_rsp template too big");
3998 return -EINVAL;
3999 }
4000
	/* start looking for the SSID IE at the beginning of the IEs */
4002 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4003
4004 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4005 probe_rsp_len - ie_offset);
4006 if (!ptr) {
4007 wl1271_error("No SSID in beacon!");
4008 return -EINVAL;
4009 }
4010
4011 ssid_ie_offset = ptr - probe_rsp_data;
4012 ptr += (ptr[1] + 2);
4013
4014 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4015
	/* insert the SSID from bss_conf */
4017 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4018 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4019 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4020 bss_conf->ssid, bss_conf->ssid_len);
4021 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4022
4023 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4024 ptr, probe_rsp_len - (ptr - probe_rsp_data));
4025 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4026
4027 return wl1271_cmd_template_set(wl, wlvif->role_id,
4028 CMD_TEMPL_AP_PROBE_RESPONSE,
4029 probe_rsp_templ,
4030 templ_len, 0,
4031 rates);
4032}
4033
4034static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4035 struct ieee80211_vif *vif,
4036 struct ieee80211_bss_conf *bss_conf,
4037 u32 changed)
4038{
4039 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4040 int ret = 0;
4041
4042 if (changed & BSS_CHANGED_ERP_SLOT) {
4043 if (bss_conf->use_short_slot)
4044 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4045 else
4046