// SPDX-License-Identifier: GPL-2.0
/* DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
#include <net/pkt_cls.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1

static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}

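/* The FDB and ACL table pools below are sized to the number of switch
 * interfaces: every port needs a private table while standalone, and the
 * tables get shared once ports are placed under the same bridge (FDB) or
 * bound to the same tc block (ACL).
 */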
static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}

static struct dpaa2_switch_acl_tbl *
dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->acls[i].in_use)
			return &ethsw->acls[i];
	return NULL;
}

static int dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use this one.
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others are standalone. We
		 * can just keep the FDB that we already have.
		 */
		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The below call to ASSERT_RTNL() also validates the usage of
	 * netdev_for_each_lower_dev() below.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
	 * to be present in that bridge
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
		 */
		port_priv->fdb->in_use = false;
		port_priv->fdb->bridge_dev = NULL;

		/* Get a reference to the new FDB */
		port_priv->fdb = other_port_priv->fdb;
	}

	/* Keep track of the upper bridge device */
	port_priv->fdb->bridge_dev = bridge_dev;

	return 0;
}

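/* An egress flood domain for a given FDB contains all the switch ports that
 * share that FDB and have the corresponding flood flag set, plus the control
 * interface used by the driver itself to inject and receive frames.
 */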
static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
					   enum dpsw_flood_type type,
					   struct dpsw_egress_flood_cfg *cfg)
{
	int i = 0, j;

	memset(cfg, 0, sizeof(*cfg));

	/* Add all the DPAA2 switch ports found in the same bridging domain to
	 * the egress flooding domain
	 */
	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
		if (!ethsw->ports[j])
			continue;
		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
			continue;

		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
	}

	/* Add the CTRL interface to the egress flooding domain */
	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;

	cfg->fdb_id = fdb_id;
	cfg->flood_type = type;
	cfg->num_ifs = i;
}

static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
{
	struct dpsw_egress_flood_cfg flood_cfg;
	int err;

	/* Setup broadcast flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	/* Setup unknown unicast flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	return 0;
}

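/* Translate an IOVA taken from a frame descriptor back into a kernel virtual
 * address. When the device sits behind an IOMMU, the physical address has to
 * be looked up in the IOMMU domain; otherwise the IOVA already is the
 * physical address.
 */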
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_vlan_cfg vcfg = {0};
	int err;

	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_vlan_add(ethsw->mc_io, 0,
			    ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;

	return 0;
}

static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
{
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return true;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	return state.up ? true : false;
}

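/* The hardware only allows the default VLAN (PVID) of a port to be changed
 * while the interface is disabled, so the port is temporarily brought down
 * around the dpsw_if_set_tci() call below if it was up.
 */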
static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_tci_cfg tci_cfg = { 0 };
	bool up;
	int err, ret;

	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
		return err;
	}

	tci_cfg.vlan_id = pvid;

	/* Interface needs to be down to change PVID */
	up = dpaa2_switch_port_is_up(port_priv);
	if (up) {
		err = dpsw_if_disable(ethsw->mc_io, 0,
				      ethsw->dpsw_handle,
				      port_priv->idx);
		if (err) {
			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
			return err;
		}
	}

	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
		goto set_tci_error;
	}

	/* Delete previous PVID info and mark the new one */
	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
	port_priv->pvid = pvid;

set_tci_error:
	if (up) {
		ret = dpsw_if_enable(ethsw->mc_io, 0,
				     ethsw->dpsw_handle,
				     port_priv->idx);
		if (ret) {
			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
			return ret;
		}
	}

	return err;
}

static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
				      u16 vid, u16 flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg = {0};
	int err;

	if (port_priv->vlans[vid]) {
		netdev_warn(netdev, "VLAN %d already configured\n", vid);
		return -EEXIST;
	}

	/* If hit, this VLAN rule will lead the packet into the FDB table
	 * specified in the vlan configuration below
	 */
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = dpaa2_switch_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}

static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
{
	switch (state) {
	case BR_STATE_DISABLED:
		return DPSW_STP_STATE_DISABLED;
	case BR_STATE_LISTENING:
		return DPSW_STP_STATE_LISTENING;
	case BR_STATE_LEARNING:
		return DPSW_STP_STATE_LEARNING;
	case BR_STATE_FORWARDING:
		return DPSW_STP_STATE_FORWARDING;
	case BR_STATE_BLOCKING:
		return DPSW_STP_STATE_BLOCKING;
	default:
		return DPSW_STP_STATE_DISABLED;
	}
}

static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
	struct dpsw_stp_cfg stp_cfg = {0};
	int err;
	u16 vid;

	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
		return 0;

	stp_cfg.state = br_stp_state_to_dpsw(state);
	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
			stp_cfg.vlan_id = vid;
			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
					      port_priv->ethsw_data->dpsw_handle,
					      port_priv->idx, &stp_cfg);
			if (err) {
				netdev_err(port_priv->netdev,
					   "dpsw_if_set_stp err %d\n", err);
				return err;
			}
		}
	}

	port_priv->stp_state = state;

	return 0;
}

static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
	struct ethsw_port_priv *ppriv_local = NULL;
	int i, err;

	if (!ethsw->vlans[vid])
		return -ENOENT;

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = 0;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		ppriv_local = ethsw->ports[i];
		ppriv_local->vlans[vid] = 0;
	}

	return 0;
}

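/* The helpers below install and remove static FDB entries through the MC
 * firmware. Unicast entries point to a single egress interface, while
 * multicast entries carry a list of member interfaces.
 */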
static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
				   port_priv->ethsw_data->dpsw_handle,
				   fdb_id, &entry);
	if (err)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_add_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
				      port_priv->ethsw_data->dpsw_handle,
				      fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     fdb_id, &entry);
	/* Silently discard error for calling multiple times the add command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
			   err);
	return err;
}

static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
					port_priv->ethsw_data->dpsw_handle,
					fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENAVAIL)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_multicast err %d\n", err);
	return err;
}

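/* Fill in an rtnl_link_stats64 structure from the per-interface counters kept
 * by the MC firmware. Filtered ingress frames are folded into rx_dropped
 * since there is no dedicated field for them.
 */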
static void dpaa2_switch_port_get_stats(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	u64 tmp;
	int err;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME_DISCARD,
				  &stats->rx_dropped);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FLTR_FRAME,
				  &tmp);
	if (err)
		goto error;
	stats->rx_dropped += tmp;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME_DISCARD,
				  &stats->tx_dropped);
	if (err)
		goto error;

	return;

error:
	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
}

static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
						int attr_id)
{
	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
}

static int dpaa2_switch_port_get_offload_stats(int attr_id,
					       const struct net_device *netdev,
					       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
		return 0;
	}

	return -EINVAL;
}

static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
					   0,
					   port_priv->ethsw_data->dpsw_handle,
					   port_priv->idx,
					   (u16)ETHSW_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(netdev,
			   "dpsw_if_set_max_frame_length() err %d\n", err);
		return err;
	}

	netdev->mtu = mtu;
	return 0;
}

static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpsw_link_state state;
	int err;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}

/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all
 * switch ports. Therefore, we only need to enable the NAPI instance once, the
 * first time one of the switch ports runs .dev_open().
 */
static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* a new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* if there is already a user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}

static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* Explicitly set carrier off, otherwise
	 * netif_carrier_ok() will return true and cause 'ip link show'
	 * to report the LOWER_UP flag, even though the link
	 * notification wasn't even received.
	 */
	netif_carrier_off(netdev);

	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
			     port_priv->ethsw_data->dpsw_handle,
			     port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
		return err;
	}

	/* sync carrier state */
	err = dpaa2_switch_port_carrier_state_sync(netdev);
	if (err) {
		netdev_err(netdev,
			   "dpaa2_switch_port_carrier_state_sync err %d\n", err);
		goto err_carrier_sync;
	}

	dpaa2_switch_enable_ctrl_if_napi(ethsw);

	return 0;

err_carrier_sync:
	dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			port_priv->ethsw_data->dpsw_handle,
			port_priv->idx);
	return err;
}

static int dpaa2_switch_port_stop(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			      port_priv->ethsw_data->dpsw_handle,
			      port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
		return err;
	}

	dpaa2_switch_disable_ctrl_if_napi(ethsw);

	return 0;
}

static int dpaa2_switch_port_parent_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct ethsw_port_priv *port_priv = netdev_priv(dev);

	ppid->id_len = 1;
	ppid->id[0] = port_priv->ethsw_data->dev_id;

	return 0;
}

static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
					   size_t len)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = snprintf(name, len, "p%d", port_priv->idx);
	if (err >= len)
		return -EINVAL;

	return 0;
}

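/* FDB dumps work on a raw buffer shared with the MC firmware: the buffer is
 * DMA mapped, dpsw_fdb_dump() fills it with fdb_dump_entry records, and the
 * entries are iterated with a callback once the buffer has been unmapped.
 */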
struct ethsw_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
				    struct ethsw_dump_ctx *dump)
{
	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_dynamic ? NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}

static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}

static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}

static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);

	return 0;
}

static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
{
	dpaa2_switch_fdb_iterate(port_priv,
				 dpaa2_switch_fdb_entry_fast_age, NULL);
}

static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
				      u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_add(netdev, &vlan);
}

static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
				       u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_del(netdev, &vlan);
}

static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *net_dev = port_priv->netdev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN];
	int err;

	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
		return 0;

	/* Get firmware address, if any */
	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, mac_addr);
	if (err) {
		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}

	return 0;
}

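/* On Tx, a software annotation area at the start of the hardware buffer holds
 * a backpointer to the skb; the Tx confirmation path below uses it to find
 * and free the skb once the hardware is done with the frame.
 */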
static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
				 const struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	unsigned char *buffer_start;
	struct sk_buff **skbh, *skb;
	dma_addr_t fd_addr;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);

	skb = *skbh;
	buffer_start = (unsigned char *)skbh;

	dma_unmap_single(dev, fd_addr,
			 skb_tail_pointer(skb) - buffer_start,
			 DMA_TO_DEVICE);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
					struct sk_buff *skb,
					struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	struct sk_buff **skbh;
	dma_addr_t addr;
	u8 *buff_start;
	void *hwa;

	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
			       DPAA2_SWITCH_TX_BUF_ALIGN,
			       DPAA2_SWITCH_TX_BUF_ALIGN);

	/* Clear FAS to have consistent values for TX confirmation. It is
	 * located in the first 8 bytes of the buffer's hardware annotation
	 * area
	 */
	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
	memset(hwa, 0, 8);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_single(dev, buff_start,
			      skb_tail_pointer(skb) - buff_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);

	return 0;
}

static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
					struct net_device *net_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
	struct dpaa2_fd fd;
	int err;

	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
		if (unlikely(!ns)) {
			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
			goto err_free_skb;
		}
		dev_consume_skb_any(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx confirmation */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
		goto err_exit;
	}

	/* At this stage, we do not support non-linear skbs so just try to
	 * linearize the skb and if that's not working, just drop the packet.
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl,
				 struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return dpaa2_switch_cls_flower_replace(acl_tbl, f);
	case FLOW_CLS_DESTROY:
		return dpaa2_switch_cls_flower_destroy(acl_tbl, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl,
				   struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dpaa2_switch_cls_matchall_replace(acl_tbl, f);
	case TC_CLSMATCHALL_DESTROY:
		return dpaa2_switch_cls_matchall_destroy(acl_tbl, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
	case TC_SETUP_CLSMATCHALL:
		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(dpaa2_switch_block_cb_list);

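/* All ports attached to the same tc block share a single hardware ACL table.
 * Binding a port therefore means adding its interface id to the table's
 * interface list and remembering the table in the port's private data.
 */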
static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
					  struct dpaa2_switch_acl_tbl *acl_tbl)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->acl_tbl)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      acl_tbl->id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		return err;
	}

	acl_tbl->ports |= BIT(port_priv->idx);
	port_priv->acl_tbl = acl_tbl;

	return 0;
}

static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
				 struct dpaa2_switch_acl_tbl *acl_tbl)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->acl_tbl != acl_tbl)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 acl_tbl->id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
		return err;
	}

	acl_tbl->ports &= ~BIT(port_priv->idx);
	port_priv->acl_tbl = NULL;
	return 0;
}

static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
					struct dpaa2_switch_acl_tbl *acl_tbl)
{
	struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl;
	int err;

	/* If the port is already bound to this ACL table then do nothing. This
	 * can happen when this port is the first one to join a flow block
	 */
	if (port_priv->acl_tbl == acl_tbl)
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl);
	if (err)
		return err;

	/* Mark the previous ACL table as being unused if this was the last
	 * port that was using it.
	 */
	if (old_acl_tbl->ports == 0)
		old_acl_tbl->in_use = false;

	return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
}

static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
					  struct dpaa2_switch_acl_tbl *acl_tbl)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_acl_tbl *new_acl_tbl;
	int err;

	/* If we are the last port bound to this ACL table, it is safe to just
	 * keep it bound to the port.
	 */
	if (acl_tbl->ports == BIT(port_priv->idx))
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl);
	if (err)
		return err;

	if (acl_tbl->ports == 0)
		acl_tbl->in_use = false;

	new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
	new_acl_tbl->in_use = true;
	return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl);
}

static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
					    struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_acl_tbl *acl_tbl;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);

	if (!block_cb) {
		/* If the flow block is not already known, this port must be
		 * the first one to join it. In this case, just continue to
		 * use the port's private ACL table.
		 */
		acl_tbl = port_priv->acl_tbl;

		block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
					       ethsw, acl_tbl, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		register_block = true;
	} else {
		acl_tbl = flow_block_cb_priv(block_cb);
	}

	flow_block_cb_incref(block_cb);
	err = dpaa2_switch_port_block_bind(port_priv, acl_tbl);
	if (err)
		goto err_block_bind;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list,
			      &dpaa2_switch_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}

static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
					       struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_acl_tbl *acl_tbl;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);
	if (!block_cb)
		return;

	acl_tbl = flow_block_cb_priv(block_cb);
	err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
				       struct flow_block_offload *f)
{
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &dpaa2_switch_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return dpaa2_switch_setup_tc_block_bind(netdev, f);
	case FLOW_BLOCK_UNBIND:
		dpaa2_switch_setup_tc_block_unbind(netdev, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
				      enum tc_setup_type type,
				      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return dpaa2_switch_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct net_device_ops dpaa2_switch_port_ops = {
	.ndo_open		= dpaa2_switch_port_open,
	.ndo_stop		= dpaa2_switch_port_stop,

	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
	.ndo_vlan_rx_add_vid	= dpaa2_switch_port_vlan_add,
	.ndo_vlan_rx_kill_vid	= dpaa2_switch_port_vlan_kill,

	.ndo_start_xmit		= dpaa2_switch_port_tx,
	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
	.ndo_setup_tc		= dpaa2_switch_port_setup_tc,
};

bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
{
	return netdev->netdev_ops == &dpaa2_switch_port_ops;
}

static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
		dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
	}
}

static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);

	/* Mask the events and the if_id reserved bits to be cleared on read */
	u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
	int err;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)\n", err);

		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
					    DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
		if (err)
			dev_err(dev, "Can't clear irq status (err %d)\n", err);
		goto out;
	}

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
		dpaa2_switch_links_state_update(ethsw);

out:
	return IRQ_HANDLED;
}

static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
					NULL,
					dpaa2_switch_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->msi_desc->irq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}

static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}

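/* When learning is turned off, previously learned addresses would keep
 * steering traffic, so the dynamic FDB entries of the port are also flushed
 * (fast aged) and the switch floods until new addresses are learned.
 */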
static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	enum dpsw_learning_mode learn_mode;
	int err;

	if (enable)
		learn_mode = DPSW_LEARNING_MODE_HW;
	else
		learn_mode = DPSW_LEARNING_MODE_DIS;

	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, learn_mode);
	if (err)
		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);

	if (!enable)
		dpaa2_switch_port_fast_age(port_priv);

	return err;
}

static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
						u8 state)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpaa2_switch_port_set_stp_state(port_priv, state);
	if (err)
		return err;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		err = dpaa2_switch_port_set_learning(port_priv, false);
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		err = dpaa2_switch_port_set_learning(port_priv,
						     port_priv->learn_ena);
		break;
	}

	return err;
}

static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
				   struct switchdev_brport_flags flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (flags.mask & BR_BCAST_FLOOD)
		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);

	if (flags.mask & BR_FLOOD)
		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);

	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
}

static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
					      struct switchdev_brport_flags flags,
					      struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
			   BR_MCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}

static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
					  struct switchdev_brport_flags flags,
					  struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
		if (err)
			return err;
		port_priv->learn_ena = learn_ena;
	}

	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
		err = dpaa2_switch_port_flood(port_priv, flags);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = dpaa2_switch_port_attr_stp_state_set(netdev,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!attr->u.vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "The DPAA2 switch does not support VLAN-unaware operation");
			return -EOPNOTSUPP;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_attr *attr = &ethsw->sw_attr;
	int err = 0;

	/* Make sure that the VLAN is not already configured
	 * on the switch port
	 */
	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
		return -EEXIST;

	/* Check if there is space for a new VLAN */
	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
		return err;
	}
	if (attr->max_vlans - attr->num_vlans < 1)
		return -ENOSPC;
	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
		/* this is a new VLAN */
		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
		if (err)
			return err;

		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
	}

	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}

static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);
	return 0;
}

static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}

static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * will use it
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete VLAN from switch if it is no longer configured on
		 * any port
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0; /* Found a port member in VID */

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = dpaa2_switch_dellink(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}

int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}

static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -ENOENT;

	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_del(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_del err %d\n", err);
		return err;
	}

	return err;
}

static int dpaa2_switch_port_obj_del(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
					    struct switchdev_notifier_port_attr_info *ptr)
{
	int err;

	err = switchdev_handle_port_attr_set(netdev, ptr,
					     dpaa2_switch_port_dev_check,
					     dpaa2_switch_port_attr_set);
	return notifier_from_errno(err);
}

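/* Joining a bridge makes the port share the FDB of the ports already in that
 * bridge and inherit the bridge port flags; leaving reverts the port to a
 * private FDB, standalone flooding defaults and VLAN 1 as PVID.
 */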
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
					 struct net_device *upper_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct ethsw_port_priv *other_port_priv;
	struct net_device *other_dev;
	struct list_head *iter;
	bool learn_ena;
	int err;

	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		other_port_priv = netdev_priv(other_dev);
		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
			netdev_err(netdev,
				   "Interface from a different DPSW is in the bridge already!\n");
			return -EINVAL;
		}
	}

	/* Delete the previously manually installed VLAN 1 */
	err = dpaa2_switch_port_del_vlan(port_priv, 1);
	if (err)
		return err;

	dpaa2_switch_port_set_fdb(port_priv, upper_dev);

	/* Inherit the initial bridge port learning state */
	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	port_priv->learn_ena = learn_ena;

	/* Setup the egress flood policy (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		goto err_egress_flood;

	return 0;

err_egress_flood:
	dpaa2_switch_port_set_fdb(port_priv, NULL);
	return err;
}

static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
}

static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}

static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* First of all, fast age any learned FDB addresses on this switch port */
	dpaa2_switch_port_fast_age(port_priv);

	/* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
	 * upper devices or otherwise from the FDB table that we are about to
	 * leave
	 */
	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);

	dpaa2_switch_port_set_fdb(port_priv, NULL);

	/* Restore all RX VLANs into the new FDB table that we just joined */
	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);

	/* Reset the flooding state to denote that this port can send any
	 * packet in standalone mode. With this, we are also ensuring that any
	 * later bridge join will have the flooding flag on.
	 */
	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Setup the egress flood policy (broadcast, unknown unicast).
	 * When the port is not under a bridge, only the CTRL interface is part
	 * of the flooding domain besides the actual port
	 */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Recreate the egress flood domain of the FDB that we just left */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
	if (err)
		return err;

	/* No HW learning when not under a bridge */
	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		return err;
	port_priv->learn_ena = false;

	/* Add the VLAN 1 as PVID when not under a bridge. We need this since
	 * the dpaa2 switch interfaces are not capable to be VLAN unaware
	 */
	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
}

static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	/* RCU read lock not necessary because we have write-side protection
	 * (rtnl_mutex), hence the _rcu suffix is harmless
	 */
	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
		if (is_vlan_dev(upper_dev))
			return -EOPNOTSUPP;

	return 0;
}

static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			break;

		if (!br_vlan_enabled(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
			err = -EOPNOTSUPP;
			goto out;
		}

		err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot join a bridge while VLAN uppers are present");
			goto out;
		}

		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
			else
				err = dpaa2_switch_port_bridge_leave(netdev);
		}
		break;
	}

out:
	return notifier_from_errno(err);
}

struct ethsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void dpaa2_switch_event_work(struct work_struct *work)
{
	struct ethsw_switchdev_event_work *switchdev_work =
		container_of(work, struct ethsw_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	int err;

	rtnl_lock();
	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
							   fdb_info->addr);
		else
			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
							   fdb_info->addr);
		if (err)
			break;
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
		else
			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

/* Called under rcu_read_lock() */
static int dpaa2_switch_port_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct ethsw_port_priv *port_priv = netdev_priv(dev);
	struct ethsw_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (event == SWITCHDEV_PORT_ATTR_SET)
		return dpaa2_switch_port_attr_set_event(dev, ptr);

	if (!dpaa2_switch_port_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);

		/* Take a reference on the device to avoid being freed. */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(ethsw->workqueue, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static int dpaa2_switch_port_obj_event(unsigned long event,
				       struct net_device *netdev,
				       struct switchdev_notifier_port_obj_info *port_obj_info)
{
	int err = -EOPNOTSUPP;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
		break;
	}

	port_obj_info->handled = true;
	return notifier_from_errno(err);
}

static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		return dpaa2_switch_port_obj_event(event, dev, ptr);
	case SWITCHDEV_PORT_ATTR_SET:
		return dpaa2_switch_port_attr_set_event(dev, ptr);
	}

	return NOTIFY_DONE;
}

2222
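/* Build a linear skb based on a single-buffer frame descriptor */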
static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
                                                     const struct dpaa2_fd *fd)
{
        u16 fd_offset = dpaa2_fd_get_offset(fd);
        dma_addr_t addr = dpaa2_fd_get_addr(fd);
        u32 fd_length = dpaa2_fd_get_len(fd);
        struct device *dev = ethsw->dev;
        struct sk_buff *skb = NULL;
        void *fd_vaddr;

        fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
        dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
                       DMA_FROM_DEVICE);

        skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (unlikely(!skb)) {
                dev_err(dev, "build_skb() failed\n");
                return NULL;
        }

        skb_reserve(skb, fd_offset);
        skb_put(skb, fd_length);

        ethsw->buf_count--;

        return skb;
}

static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
                                 const struct dpaa2_fd *fd)
{
        dpaa2_switch_free_fd(fq->ethsw, fd);
}

static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
                            const struct dpaa2_fd *fd)
{
        struct ethsw_core *ethsw = fq->ethsw;
        struct ethsw_port_priv *port_priv;
        struct net_device *netdev;
        struct vlan_ethhdr *hdr;
        struct sk_buff *skb;
        u16 vlan_tci, vid;
        int if_id, err;

        /* Get the switch ingress interface ID */
        if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;

        if (if_id >= ethsw->sw_attr.num_ifs) {
                dev_err(ethsw->dev, "Frame received from unknown interface!\n");
                goto err_free_fd;
        }
        port_priv = ethsw->ports[if_id];
        netdev = port_priv->netdev;

        /* Only frames in single buffer format are supported. Drop the frame
         * whether or not the rate-limited error message gets printed.
         */
        if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
                if (net_ratelimit())
                        netdev_err(netdev, "Received invalid frame format\n");
                goto err_free_fd;
        }

        skb = dpaa2_switch_build_linear_skb(ethsw, fd);
        if (unlikely(!skb))
                goto err_free_fd;

        skb_reset_mac_header(skb);

        /* Remove the VLAN header if the received packet carries a VID equal
         * to the port PVID. Since the switch operates only in VLAN-aware
         * mode, the header can be removed safely, presenting the packet to
         * the stack without any VLAN tag.
         */
        hdr = vlan_eth_hdr(skb);
        vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
        if (vid == port_priv->pvid) {
                err = __skb_vlan_pop(skb, &vlan_tci);
                if (err) {
                        dev_info(ethsw->dev, "__skb_vlan_pop() returned %d\n", err);
                        goto err_free_fd;
                }
        }

        skb->dev = netdev;
        skb->protocol = eth_type_trans(skb, skb->dev);

        /* Mark the frame as already forwarded in hardware if the ingress
         * port is under a bridge.
         */
        skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);

        netif_receive_skb(skb);

        return;

err_free_fd:
        dpaa2_switch_free_fd(ethsw, fd);
}

static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
{
        ethsw->features = 0;

        if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
                ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
}

static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
{
        struct dpsw_ctrl_if_attr ctrl_if_attr;
        struct device *dev = ethsw->dev;
        int i = 0;
        int err;

        err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
                                          &ctrl_if_attr);
        if (err) {
                dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
                return err;
        }

        ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
        ethsw->fq[i].ethsw = ethsw;
        ethsw->fq[i++].type = DPSW_QUEUE_RX;

        ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
        ethsw->fq[i].ethsw = ethsw;
        ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;

        return 0;
}

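/* Free buffers acquired from the buffer pool or which were meant to be
 * released in the pool
 */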
static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
{
        struct device *dev = ethsw->dev;
        void *vaddr;
        int i;

        for (i = 0; i < count; i++) {
                vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
                dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
                               DMA_FROM_DEVICE);
                free_pages((unsigned long)vaddr, 0);
        }
}

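/* Perform a single release command to add buffers
 * to the specified buffer pool
 */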
static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
{
        struct device *dev = ethsw->dev;
        u64 buf_array[BUFS_PER_CMD];
        struct page *page;
        int retries = 0;
        dma_addr_t addr;
        int err;
        int i;

        for (i = 0; i < BUFS_PER_CMD; i++) {
                /* Allocate one page for each Rx buffer. WRIOP sees
                 * the entire page except for a tailroom reserved for
                 * skb shared info
                 */
                page = dev_alloc_pages(0);
                if (!page) {
                        dev_err(dev, "buffer allocation failed\n");
                        goto err_alloc;
                }

                addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
                                    DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, addr)) {
                        dev_err(dev, "dma_map_page() failed\n");
                        goto err_map;
                }
                buf_array[i] = addr;
        }

release_bufs:
        /* In case the portal is busy, retry until successful or
         * max retries hit.
         */
        while ((err = dpaa2_io_service_release(NULL, bpid,
                                               buf_array, i)) == -EBUSY) {
                if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
                        break;

                cpu_relax();
        }

        /* If release command failed, clean up and bail out. */
        if (err) {
                dpaa2_switch_free_bufs(ethsw, buf_array, i);
                return 0;
        }

        return i;

err_map:
        __free_pages(page, 0);
err_alloc:
        /* If we managed to allocate at least some buffers,
         * release them to hardware
         */
        if (i)
                goto release_bufs;

        return 0;
}

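/* Refill the buffer pool up to DPAA2_ETHSW_NUM_BUFS once the number of
 * buffers drops below the refill threshold
 */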
static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
{
        int *count = &ethsw->buf_count;
        int new_count;
        int err = 0;

        if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
                do {
                        new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
                        if (unlikely(!new_count)) {
                                /* Out of memory; abort for now, we'll
                                 * try later on
                                 */
                                break;
                        }
                        *count += new_count;
                } while (*count < DPAA2_ETHSW_NUM_BUFS);

                if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
                        err = -ENOMEM;
        }

        return err;
}

static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
        int *count, i;

        for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
                count = &ethsw->buf_count;
                *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);

                if (unlikely(*count < BUFS_PER_CMD))
                        return -ENOMEM;
        }

        return 0;
}

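/* Acquire and free all the buffers left in the hardware buffer pool */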
static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
{
        u64 buf_array[BUFS_PER_CMD];
        int ret;

        do {
                ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
                                               buf_array, BUFS_PER_CMD);
                if (ret < 0) {
                        dev_err(ethsw->dev,
                                "dpaa2_io_service_acquire() = %d\n", ret);
                        return;
                }
                dpaa2_switch_free_bufs(ethsw, buf_array, ret);

        } while (ret);
}

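/* Allocate a DPBP object, enable it and configure it as the buffer pool
 * used by the control interface
 */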
static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
{
        struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
        struct device *dev = ethsw->dev;
        struct fsl_mc_device *dpbp_dev;
        struct dpbp_attr dpbp_attrs;
        int err;

        err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
                                     &dpbp_dev);
        if (err) {
                if (err == -ENXIO)
                        err = -EPROBE_DEFER;
                else
                        dev_err(dev, "DPBP device allocation failed\n");
                return err;
        }
        ethsw->dpbp_dev = dpbp_dev;

        err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
                        &dpbp_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpbp_open() failed\n");
                goto err_open;
        }

        err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpbp_reset() failed\n");
                goto err_reset;
        }

        err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpbp_enable() failed\n");
                goto err_enable;
        }

        err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
                                  &dpbp_attrs);
        if (err) {
                dev_err(dev, "dpbp_get_attributes() failed\n");
                goto err_get_attr;
        }

        dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
        dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
        dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
        dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;

        err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
                                     &dpsw_ctrl_if_pools_cfg);
        if (err) {
                dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
                goto err_get_attr;
        }
        ethsw->bpid = dpbp_attrs.id;

        return 0;

err_get_attr:
        dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
        dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_open:
        fsl_mc_object_free(dpbp_dev);
        return err;
}

static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
{
        dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
        dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
        fsl_mc_object_free(ethsw->dpbp_dev);
}

static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
{
        int i;

        for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
                ethsw->fq[i].store =
                        dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
                                              ethsw->dev);
                if (!ethsw->fq[i].store) {
                        dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
                        while (--i >= 0)
                                dpaa2_io_store_destroy(ethsw->fq[i].store);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
{
        int i;

        for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
                dpaa2_io_store_destroy(ethsw->fq[i].store);
}

static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
{
        int err, retries = 0;

        /* Retry while portal is busy */
        do {
                err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
                cpu_relax();
        } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

        if (unlikely(err))
                dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);

        return err;
}

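/* Consume all frames pull-dequeued into the store */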
static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
{
        struct ethsw_core *ethsw = fq->ethsw;
        int cleaned = 0, is_last;
        struct dpaa2_dq *dq;
        int retries = 0;

        do {
                /* Get the next available frame descriptor from the store */
                dq = dpaa2_io_store_next(fq->store, &is_last);
                if (unlikely(!dq)) {
                        if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
                                dev_err_once(ethsw->dev,
                                             "No valid dequeue response\n");
                                return -ETIMEDOUT;
                        }
                        continue;
                }

                if (fq->type == DPSW_QUEUE_RX)
                        dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
                else
                        dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
                cleaned++;

        } while (!is_last);

        return cleaned;
}

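/* NAPI poll routine: pull frames from the queue and consume them until
 * the budget is met or the queue runs empty
 */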
static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
{
        int err, cleaned = 0, store_cleaned, work_done;
        struct dpaa2_switch_fq *fq;
        int retries = 0;

        fq = container_of(napi, struct dpaa2_switch_fq, napi);

        do {
                err = dpaa2_switch_pull_fq(fq);
                if (unlikely(err))
                        break;

                /* Refill pool if appropriate */
                dpaa2_switch_refill_bp(fq->ethsw);

                store_cleaned = dpaa2_switch_store_consume(fq);
                cleaned += store_cleaned;

                if (cleaned >= budget) {
                        work_done = budget;
                        goto out;
                }

        } while (store_cleaned);

        /* We didn't consume the entire budget, so finish napi and re-enable
         * data availability notifications
         */
        napi_complete_done(napi, cleaned);
        do {
                err = dpaa2_io_service_rearm(NULL, &fq->nctx);
                cpu_relax();
        } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

        work_done = max(cleaned, 1);
out:
        return work_done;
}

static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
        struct dpaa2_switch_fq *fq;

        fq = container_of(nctx, struct dpaa2_switch_fq, nctx);

        napi_schedule(&fq->napi);
}

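/* Register data availability notifications for the control interface
 * queues and wire each queue to its DPIO destination
 */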
static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
{
        struct dpsw_ctrl_if_queue_cfg queue_cfg;
        struct dpaa2_io_notification_ctx *nctx;
        int err, i, j;

        for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
                nctx = &ethsw->fq[i].nctx;

                /* Register a new software context for the FQID. By using NULL
                 * as the first parameter, we specify that we do not care on
                 * which cpu are interrupts received for this queue
                 */
                nctx->is_cdan = 0;
                nctx->id = ethsw->fq[i].fqid;
                nctx->desired_cpu = DPAA2_IO_ANY_CPU;
                nctx->cb = dpaa2_switch_fqdan_cb;
                err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
                if (err) {
                        err = -EPROBE_DEFER;
                        goto err_register;
                }

                queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
                                    DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
                queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
                queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
                queue_cfg.dest_cfg.priority = 0;
                queue_cfg.user_ctx = nctx->qman64;

                err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
                                             ethsw->dpsw_handle,
                                             ethsw->fq[i].type,
                                             &queue_cfg);
                if (err)
                        goto err_set_queue;
        }

        return 0;

err_set_queue:
        dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
err_register:
        for (j = 0; j < i; j++)
                dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
                                            ethsw->dev);

        return err;
}

static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
{
        int i;

        for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
                dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
                                            ethsw->dev);
}

static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
{
        int err;

        /* Setup FQs for Rx and Tx Conf */
        err = dpaa2_switch_setup_fqs(ethsw);
        if (err)
                return err;

        /* Setup the buffer pool needed by the control interface */
        err = dpaa2_switch_setup_dpbp(ethsw);
        if (err)
                return err;

        err = dpaa2_switch_alloc_rings(ethsw);
        if (err)
                goto err_free_dpbp;

        err = dpaa2_switch_setup_dpio(ethsw);
        if (err)
                goto err_destroy_rings;

        err = dpaa2_switch_seed_bp(ethsw);
        if (err)
                goto err_deregister_dpio;

        err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err) {
                dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
                goto err_drain_dpbp;
        }

        return 0;

err_drain_dpbp:
        dpaa2_switch_drain_bp(ethsw);
err_deregister_dpio:
        dpaa2_switch_free_dpio(ethsw);
err_destroy_rings:
        dpaa2_switch_destroy_rings(ethsw);
err_free_dpbp:
        dpaa2_switch_free_dpbp(ethsw);

        return err;
}


static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
{
        struct device *dev = &sw_dev->dev;
        struct ethsw_core *ethsw = dev_get_drvdata(dev);
        struct dpsw_vlan_if_cfg vcfg = {0};
        struct dpsw_tci_cfg tci_cfg = {0};
        struct dpsw_stp_cfg stp_cfg;
        int err;
        u16 i;

        ethsw->dev_id = sw_dev->obj_desc.id;

        err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
        if (err) {
                dev_err(dev, "dpsw_open err %d\n", err);
                return err;
        }

        err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
                                  &ethsw->sw_attr);
        if (err) {
                dev_err(dev, "dpsw_get_attributes err %d\n", err);
                goto err_close;
        }

        err = dpsw_get_api_version(ethsw->mc_io, 0,
                                   &ethsw->major,
                                   &ethsw->minor);
        if (err) {
                dev_err(dev, "dpsw_get_api_version err %d\n", err);
                goto err_close;
        }

        /* Minimum supported DPSW version check */
        if (ethsw->major < DPSW_MIN_VER_MAJOR ||
            (ethsw->major == DPSW_MIN_VER_MAJOR &&
             ethsw->minor < DPSW_MIN_VER_MINOR)) {
                dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
                        ethsw->major, ethsw->minor);
                err = -EOPNOTSUPP;
                goto err_close;
        }

        if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
                err = -EOPNOTSUPP;
                goto err_close;
        }

        dpaa2_switch_detect_features(ethsw);

        err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err) {
                dev_err(dev, "dpsw_reset err %d\n", err);
                goto err_close;
        }

        stp_cfg.vlan_id = DEFAULT_VLAN_ID;
        stp_cfg.state = DPSW_STP_STATE_FORWARDING;

        for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
                err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
                if (err) {
                        dev_err(dev, "dpsw_if_disable err %d\n", err);
                        goto err_close;
                }

                err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
                                      &stp_cfg);
                if (err) {
                        dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
                                err, i);
                        goto err_close;
                }

                /* Switch starts with all ports configured to VLAN 1. Need to
                 * remove this setting to allow configuration at bridge join
                 */
                vcfg.num_ifs = 1;
                vcfg.if_id[0] = i;
                err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
                                                   DEFAULT_VLAN_ID, &vcfg);
                if (err) {
                        dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
                                err);
                        goto err_close;
                }

                tci_cfg.vlan_id = 4095;
                err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
                if (err) {
                        dev_err(dev, "dpsw_if_set_tci err %d\n", err);
                        goto err_close;
                }

                err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
                                          DEFAULT_VLAN_ID, &vcfg);
                if (err) {
                        dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
                        goto err_close;
                }
        }

        err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
        if (err) {
                dev_err(dev, "dpsw_vlan_remove err %d\n", err);
                goto err_close;
        }

        ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
                                                   WQ_MEM_RECLAIM, "ethsw",
                                                   ethsw->sw_attr.id);
        if (!ethsw->workqueue) {
                err = -ENOMEM;
                goto err_close;
        }

        /* Remove the firmware-created default FDB table (id 0); the driver
         * manages one FDB table per bridging domain instead.
         */
        err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
        if (err)
                goto err_destroy_ordered_workqueue;

        err = dpaa2_switch_ctrl_if_setup(ethsw);
        if (err)
                goto err_destroy_ordered_workqueue;

        return 0;

err_destroy_ordered_workqueue:
        destroy_workqueue(ethsw->workqueue);

err_close:
        dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
        return err;
}

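/* Add an ACL entry to redirect frames with a specific destination MAC
 * address to the control interface
 */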
static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
                                           const char *mac)
{
        struct dpaa2_switch_acl_entry acl_entry = {0};

        /* Match on the destination MAC address */
        ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
        eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);

        /* Trap to CPU */
        acl_entry.cfg.precedence = 0;
        acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;

        return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry);
}

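/* One-time setup of a switch port: Tx queue ID, private FDB table,
 * default VLAN membership, egress flooding domains and ACL table
 */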
static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
        const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
        struct switchdev_obj_port_vlan vlan = {
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .vid = DEFAULT_VLAN_ID,
                .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
        };
        struct net_device *netdev = port_priv->netdev;
        struct ethsw_core *ethsw = port_priv->ethsw_data;
        struct dpaa2_switch_acl_tbl *acl_tbl;
        struct dpsw_fdb_cfg fdb_cfg = {0};
        struct dpsw_if_attr dpsw_if_attr;
        struct dpaa2_switch_fdb *fdb;
        struct dpsw_acl_cfg acl_cfg;
        u16 fdb_id, acl_tbl_id;
        int err;

        /* Get the Tx queue for this specific port */
        err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
                                     port_priv->idx, &dpsw_if_attr);
        if (err) {
                netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
                return err;
        }
        port_priv->tx_qdid = dpsw_if_attr.qdid;

        /* Create a FDB table for this particular switch port */
        fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
        err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
                           &fdb_id, &fdb_cfg);
        if (err) {
                netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
                return err;
        }

        /* Find an unused dpaa2_switch_fdb structure and use it */
        fdb = dpaa2_switch_fdb_get_unused(ethsw);
        fdb->fdb_id = fdb_id;
        fdb->in_use = true;
        fdb->bridge_dev = NULL;
        port_priv->fdb = fdb;

        /* Add the default VLAN back on this port, as PVID and egress
         * untagged, since dpaa2_switch_init() removed it from all the
         * switch interfaces.
         */
        err = dpaa2_switch_port_vlans_add(netdev, &vlan);
        if (err)
                return err;

        /* Setup the egress flooding domains (broadcast, unknown unicast) */
        err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
        if (err)
                return err;

        /* Create an ACL table to be used by this switch port */
        acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
        err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
                           &acl_tbl_id, &acl_cfg);
        if (err) {
                netdev_err(netdev, "dpsw_acl_add err %d\n", err);
                return err;
        }

        acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
        acl_tbl->ethsw = ethsw;
        acl_tbl->id = acl_tbl_id;
        acl_tbl->in_use = true;
        acl_tbl->num_rules = 0;
        INIT_LIST_HEAD(&acl_tbl->entries);

        err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
        if (err)
                return err;

        /* Trap frames sent to the STP multicast MAC address to the CPU */
        err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
        if (err)
                return err;

        return 0;
}

static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
{
        dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
        dpaa2_switch_free_dpio(ethsw);
        dpaa2_switch_destroy_rings(ethsw);
        dpaa2_switch_drain_bp(ethsw);
        dpaa2_switch_free_dpbp(ethsw);
}

static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
{
        struct device *dev = &sw_dev->dev;
        struct ethsw_core *ethsw = dev_get_drvdata(dev);
        int err;

        dpaa2_switch_ctrl_if_teardown(ethsw);

        destroy_workqueue(ethsw->workqueue);

        err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err)
                dev_warn(dev, "dpsw_close err %d\n", err);
}

static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
        struct ethsw_port_priv *port_priv;
        struct ethsw_core *ethsw;
        struct device *dev;
        int i;

        dev = &sw_dev->dev;
        ethsw = dev_get_drvdata(dev);

        dpaa2_switch_teardown_irqs(sw_dev);

        dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);

        for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
                port_priv = ethsw->ports[i];
                unregister_netdev(port_priv->netdev);
                free_netdev(port_priv->netdev);
        }

        kfree(ethsw->fdbs);
        kfree(ethsw->acls);
        kfree(ethsw->ports);

        dpaa2_switch_teardown(sw_dev);

        fsl_mc_portal_free(ethsw->mc_io);

        kfree(ethsw);

        dev_set_drvdata(dev, NULL);

        return 0;
}

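/* Allocate and initialize the net_device representing one switch port */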
static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
                                   u16 port_idx)
{
        struct ethsw_port_priv *port_priv;
        struct device *dev = ethsw->dev;
        struct net_device *port_netdev;
        int err;

        port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
        if (!port_netdev) {
                dev_err(dev, "alloc_etherdev error\n");
                return -ENOMEM;
        }

        port_priv = netdev_priv(port_netdev);
        port_priv->netdev = port_netdev;
        port_priv->ethsw_data = ethsw;

        port_priv->idx = port_idx;
        port_priv->stp_state = BR_STATE_FORWARDING;

        SET_NETDEV_DEV(port_netdev, dev);
        port_netdev->netdev_ops = &dpaa2_switch_port_ops;
        port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;

        port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;

        port_priv->bcast_flood = true;
        port_priv->ucast_flood = true;

        /* Set MTU limits */
        port_netdev->min_mtu = ETH_MIN_MTU;
        port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;

        /* Populate the private port structure so that later calls to
         * dpaa2_switch_port_init() can use it.
         */
        ethsw->ports[port_idx] = port_priv;

        /* The DPAA2 switch's ingress path depends on the VLAN table,
         * thus we are not able to disable VLAN filtering.
         */
        port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
                                NETIF_F_HW_VLAN_STAG_FILTER |
                                NETIF_F_HW_TC;

        err = dpaa2_switch_port_init(port_priv, port_idx);
        if (err)
                goto err_port_probe;

        err = dpaa2_switch_port_set_mac_addr(port_priv);
        if (err)
                goto err_port_probe;

        err = dpaa2_switch_port_set_learning(port_priv, false);
        if (err)
                goto err_port_probe;
        port_priv->learn_ena = false;

        return 0;

err_port_probe:
        free_netdev(port_netdev);
        ethsw->ports[port_idx] = NULL;

        return err;
}

static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
{
        struct device *dev = &sw_dev->dev;
        struct ethsw_core *ethsw;
        int i, err;

        /* Allocate switch core */
        ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
        if (!ethsw)
                return -ENOMEM;

        ethsw->dev = dev;
        ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
        dev_set_drvdata(dev, ethsw);

        err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
                                     &ethsw->mc_io);
        if (err) {
                if (err == -ENXIO)
                        err = -EPROBE_DEFER;
                else
                        dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
                goto err_free_drvdata;
        }

        err = dpaa2_switch_init(sw_dev);
        if (err)
                goto err_free_cmdport;

        ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
                               GFP_KERNEL);
        if (!ethsw->ports) {
                err = -ENOMEM;
                goto err_teardown;
        }

        ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
                              GFP_KERNEL);
        if (!ethsw->fdbs) {
                err = -ENOMEM;
                goto err_free_ports;
        }

        ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls),
                              GFP_KERNEL);
        if (!ethsw->acls) {
                err = -ENOMEM;
                goto err_free_fdbs;
        }

        for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
                err = dpaa2_switch_probe_port(ethsw, i);
                if (err)
                        goto err_free_netdev;
        }

        /* Add a NAPI instance for each of the Rx queues. The first port's
         * net_device will be used as the net_device for the NAPI instances.
         */
        for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
                netif_napi_add(ethsw->ports[0]->netdev,
                               &ethsw->fq[i].napi, dpaa2_switch_poll,
                               NAPI_POLL_WEIGHT);

        err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err) {
                dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
                goto err_free_netdev;
        }

        /* Setup IRQs */
        err = dpaa2_switch_setup_irqs(sw_dev);
        if (err)
                goto err_stop;

        /* Register the netdevs only when the entire setup is done and the
         * switch port interfaces are ready to receive traffic
         */
        for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
                err = register_netdev(ethsw->ports[i]->netdev);
                if (err < 0) {
                        dev_err(dev, "register_netdev error %d\n", err);
                        goto err_unregister_ports;
                }
        }

        return 0;

err_unregister_ports:
        for (i--; i >= 0; i--)
                unregister_netdev(ethsw->ports[i]->netdev);
        dpaa2_switch_teardown_irqs(sw_dev);
err_stop:
        dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
err_free_netdev:
        for (i--; i >= 0; i--)
                free_netdev(ethsw->ports[i]->netdev);
        kfree(ethsw->acls);
err_free_fdbs:
        kfree(ethsw->fdbs);
err_free_ports:
        kfree(ethsw->ports);

err_teardown:
        dpaa2_switch_teardown(sw_dev);

err_free_cmdport:
        fsl_mc_portal_free(ethsw->mc_io);

err_free_drvdata:
        kfree(ethsw);
        dev_set_drvdata(dev, NULL);

        return err;
}


static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
        {
                .vendor = FSL_MC_VENDOR_FREESCALE,
                .obj_type = "dpsw",
        },
        { .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);

static struct fsl_mc_driver dpaa2_switch_drv = {
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
        },
        .probe = dpaa2_switch_probe,
        .remove = dpaa2_switch_remove,
        .match_id_table = dpaa2_switch_match_id_table
};

static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
        .notifier_call = dpaa2_switch_port_netdevice_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_nb = {
        .notifier_call = dpaa2_switch_port_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
        .notifier_call = dpaa2_switch_port_blocking_event,
};

static int dpaa2_switch_register_notifiers(void)
{
        int err;

        err = register_netdevice_notifier(&dpaa2_switch_port_nb);
        if (err) {
                pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
                return err;
        }

        err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
        if (err) {
                pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
                goto err_switchdev_nb;
        }

        err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
        if (err) {
                pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
                goto err_switchdev_blocking_nb;
        }

        return 0;

err_switchdev_blocking_nb:
        unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
err_switchdev_nb:
        unregister_netdevice_notifier(&dpaa2_switch_port_nb);

        return err;
}

static void dpaa2_switch_unregister_notifiers(void)
{
        int err;

        err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
        if (err)
                pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
                       err);

        err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
        if (err)
                pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);

        err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
        if (err)
                pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
}

static int __init dpaa2_switch_driver_init(void)
{
        int err;

        err = fsl_mc_driver_register(&dpaa2_switch_drv);
        if (err)
                return err;

        err = dpaa2_switch_register_notifiers();
        if (err) {
                fsl_mc_driver_unregister(&dpaa2_switch_drv);
                return err;
        }

        return 0;
}

static void __exit dpaa2_switch_driver_exit(void)
{
        dpaa2_switch_unregister_notifiers();
        fsl_mc_driver_unregister(&dpaa2_switch_drv);
}

module_init(dpaa2_switch_driver_init);
module_exit(dpaa2_switch_driver_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");