// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process actions in deferred queue
 *
 *	Called to flush the deferred actions; new deferred actions may be
 *	enqueued later and will be processed on the next flush.
 *
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
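
/* Usage sketch (illustrative, not part of the original file): a caller that
 * needs the deferred queue drained before it continues, e.g. before tearing
 * down an offloaded port, runs the flush under rtnl_lock:
 *
 *	rtnl_lock();
 *	switchdev_deferred_process();
 *	rtnl_unlock();
 */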

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
					const struct switchdev_attr *attr,
					struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
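
/* Usage sketch (illustrative, not part of the original file): the bridge
 * layer, for instance, pushes an STP state change down to a port roughly
 * like this; "br_port_dev" and the chosen state are made-up placeholders.
 * SWITCHDEV_F_DEFER makes the call usable from atomic context, at the cost
 * of losing the extack and the driver's return value:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = br_port_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(br_port_dev, &attr, NULL);
 */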

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
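
/* Usage sketch (illustrative, not part of the original file): adding an MDB
 * entry to a port, similar to what the bridge multicast code does; the
 * device pointers, VID and group address are made-up placeholders:
 *
 *	struct switchdev_obj_port_mdb mdb = {
 *		.obj.orig_dev = br_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_MDB,
 *		.obj.flags = SWITCHDEV_F_DEFER,
 *		.vid = 10,
 *	};
 *
 *	ether_addr_copy(mdb.addr, group_addr);
 *	err = switchdev_port_obj_add(port_dev, &mdb.obj, NULL);
 */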

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
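
/* Usage sketch (illustrative): deleting an object mirrors the add above and
 * passes the same object description; note there is no extack on the delete
 * path:
 *
 *	err = switchdev_port_obj_del(port_dev, &mdb.obj);
 */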

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
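
/* Usage sketch (illustrative, not part of the original file): a switch
 * driver typically registers a block on this atomic chain at probe time to
 * receive FDB add/del events; "my_switchdev_event" is a hypothetical
 * handler:
 *
 *	static struct notifier_block my_switchdev_nb = {
 *		.notifier_call = my_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&my_switchdev_nb);
 *	...
 *	unregister_switchdev_notifier(&my_switchdev_nb);
 */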

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
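
/* Usage sketch (illustrative, not part of the original file): a driver that
 * learned an address in hardware can notify the bridge roughly like this;
 * "mac", "vid" and "port_dev" are made-up placeholders and the fdb_info
 * fields are assumed from the switchdev header of the same kernel version:
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = mac,
 *		.vid = vid,
 *		.offloaded = true,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, port_dev,
 *				 &fdb_info.info, NULL);
 */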

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

	if (check_cb(dev)) {
		err = add_cb(dev, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
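
/* Usage sketch (illustrative, not part of the original file): a driver's
 * blocking notifier handler, registered via
 * register_switchdev_blocking_notifier(), typically dispatches object adds
 * through this helper; "my_port_dev_check" and "my_port_obj_add" are
 * hypothetical driver callbacks matching check_cb and add_cb:
 *
 *	static int my_switchdev_blocking_event(struct notifier_block *nb,
 *					       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    my_port_dev_check,
 *							    my_port_obj_add);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */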

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
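
/* Usage sketch (illustrative): the SWITCHDEV_PORT_OBJ_DEL case in the same
 * hypothetical blocking notifier handler is dispatched symmetrically, with
 * "my_port_obj_del" matching the del_cb signature above:
 *
 *	case SWITCHDEV_PORT_OBJ_DEL:
 *		err = switchdev_handle_port_obj_del(dev, ptr,
 *						    my_port_dev_check,
 *						    my_port_obj_del);
 *		return notifier_from_errno(err);
 */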

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_attr_info->info);

	if (check_cb(dev)) {
		err = set_cb(dev, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
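
/* Usage sketch (illustrative): SWITCHDEV_PORT_ATTR_SET is dispatched from
 * the same hypothetical blocking notifier handler in the same fashion,
 * with "my_port_attr_set" matching the set_cb signature above:
 *
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		err = switchdev_handle_port_attr_set(dev, ptr,
 *						     my_port_dev_check,
 *						     my_port_attr_set);
 *		return notifier_from_errno(err);
 */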