1
2
3
4
5
6
7
8
9
10
11#include <linux/acpi.h>
12#include <linux/cpufreq.h>
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/fwnode.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/kdev_t.h>
21#include <linux/notifier.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/genhd.h>
25#include <linux/mutex.h>
26#include <linux/pm_runtime.h>
27#include <linux/netdevice.h>
28#include <linux/sched/signal.h>
29#include <linux/sched/mm.h>
30#include <linux/sysfs.h>
31#include <linux/dma-map-ops.h>
32
33#include "base.h"
34#include "power/power.h"
35
#ifdef CONFIG_SYSFS_DEPRECATED
/*
 * Controls the deprecated sysfs layout used by older userspace.  The
 * default depends on CONFIG_SYSFS_DEPRECATED_V2 and can be overridden
 * on the kernel command line with "sysfs.deprecated=<0|1>".
 */
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Parse the "sysfs.deprecated" boot parameter as a base-10 long. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
48
49
/* Devices whose sync_state() calls are deferred (see the sync_state code below). */
static LIST_HEAD(deferred_sync);
/* Starts at 1 so sync_state() is held back until the late_initcall resume. */
static unsigned int defer_sync_state_count = 1;
/* Protects the fwnode_link supplier/consumer lists on all fwnode_handles. */
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static bool fw_devlink_drv_reg_done;
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
/**
 * fwnode_link_add - Create a link between two fwnode_handles.
 * @con: Consumer end of the link.
 * @sup: Supplier end of the link.
 *
 * Adds a fwnode link from @con to @sup unless one already exists; a
 * duplicate request is treated as success.
 *
 * Return: 0 on success (including the already-linked case), -ENOMEM on
 * allocation failure.
 */
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
{
	struct fwnode_link *link;
	int ret = 0;

	mutex_lock(&fwnode_link_lock);

	/* Bail out (with success) if this pair is already linked. */
	list_for_each_entry(link, &sup->consumers, s_hook)
		if (link->consumer == con)
			goto out;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}

	link->supplier = sup;
	INIT_LIST_HEAD(&link->s_hook);
	link->consumer = con;
	INIT_LIST_HEAD(&link->c_hook);

	/* Hook the link into both ends' lists. */
	list_add(&link->s_hook, &sup->consumers);
	list_add(&link->c_hook, &con->suppliers);
out:
	mutex_unlock(&fwnode_link_lock);

	return ret;
}
102
103
104
105
106
107
108
/**
 * fwnode_links_purge_suppliers - Delete all supplier links of a fwnode_handle.
 * @fwnode: fwnode whose supplier links need to be deleted
 *
 * Deletes (and frees) all fwnode links in which @fwnode is the consumer.
 */
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	mutex_lock(&fwnode_link_lock);
	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);
	}
	mutex_unlock(&fwnode_link_lock);
}
121
122
123
124
125
126
127
/**
 * fwnode_links_purge_consumers - Delete all consumer links of a fwnode_handle.
 * @fwnode: fwnode whose consumer links need to be deleted
 *
 * Deletes (and frees) all fwnode links in which @fwnode is the supplier.
 */
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	mutex_lock(&fwnode_link_lock);
	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);
	}
	mutex_unlock(&fwnode_link_lock);
}
140
141
142
143
144
145
146
/**
 * fwnode_links_purge - Delete all links connected to a fwnode_handle.
 * @fwnode: fwnode whose links need to be deleted
 *
 * Deletes all links connecting directly to @fwnode, in both directions.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}
152
/**
 * fw_devlink_purge_absent_suppliers - Purge links of suppliers with no device
 * @fwnode: fwnode to purge, recursively
 *
 * If no struct device has claimed @fwnode, mark it as "not a device"
 * and drop all fwnode links in which it acts as a supplier, then do
 * the same for every available child fwnode.
 */
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	/* A fwnode with a device will still get its consumers probed. */
	if (fwnode->dev)
		return;

	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	fwnode_links_purge_consumers(fwnode);

	fwnode_for_each_available_child_node(fwnode, child)
		fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
168
#ifdef CONFIG_SRCU
/*
 * Device links locking, SRCU flavor: writers serialize on a mutex,
 * readers use the SRCU read lock, and link removal must wait for an
 * SRCU grace period before the link memory can be freed (see
 * device_link_synchronize_removal()).
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

/* Wait until all current SRCU readers are done with the link lists. */
static void device_link_synchronize_removal(void)
{
	synchronize_srcu(&device_links_srcu);
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}
#else /* !CONFIG_SRCU */
/*
 * Without SRCU, fall back to a plain rwsem for both readers and
 * writers; no grace period is needed before a link can be torn down.
 */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif

static inline void device_link_synchronize_removal(void)
{
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del(&link->s_node);
	list_del(&link->c_node);
}
#endif /* !CONFIG_SRCU */
249
250static bool device_is_ancestor(struct device *dev, struct device *target)
251{
252 while (target->parent) {
253 target = target->parent;
254 if (dev == target)
255 return true;
256 }
257 return false;
258}
259
260
261
262
263
264
265
266
267
/**
 * device_is_dependent - Check if one device depends on another device
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child
 * or its consumer etc).  Return 1 if that is the case or 0 otherwise.
 * (The signature takes @target as void * so it can double as a
 * device_for_each_child() callback.)
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The "ancestors" check is needed to catch the case when the target
	 * device has not been completely initialized yet and it is still
	 * missing from the list of children of its parent device.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links do not create dependencies. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
299
/*
 * Choose the initial state of a managed device link from the current
 * driver-binding status of the supplier and the consumer.
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from
			 * the supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}
343
/*
 * Move @dev and everything depending on it (children and consumers,
 * recursively) to the ends of the devices kset list and dpm_list so
 * that consumers sort after their suppliers.  The signature matches a
 * device_for_each_child() callback; @not_used is ignored and 0 is
 * always returned.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links impose no ordering. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}
368
369
370
371
372
373
374
375
376
377
/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}
388
/* Map a devlink_class device back to its containing struct device_link. */
#define to_devlink(dev) container_of((dev), struct device_link, link_dev)
390
391static ssize_t status_show(struct device *dev,
392 struct device_attribute *attr, char *buf)
393{
394 const char *output;
395
396 switch (to_devlink(dev)->status) {
397 case DL_STATE_NONE:
398 output = "not tracked";
399 break;
400 case DL_STATE_DORMANT:
401 output = "dormant";
402 break;
403 case DL_STATE_AVAILABLE:
404 output = "available";
405 break;
406 case DL_STATE_CONSUMER_PROBE:
407 output = "consumer probing";
408 break;
409 case DL_STATE_ACTIVE:
410 output = "active";
411 break;
412 case DL_STATE_SUPPLIER_UNBIND:
413 output = "supplier unbinding";
414 break;
415 default:
416 output = "unknown";
417 break;
418 }
419
420 return sysfs_emit(buf, "%s\n", output);
421}
422static DEVICE_ATTR_RO(status);
423
424static ssize_t auto_remove_on_show(struct device *dev,
425 struct device_attribute *attr, char *buf)
426{
427 struct device_link *link = to_devlink(dev);
428 const char *output;
429
430 if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
431 output = "supplier unbind";
432 else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
433 output = "consumer unbind";
434 else
435 output = "never";
436
437 return sysfs_emit(buf, "%s\n", output);
438}
439static DEVICE_ATTR_RO(auto_remove_on);
440
441static ssize_t runtime_pm_show(struct device *dev,
442 struct device_attribute *attr, char *buf)
443{
444 struct device_link *link = to_devlink(dev);
445
446 return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
447}
448static DEVICE_ATTR_RO(runtime_pm);
449
450static ssize_t sync_state_only_show(struct device *dev,
451 struct device_attribute *attr, char *buf)
452{
453 struct device_link *link = to_devlink(dev);
454
455 return sysfs_emit(buf, "%d\n",
456 !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
457}
458static DEVICE_ATTR_RO(sync_state_only);
459
/* Attributes exposed under each /sys/class/devlink/<link>/ directory. */
static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);
468
/*
 * Final teardown of a device link, run from a workqueue: wait for
 * concurrent readers to finish, drop the extra runtime PM references
 * the link still holds on the supplier, release both device references
 * and free the link structure.
 */
static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure that all references to the link object have been dropped. */
	device_link_synchronize_removal();

	/* rpm_active carries a bias of 1 (see device_link_add()). */
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
483
/* Class release callback for the link's sysfs device. */
static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);

	/*
	 * It may take a while to complete this work because of the SRCU
	 * synchronization in device_link_release_fn(), so defer it to the
	 * dedicated "long" workqueue rather than running it here.
	 */
	queue_work(system_long_wq, &link->rm_work);
}
497
/* Class backing the device-link devices visible in /sys/class/devlink. */
static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};
504
/*
 * Create the four symlinks describing a device link:
 *  - "supplier" and "consumer" inside the link's own sysfs directory,
 *  - "consumer:<bus>:<name>" in the supplier's directory,
 *  - "supplier:<bus>:<name>" in the consumer's directory.
 * On failure, already-created links are removed in reverse order.
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/*
	 * Size the scratch buffer for the longer of the two
	 * "<prefix>:<bus>:<name>" strings ("supplier:" and "consumer:"
	 * are the same length), plus the NUL terminator.
	 */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}
554
/*
 * Tear down the symlinks created by devlink_add_symlinks().  The
 * in-directory "consumer"/"supplier" links are removed first; a scratch
 * buffer is needed to rebuild the names of the cross-directory links.
 */
static void devlink_remove_symlinks(struct device *dev,
				   struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	/* Same sizing as in devlink_add_symlinks(). */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		/* Can't rebuild the link names; leave them dangling. */
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	/* The consumer may already be gone from sysfs; skip it then. */
	if (device_is_registered(con)) {
		snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
		sysfs_remove_link(&con->kobj, buf);
	}
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}
585
/* Creates/removes the cross-directory symlinks for each link device. */
static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};
591
592static int __init devlink_class_init(void)
593{
594 int ret;
595
596 ret = class_register(&devlink_class);
597 if (ret)
598 return ret;
599
600 ret = class_interface_register(&devlink_class_intf);
601 if (ret)
602 class_unregister(&devlink_class);
603
604 return ret;
605}
606postcore_initcall(devlink_class_init);
607
/* Flags that make a link managed by the driver core. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED)

/* All flags callers may pass to device_link_add(). */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * Create a device link between @consumer and @supplier, or reuse and
 * update an existing one between the same pair.  @flags must be a
 * subset of DL_ADD_VALID_FLAGS and internally consistent:
 * DL_FLAG_STATELESS excludes all managed flags, DL_FLAG_SYNC_STATE_ONLY
 * may only be combined with DL_FLAG_INFERRED, and
 * DL_FLAG_AUTOPROBE_CONSUMER excludes both auto-remove flags (see the
 * first check below).
 *
 * If DL_FLAG_PM_RUNTIME and DL_FLAG_RPM_ACTIVE are both set, the
 * supplier is runtime-resumed up front and the link carries an extra
 * rpm_active reference (dropped in device_link_release_fn()).
 *
 * Return: the (new or existing) link on success, or NULL on invalid
 * flags, a would-be dependency cycle, an uninitialized supplier, or
 * allocation/registration failure.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/* Reject invalid or mutually exclusive flag combinations. */
	if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, refuse to create the link.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		  device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links only matter while the consumer has not
	 * probed yet, so don't create one for a consumer past that point.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER, so the latter is
	 * dropped when the supplier-side auto-removal is requested.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* Reuse an existing link between this pair if there is one. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/* An explicit request supersedes an inferred link. */
		if (link->flags & DL_FLAG_INFERRED &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		/* Promote a previously stateless-only link to managed. */
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		/* A full link supersedes a SYNC_STATE_ONLY one. */
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	/* Bias of 1; extra references mean active runtime PM holds. */
	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(consumer);
		put_device(supplier);
		kfree(link);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * If the link is being added by the consumer driver at probe time
	 * and runtime PM is requested, resume the supplier right away.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the
	 * ends of dpm_list and the devices kset list, so the consumer does
	 * not sort before its supplier(s).
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Balance the up-front pm_runtime_get_sync() on failure. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
871
/*
 * kref release callback for a device link: detach it from both devices'
 * lists and unregister its sysfs device.  Final freeing happens later
 * via devlink_dev_release() -> device_link_release_fn().
 */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}
884
885static void device_link_put_kref(struct device_link *link)
886{
887 if (link->flags & DL_FLAG_STATELESS)
888 kref_put(&link->kref, __device_link_del);
889 else
890 WARN(1, "Unable to drop a managed device link reference\n");
891}
892
893
894
895
896
897
898
899
900
901
/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * Drops one reference under the device links write lock; a link added
 * multiple times must be deleted as many times (kref-counted).  Only
 * stateless links may be deleted this way (see device_link_put_kref()).
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
909
910
911
912
913
914
915
916
917
/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * Looks up the link from @supplier to @consumer and drops one stateless
 * reference to it.  WARNs and does nothing if @consumer == @supplier.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
937
/*
 * Roll links that were in "consumer probe" back to a pre-probe state
 * after @dev's probe was deferred: links whose supplier is bound become
 * "available"; otherwise only SYNC_STATE_ONLY links can legitimately be
 * in this position, so warn and mark them "dormant".
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers.  Walk the list of the
 * device's links to suppliers and see if all of them are available.  If not,
 * simply return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here.  It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers.  This means we need
 * to mark the link as "consumer probe in progress" to make the supplier
 * removal wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 *
 * Return: 0 when probing may proceed, -EPROBE_DEFER otherwise.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	/*
	 * Device waiting for supplier to become available is not allowed to
	 * probe.
	 */
	mutex_lock(&fwnode_link_lock);
	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
	    !fw_devlink_is_permissive()) {
		dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
			list_first_entry(&dev->fwnode->suppliers,
					 struct fwnode_link,
					 c_hook)->supplier);
		mutex_unlock(&fwnode_link_lock);
		return -EPROBE_DEFER;
	}
	mutex_unlock(&fwnode_link_lock);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
			device_links_missing_supplier(dev);
			dev_dbg(dev, "probe deferral - supplier %s not ready\n",
				dev_name(link->supplier));
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
/**
 * __device_links_queue_sync_state - Queue a device for sync_state() callback
 * @dev: Device to call sync_state() on
 * @list: List head to queue the @dev on
 *
 * Queues a device for a sync_state() callback when the device links write
 * lock isn't held.  This allows the sync_state() execution flow to use
 * device link APIs.  The caller must hold device_links_write_lock() while
 * calling this, and must call device_links_flush_sync_list() on @list as
 * soon as the lock is released, both to run the callbacks in a timely
 * fashion and to balance the get_device() taken here.
 */
static void __device_links_queue_sync_state(struct device *dev,
					    struct list_head *list)
{
	struct device_link *link;

	if (!dev_has_sync_state(dev))
		return;
	if (dev->state_synced)
		return;

	/* Only sync once every managed consumer link is active. */
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;
		if (link->status != DL_STATE_ACTIVE)
			return;
	}

	/*
	 * Set the flag here to avoid adding the same device to a list more
	 * than once.  This can happen if new consumers get added to the
	 * device and probed before the list is flushed.
	 */
	dev->state_synced = true;

	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
		return;

	/* Reference dropped by device_links_flush_sync_list(). */
	get_device(dev);
	list_add_tail(&dev->links.defer_sync, list);
}
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
/**
 * device_links_flush_sync_list - Call sync_state() on a list of devices
 * @list: List of devices to call sync_state() on
 * @dont_lock_dev: Device for which lock is already held by the caller
 *
 * Calls sync_state() on all the devices that have been queued for it.  This
 * function is used in conjunction with __device_links_queue_sync_state().
 * The @dont_lock_dev parameter is useful when this function is called from a
 * context where that device's lock is already held.
 */
static void device_links_flush_sync_list(struct list_head *list,
					 struct device *dont_lock_dev)
{
	struct device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
		list_del_init(&dev->links.defer_sync);

		if (dev != dont_lock_dev)
			device_lock(dev);

		/* The bus sync_state() takes precedence over the driver's. */
		if (dev->bus->sync_state)
			dev->bus->sync_state(dev);
		else if (dev->driver && dev->driver->sync_state)
			dev->driver->sync_state(dev);

		if (dev != dont_lock_dev)
			device_unlock(dev);

		/* Drop the reference taken when the device was queued. */
		put_device(dev);
	}
}
1095
/* Defer all sync_state() callbacks until the matching resume call. */
void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}
1102
/*
 * Drop one deferral level; once the count reaches zero, queue and run
 * sync_state() for every device parked on the deferred_sync list.
 */
void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_sync is used for both lists.
		 */
		list_del_init(&dev->links.defer_sync);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	/* Run the callbacks outside the write lock. */
	device_links_flush_sync_list(&sync_list, NULL);
}
1130
/*
 * defer_sync_state_count starts at 1 so sync_state() callbacks are held
 * back throughout boot; release them once late initcalls run.
 */
static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);
1137
/* Park @sup on deferred_sync unless already queued or lacking sync_state. */
static void __device_links_supplier_defer_sync(struct device *sup)
{
	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
		list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
1143
/*
 * Strip the MANAGED flag from @link and drop the driver core's
 * reference; the link is deleted once the last reference goes away.
 */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}
1150
/*
 * "waiting_for_supplier" sysfs attribute: 1 if the device still has
 * unresolved fwnode supplier links.
 *
 * NOTE(review): dev->fwnode is dereferenced unconditionally, and
 * fwnode->suppliers is read under the device lock rather than
 * fwnode_link_lock (cf. device_links_check_suppliers()) — presumably
 * the attribute is only created for devices that have a fwnode; confirm
 * against the code that creates this file.
 */
static ssize_t waiting_for_supplier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	bool val;

	device_lock(dev);
	val = !list_empty(&dev->fwnode->suppliers);
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
/**
 * device_links_force_bind - Prepares device to be force bound
 * @dev: Consumer device.
 *
 * device_bind_driver() force binds a device to a driver without calling any
 * driver probe functions.  So the consumer really isn't going to wait for any
 * supplier before it's bound to the driver.  We still want the device link
 * states to be sensible when this happens.
 *
 * In preparation for device_bind_driver(), this function goes through each
 * supplier device link and checks if the supplier is bound.  If it is, the
 * link is moved to CONSUMER_PROBE; otherwise the link is dropped.  Links
 * without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_force_bind(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_link_drop_managed(link);
			continue;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
}
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device binds successfully, it's expected to have created all
	 * the device links it needs to or make new device links as it needs
	 * them.  So, fw_devlink no longer needs to create device links to any
	 * of the device's suppliers.
	 *
	 * Also, if a child firmware node of this bound device is not added as
	 * a device by now, assume it is never going to be added and make sure
	 * other devices don't defer probe indefinitely by waiting for such a
	 * child device.
	 */
	if (dev->fwnode && dev->fwnode->dev == dev) {
		struct fwnode_handle *child;
		fwnode_links_purge_suppliers(dev->fwnode);
		fwnode_for_each_available_child_node(dev->fwnode, child)
			fw_devlink_purge_absent_suppliers(child);
	}
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first.  Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
			 * other DL_MANAGED_LINK_FLAGS have been set, so it is
			 * safe to drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
		 * device link that was preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	device_links_flush_sync_list(&sync_list, dev);
}
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in
 * which case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.  The caller
 * must hold the device links write lock.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
			device_link_drop_managed(link);
			continue;
		}

		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			/* Only SYNC_STATE_ONLY links can be here legitimately. */
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver probe failed.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * __device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant"
 * and invoke __device_links_no_driver() to update links to suppliers for it
 * as appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	list_del_init(&dev->links.defer_sync);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the device is or may
 * be busy).  All other managed consumer links are moved to "supplier
 * unbind" and @dev is marked as unbinding as a side effect.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state.  If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as long as
 * the supplier unbind is in progress), otherwise move to the next consumer.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			/* Can't wait under the write lock; restart after. */
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			/* Keep the consumer alive across the unlock. */
			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}
1510
1511
1512
1513
1514
/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/* Link devices themselves have no links of their own. */
	if (dev->class == &devlink_class)
		return;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}
1541
/* Device-link flag sets corresponding to the fw_devlink= boot modes. */
#define FW_DEVLINK_FLAGS_PERMISSIVE	(DL_FLAG_INFERRED | \
					 DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON		(DL_FLAG_INFERRED | \
					 DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM		(FW_DEVLINK_FLAGS_ON | \
					 DL_FLAG_PM_RUNTIME)

/* Flags fw_devlink uses when creating device links; defaults to "on". */
static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1550static int __init fw_devlink_setup(char *arg)
1551{
1552 if (!arg)
1553 return -EINVAL;
1554
1555 if (strcmp(arg, "off") == 0) {
1556 fw_devlink_flags = 0;
1557 } else if (strcmp(arg, "permissive") == 0) {
1558 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
1559 } else if (strcmp(arg, "on") == 0) {
1560 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1561 } else if (strcmp(arg, "rpm") == 0) {
1562 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
1563 }
1564 return 0;
1565}
1566early_param("fw_devlink", fw_devlink_setup);
1567
/* Set via the fw_devlink.strict=<bool> kernel command line option. */
static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
	return strtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);
1574
/* Device-link flags fw_devlink is currently configured to use. */
u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}

/* True when fw_devlink only creates SYNC_STATE_ONLY (permissive) links. */
static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}

/* True when fw_devlink.strict is set and fw_devlink isn't permissive. */
bool fw_devlink_is_strict(void)
{
	return fw_devlink_strict && !fw_devlink_is_permissive();
}
1589
1590static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
1591{
1592 if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
1593 return;
1594
1595 fwnode_call_int_op(fwnode, add_links);
1596 fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
1597}
1598
1599static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
1600{
1601 struct fwnode_handle *child = NULL;
1602
1603 fw_devlink_parse_fwnode(fwnode);
1604
1605 while ((child = fwnode_get_next_available_child_node(fwnode, child)))
1606 fw_devlink_parse_fwtree(child);
1607}
1608
/*
 * Relax an inferred device link down to the permissive (SYNC_STATE_ONLY)
 * flag set so it no longer blocks its consumer.  Links not created by
 * fw_devlink (no DL_FLAG_INFERRED) and links that are already fully
 * relaxed are left alone.  The runtime-PM link contribution is dropped
 * because the relaxed flags don't include DL_FLAG_PM_RUNTIME.
 */
static void fw_devlink_relax_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_INFERRED))
		return;

	if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
		return;

	pm_runtime_drop_link(link);
	link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
	dev_dbg(link->consumer, "Relaxing link with %s\n",
		dev_name(link->supplier));
}
1622
1623static int fw_devlink_no_driver(struct device *dev, void *data)
1624{
1625 struct device_link *link = to_devlink(dev);
1626
1627 if (!link->supplier->can_match)
1628 fw_devlink_relax_link(link);
1629
1630 return 0;
1631}
1632
/*
 * fw_devlink_drivers_done - Note that driver registration is done and relax
 * every device link whose supplier never matched a driver (see
 * fw_devlink_no_driver()), under the device-links write lock.
 */
void fw_devlink_drivers_done(void)
{
	fw_devlink_drv_reg_done = true;
	device_links_write_lock();
	class_for_each_device(&devlink_class, NULL, NULL,
			      fw_devlink_no_driver);
	device_links_write_unlock();
}
1641
/*
 * Relax all inferred links from @dev to its consumers so that they are no
 * longer blocked on @dev.  Nothing to do when fw_devlink is off or
 * permissive, since those modes never create blocking links.
 */
static void fw_devlink_unblock_consumers(struct device *dev)
{
	struct device_link *link;

	if (!fw_devlink_flags || fw_devlink_is_permissive())
		return;

	device_links_write_lock();
	list_for_each_entry(link, &dev->links.consumers, s_node)
		fw_devlink_relax_link(link);
	device_links_write_unlock();
}
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
/*
 * fw_devlink_relax_cycle - Recursively look for a dependency path from
 * @con (through its children and its consumers) back to @sup, relaxing
 * every inferred link found on such a path.
 *
 * Returns 1 if a cycle through @sup was found, 0 otherwise.  Called with
 * the device-links write lock held (see fw_devlink_create_devlink()).
 */
static int fw_devlink_relax_cycle(struct device *con, void *sup)
{
	struct device_link *link;
	int ret;

	if (con == sup)
		return 1;

	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
	if (ret)
		return ret;

	list_for_each_entry(link, &con->links.consumers, s_node) {
		/* Links already reduced to SYNC_STATE_ONLY can't block. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (!fw_devlink_relax_cycle(link->consumer, sup))
			continue;

		ret = 1;

		fw_devlink_relax_link(link);
	}
	return ret;
}
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
/*
 * fw_devlink_create_devlink - Try to create a device link from consumer
 * @con to the device behind @sup_handle using @flags.
 *
 * Returns 0 on success, -EAGAIN when the caller should retry later (the
 * supplier has no device yet), and -EINVAL when no link should ever be
 * created (unsuitable supplier, or a dependency cycle that was relaxed).
 */
static int fw_devlink_create_devlink(struct device *con,
				     struct fwnode_handle *sup_handle, u32 flags)
{
	struct device *sup_dev;
	int ret = 0;

	sup_dev = get_dev_from_fwnode(sup_handle);
	if (sup_dev) {
		/*
		 * A supplier that is already initialized but whose device
		 * has no driver isn't going to bind through the driver
		 * core; don't make consumers wait on it.
		 */
		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * device_link_add() failing for a non-SYNC_STATE_ONLY
		 * request is treated as a dependency cycle: relax the
		 * offending links and retry with permissive flags.
		 */
		if (!device_link_add(con, sup_dev, flags) &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			dev_info(con, "Fixing up cyclic dependency with %s\n",
				 dev_name(sup_dev));
			device_links_write_lock();
			fw_devlink_relax_cycle(con, sup_dev);
			device_links_write_unlock();
			device_link_add(con, sup_dev,
					FW_DEVLINK_FLAGS_PERMISSIVE);
			ret = -EINVAL;
		}

		goto out;
	}

	/* Supplier already initialized without ever getting a device. */
	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
		return -EINVAL;

	/*
	 * No supplier device yet: a SYNC_STATE_ONLY link can't be made at
	 * this point, so ask the caller to retry later.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY)
		return -EAGAIN;

	/*
	 * Otherwise check the closest ancestor of @sup_handle that does
	 * have a device: if @con already depends on it (directly or
	 * transitively), linking would create a false dependency.
	 */
	sup_dev = fwnode_get_next_parent_dev(sup_handle);
	if (sup_dev && device_is_dependent(con, sup_dev)) {
		dev_dbg(con, "Not linking to %pfwP - False link\n",
			sup_handle);
		ret = -EINVAL;
	} else {
		/* Can't decide yet; retry once the supplier device exists. */
		ret = -EAGAIN;
	}

out:
	/* put_device() tolerates a NULL sup_dev on the early-return paths. */
	put_device(sup_dev);
	return ret;
}
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
/*
 * __fw_devlink_link_to_consumers - Convert the fwnode links recorded on
 * @dev's fwnode into device links from @dev to its consumers.
 *
 * If a consumer fwnode has no device yet, the link is attempted against
 * its closest ancestor that does have one (with permissive flags, since
 * that device isn't the real consumer).  A fwnode link is deleted only
 * once it has been handled on behalf of its real consumer (own_link) with
 * a final result; -EAGAIN keeps it around for a later retry.
 */
static void __fw_devlink_link_to_consumers(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;
	struct fwnode_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		u32 dl_flags = fw_devlink_get_flags();
		struct device *con_dev;
		bool own_link = true;
		int ret;

		con_dev = get_dev_from_fwnode(link->consumer);
		/*
		 * No device for the consumer fwnode yet: fall back to its
		 * nearest ancestor with a device.
		 */
		if (!con_dev) {
			con_dev = fwnode_get_next_parent_dev(link->consumer);
			/*
			 * ...but never link @dev to its own descendant;
			 * that would be a dependency loop.
			 */
			if (con_dev &&
			    fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
				put_device(con_dev);
				con_dev = NULL;
			} else {
				own_link = false;
				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
			}
		}

		if (!con_dev)
			continue;

		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
		put_device(con_dev);
		if (!own_link || ret == -EAGAIN)
			continue;

		/* Handled for the real consumer: drop the fwnode link. */
		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);
	}
}
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
/*
 * __fw_devlink_link_to_suppliers - Convert the supplier fwnode links on
 * @fwnode (and, recursively, on its descendants) into device links with
 * @dev as the consumer.
 *
 * Links made on behalf of a descendant fwnode (own_link false) use the
 * permissive flag set.  A fwnode link is deleted only when @fwnode is
 * @dev's own fwnode and the result is final (not -EAGAIN).  After a
 * successful link, the supplier is recursively given the chance to link
 * to its own suppliers.
 */
static void __fw_devlink_link_to_suppliers(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	bool own_link = (dev->fwnode == fwnode);
	struct fwnode_link *link, *tmp;
	struct fwnode_handle *child = NULL;
	u32 dl_flags;

	if (own_link)
		dl_flags = fw_devlink_get_flags();
	else
		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;

	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		int ret;
		struct device *sup_dev;
		struct fwnode_handle *sup = link->supplier;

		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
		if (!own_link || ret == -EAGAIN)
			continue;

		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);

		/* No point recursing into a supplier we couldn't link to. */
		if (ret)
			continue;

		/*
		 * Recurse so the supplier links to its own suppliers too.
		 * NOTE(review): ret == 0 implies the supplier fwnode has a
		 * device, so sup_dev is expected to be non-NULL here.
		 */
		sup_dev = get_dev_from_fwnode(sup);
		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
		put_device(sup_dev);
	}

	/*
	 * Handle the supplier links of descendant fwnodes that don't have
	 * devices of their own, on @dev's behalf (permissive flags).
	 */
	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		__fw_devlink_link_to_suppliers(dev, child);
}
1947
/*
 * fw_devlink_link_device - Parse @dev's fwnode tree and turn its fwnode
 * links (both directions) into device links.  No-op when fw_devlink is
 * disabled.  fwnode_link_lock serializes against fwnode_link_add().
 */
static void fw_devlink_link_device(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;

	if (!fw_devlink_flags)
		return;

	fw_devlink_parse_fwtree(fwnode);

	mutex_lock(&fwnode_link_lock);
	__fw_devlink_link_to_consumers(dev);
	__fw_devlink_link_to_suppliers(dev, fwnode);
	mutex_unlock(&fwnode_link_lock);
}
1962
1963
1964
/* Legacy hooks invoked on device add/remove (see device_platform_notify()). */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
/* Parent kobject for the char/block dev kobjects below — presumably the
 * /sys/dev hierarchy; TODO confirm against where these are registered. */
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;
1970
/* Serializes hotplug operations driven through the functions below. */
static DEFINE_MUTEX(device_hotplug_lock);

void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

/*
 * Try to take the hotplug lock on behalf of a sysfs handler.  If it is
 * contended, sleep briefly and restart the syscall instead of blocking —
 * presumably to avoid deadlocking against paths that remove the sysfs
 * file while holding the lock; confirm against the callers.
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
1992
#ifdef CONFIG_BLOCK
/* With CONFIG_BLOCK, disk partitions are the one device type excluded. */
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
/* Without block support nothing can be a partition. */
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
2004
2005static int
2006device_platform_notify(struct device *dev, enum kobject_action action)
2007{
2008 int ret;
2009
2010 ret = acpi_platform_notify(dev, action);
2011 if (ret)
2012 return ret;
2013
2014 ret = software_node_notify(dev, action);
2015 if (ret)
2016 return ret;
2017
2018 if (platform_notify && action == KOBJ_ADD)
2019 platform_notify(dev);
2020 else if (platform_notify_remove && action == KOBJ_REMOVE)
2021 platform_notify_remove(dev);
2022 return 0;
2023}
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
/**
 * dev_driver_string - Return a device's bound driver name, falling back to
 * the bus name when no driver is bound.
 * @dev: the device to query.
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/*
	 * dev->driver can change underneath us on unbind; read it once so
	 * the NULL check and the dereference see the same pointer.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
2046
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

/* Generic sysfs ->show: dispatch to the device_attribute's show method. */
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;	/* attribute without a show method */

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	if (ret >= (ssize_t)PAGE_SIZE) {
		/* A show method must never fill more than one page. */
		printk("dev_attr_show: %pS returned bad count\n",
		       dev_attr->show);
	}
	return ret;
}

/* Generic sysfs ->store: dispatch to the device_attribute's store method. */
static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;	/* attribute without a store method */

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

/* sysfs ops shared by every device kobject (see device_ktype). */
static const struct sysfs_ops dev_sysfs_ops = {
	.show = dev_attr_show,
	.store = dev_attr_store,
};
2081
2082#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
2083
2084ssize_t device_store_ulong(struct device *dev,
2085 struct device_attribute *attr,
2086 const char *buf, size_t size)
2087{
2088 struct dev_ext_attribute *ea = to_ext_attr(attr);
2089 int ret;
2090 unsigned long new;
2091
2092 ret = kstrtoul(buf, 0, &new);
2093 if (ret)
2094 return ret;
2095 *(unsigned long *)(ea->var) = new;
2096
2097 return size;
2098}
2099EXPORT_SYMBOL_GPL(device_store_ulong);
2100
/* sysfs ->show helper: print the dev_ext_attribute's unsigned long in hex. */
ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
2109
2110ssize_t device_store_int(struct device *dev,
2111 struct device_attribute *attr,
2112 const char *buf, size_t size)
2113{
2114 struct dev_ext_attribute *ea = to_ext_attr(attr);
2115 int ret;
2116 long new;
2117
2118 ret = kstrtol(buf, 0, &new);
2119 if (ret)
2120 return ret;
2121
2122 if (new > INT_MAX || new < INT_MIN)
2123 return -EINVAL;
2124 *(int *)(ea->var) = new;
2125
2126 return size;
2127}
2128EXPORT_SYMBOL_GPL(device_store_int);
2129
/* sysfs ->show helper: print the dev_ext_attribute's int in decimal. */
ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);
2139
/* sysfs ->store helper: parse a boolean into the dev_ext_attribute's var. */
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

/* sysfs ->show helper: print the dev_ext_attribute's bool as 0/1. */
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
2160
2161
2162
2163
2164
2165
2166
2167
2168
/*
 * device_release - called by the kobject core when the device's refcount
 * reaches zero.  Releases remaining devres entries, frees the DMA range
 * map, then delegates freeing the structure itself to the most specific
 * release hook available (device, then type, then class).  A device with
 * no release hook at all is broken and triggers a WARN.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Last chance to drop devres entries: this runs before the release
	 * hooks below, which may free dev itself.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
		     dev_name(dev));
	kfree(p);
}
2198
2199static const void *device_namespace(struct kobject *kobj)
2200{
2201 struct device *dev = kobj_to_dev(kobj);
2202 const void *ns = NULL;
2203
2204 if (dev->class && dev->class->ns_type)
2205 ns = dev->class->namespace(dev);
2206
2207 return ns;
2208}
2209
/* kobject ->get_ownership hook: delegate uid/gid lookup to the class. */
static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}
2217
/* kobj_type backing the kobject embedded in every struct device. */
static struct kobj_type device_ktype = {
	.release = device_release,
	.sysfs_ops = &dev_sysfs_ops,
	.namespace = device_namespace,
	.get_ownership = device_get_ownership,
};
2224
2225
2226static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
2227{
2228 struct kobj_type *ktype = get_ktype(kobj);
2229
2230 if (ktype == &device_ktype) {
2231 struct device *dev = kobj_to_dev(kobj);
2232 if (dev->bus)
2233 return 1;
2234 if (dev->class)
2235 return 1;
2236 }
2237 return 0;
2238}
2239
2240static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
2241{
2242 struct device *dev = kobj_to_dev(kobj);
2243
2244 if (dev->bus)
2245 return dev->bus->name;
2246 if (dev->class)
2247 return dev->class->name;
2248 return NULL;
2249}
2250
/*
 * dev_uevent - fill in the uevent environment for a device: device-node
 * info (MAJOR/MINOR/DEVNAME/...), DEVTYPE, DRIVER, OF properties, then the
 * bus-, class- and type-specific uevent callbacks, in that order.
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* Add the device-node properties when a dev_t is assigned. */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			/* Only report non-root ownership explicitly. */
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common device-tree information about the device. */
	of_device_uevent(dev, env);

	/* Let the bus-specific function add its variables. */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* Let the class-specific function add its variables. */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* Let the device-type-specific function add its variables. */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}
2317
/* uevent ops installed on the kset that device kobjects live in. */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter = dev_uevent_filter,
	.name = dev_uevent_name,
	.uevent = dev_uevent,
};
2323
/*
 * "uevent" attribute ->show: synthesize the uevent environment exactly as
 * a real uevent would (same kset ops and filter) and print one KEY=VALUE
 * pair per line.  Returns an empty result when the device currently has no
 * uevent-capable kset or is filtered out.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* Find the kset that handles this object's uevents (may be on a parent). */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* Respect the kset's filter, just like a real uevent. */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* Let the kset-specific function fill in the environment. */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* Copy the collected KEY=VALUE pairs to the sysfs buffer. */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}
2366
/* "uevent" attribute ->store: send the synthetic uevent described in @buf. */
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);
2382
/* "online" attribute ->show: 1 unless the device has been taken offline. */
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	/* device_lock() keeps dev->offline stable against online/offline. */
	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}
2393
/*
 * "online" attribute ->store: bring the device online (1) or take it
 * offline (0), serialized against other hotplug operations via the
 * restartable sysfs variant of the hotplug lock.
 */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);
2413
2414static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
2415 char *buf)
2416{
2417 const char *loc;
2418
2419 switch (dev->removable) {
2420 case DEVICE_REMOVABLE:
2421 loc = "removable";
2422 break;
2423 case DEVICE_FIXED:
2424 loc = "fixed";
2425 break;
2426 default:
2427 loc = "unknown";
2428 }
2429 return sysfs_emit(buf, "%s\n", loc);
2430}
2431static DEVICE_ATTR_RO(removable);
2432
/* Create all sysfs attribute groups in @groups for @dev. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

/* Remove the attribute groups previously added with device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
2445
/*
 * devres payload for devm-managed attribute group(s); exactly one member
 * is meaningful depending on which add function registered it.
 */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

/* devres match callback: does this entry wrap the given group pointer? */
static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

/* devres release callback: remove a single devm-managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

/* devres release callback: remove a devm-managed array of groups. */
static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
2485{
2486 union device_attr_group_devres *devres;
2487 int error;
2488
2489 devres = devres_alloc(devm_attr_group_remove,
2490 sizeof(*devres), GFP_KERNEL);
2491 if (!devres)
2492 return -ENOMEM;
2493
2494 error = sysfs_create_group(&dev->kobj, grp);
2495 if (error) {
2496 devres_free(devres);
2497 return error;
2498 }
2499
2500 devres->group = grp;
2501 devres_add(dev, devres);
2502 return 0;
2503}
2504EXPORT_SYMBOL_GPL(devm_device_add_group);
2505
2506
2507
2508
2509
2510
2511
2512
2513
/*
 * devm_device_remove_group - remove a devm-managed attribute group early.
 * Fires the devres release callback (removing the sysfs group) and WARNs
 * if no matching devres entry was registered for @grp.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536int devm_device_add_groups(struct device *dev,
2537 const struct attribute_group **groups)
2538{
2539 union device_attr_group_devres *devres;
2540 int error;
2541
2542 devres = devres_alloc(devm_attr_groups_remove,
2543 sizeof(*devres), GFP_KERNEL);
2544 if (!devres)
2545 return -ENOMEM;
2546
2547 error = sysfs_create_groups(&dev->kobj, groups);
2548 if (error) {
2549 devres_free(devres);
2550 return error;
2551 }
2552
2553 devres->groups = groups;
2554 devres_add(dev, devres);
2555 return 0;
2556}
2557EXPORT_SYMBOL_GPL(devm_device_add_groups);
2558
2559
2560
2561
2562
2563
2564
2565
2566
/*
 * devm_device_remove_groups - remove a devm-managed array of attribute
 * groups early; WARNs if no matching devres entry exists for @groups.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
2575
/*
 * device_add_attrs - create the standard sysfs attributes for a device:
 * the class's dev_groups, the type's groups, the device's own groups, and
 * the conditional "online", "waiting_for_supplier" and "removable" files.
 * On failure, everything created so far is removed in reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	/* "online" only for devices that support offlining. */
	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	/* "waiting_for_supplier" only when fw_devlink may block probing. */
	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	if (dev_removable_is_valid(dev)) {
		error = device_create_file(dev, &dev_attr_removable);
		if (error)
			goto err_remove_dev_waiting_for_supplier;
	}

	return 0;

 err_remove_dev_waiting_for_supplier:
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
2633
/* Undo device_add_attrs(): remove files first, then groups in reverse order. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_removable);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
2650
/* "dev" attribute ->show: print the device's major:minor numbers. */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
2657
/* The kset all struct device kobjects are added to (see device_initialize()). */
struct kset *devices_kset;
2660
2661
2662
2663
2664
2665
/*
 * devices_kset_move_before - Move @deva immediately before @devb in the
 * devices kset's list.  No-op until the kset exists.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
2676
2677
2678
2679
2680
2681
/*
 * devices_kset_move_after - Move @deva immediately after @devb in the
 * devices kset's list.  No-op until the kset exists.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
2692
2693
2694
2695
2696
/*
 * devices_kset_move_last - Move @dev to the end of the devices kset's list.
 * No-op until the kset exists.
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
2706
2707
2708
2709
2710
2711
/**
 * device_create_file - create a sysfs attribute file for @dev.
 * @dev: device (a NULL device is silently accepted and returns 0).
 * @attr: device attribute descriptor.
 *
 * WARNs about attributes declaring write permission without a store
 * method or read permission without a show method — such files could
 * never work (see dev_attr_show()/dev_attr_store()).
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
		     "Attribute %s: write permission without 'store'\n",
		     attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
		     "Attribute %s: read permission without 'show'\n",
		     attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);
2730
2731
2732
2733
2734
2735
2736void device_remove_file(struct device *dev,
2737 const struct device_attribute *attr)
2738{
2739 if (dev)
2740 sysfs_remove_file(&dev->kobj, &attr->attr);
2741}
2742EXPORT_SYMBOL_GPL(device_remove_file);
2743
2744
2745
2746
2747
2748
2749
2750
2751bool device_remove_file_self(struct device *dev,
2752 const struct device_attribute *attr)
2753{
2754 if (dev)
2755 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
2756 else
2757 return false;
2758}
2759EXPORT_SYMBOL_GPL(device_remove_file_self);
2760
2761
2762
2763
2764
2765
2766int device_create_bin_file(struct device *dev,
2767 const struct bin_attribute *attr)
2768{
2769 int error = -EINVAL;
2770 if (dev)
2771 error = sysfs_create_bin_file(&dev->kobj, attr);
2772 return error;
2773}
2774EXPORT_SYMBOL_GPL(device_create_bin_file);
2775
2776
2777
2778
2779
2780
2781void device_remove_bin_file(struct device *dev,
2782 const struct bin_attribute *attr)
2783{
2784 if (dev)
2785 sysfs_remove_bin_file(&dev->kobj, attr);
2786}
2787EXPORT_SYMBOL_GPL(device_remove_bin_file);
2788
/* klist get hook: pin the child device while its node is referenced. */
static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

/* klist put hook: drop the reference taken by klist_children_get(). */
static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * Sets up the embedded kobject (placed in the devices kset), the device
 * mutex and lockdep state, the devres and DMA-pool bookkeeping, PM state,
 * NUMA node (unset, -1), MSI list, and the device-links lists.  The device
 * starts with no driver (DL_DEV_NO_DRIVER) and, on architectures that need
 * it, the default DMA coherency setting.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	raw_spin_lock_init(&dev->msi_lock);
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	dev->dma_coherent = dma_default_coherent;
#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
2854
/*
 * Return (lazily creating on first use) the "virtual" directory kobject
 * under the devices kset, used as the parent for devices with no parent.
 * NOTE(review): the lazily-created singleton isn't visibly protected by a
 * lock here — presumably callers are serialized; confirm before relying
 * on concurrent use.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}
2865
/* A per-class "glue" directory kobject placed under a parent device. */
struct class_dir {
	struct kobject kobj;
	struct class *class;	/* the class this glue dir belongs to */
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
2872
/* kobject release hook for class glue directories. */
static void class_dir_release(struct kobject *kobj)
{
	kfree(to_class_dir(kobj));
}
2878
2879static const
2880struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
2881{
2882 struct class_dir *dir = to_class_dir(kobj);
2883 return dir->class->ns_type;
2884}
2885
/* kobj_type for class glue directories. */
static struct kobj_type class_dir_ktype = {
	.release = class_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.child_ns_type = class_dir_child_ns_type
};
2891
/*
 * Allocate and register a glue directory kobject named after @class,
 * under @parent_kobj, in the class's glue_dirs kset.  Returns an ERR_PTR
 * on failure; kobject_put() handles cleanup when kobject_add() fails.
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}
2914
/* Serializes lookup, creation and cleanup of class glue directories. */
static DEFINE_MUTEX(gdp_mutex);
2916
/*
 * get_device_parent - Determine the sysfs parent kobject for @dev.
 *
 * Class devices are grouped under a per-class "glue" directory (or under
 * "virtual" when parentless), unless the parent is itself a class device
 * and no namespace is involved.  Non-class devices hang off their parent,
 * their bus's dev_root, or the top level.
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* Legacy layout: block devices live under the block subsystem. */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * No parent: live in "virtual".  A class-device parent (with
		 * no namespacing) is used directly; otherwise a glue dir
		 * under the parent keeps class names from colliding.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* Reuse an existing glue dir at this parent, if any. */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* Otherwise create a new glue dir under the parent. */
		k = class_dir_create_and_add(dev->class, parent_kobj);

		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* Subsystems may specify a default root for their devices. */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
2976
2977static inline bool live_in_glue_dir(struct kobject *kobj,
2978 struct device *dev)
2979{
2980 if (!kobj || !dev->class ||
2981 kobj->kset != &dev->class->p->glue_dirs)
2982 return false;
2983 return true;
2984}
2985
/* Sysfs parent of @dev; may be a glue directory (see live_in_glue_dir()). */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
2990
2991
2992
2993
2994
2995
/*
 * cleanup_glue_dir - drop our reference on @glue_dir and remove it from
 * sysfs once it is both childless and about to lose its last reference.
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/*
	 * The emptiness/refcount inspection and kobject_del() must happen
	 * under gdp_mutex: otherwise a concurrent get_device_parent() could
	 * look up and reuse this glue dir (or add a child beneath it)
	 * between our check and the deletion, ending up with a device being
	 * added under an already-deleted directory.
	 *
	 * Only delete when no child kobject remains and the reference we
	 * are about to drop is the last one (kref would reach zero after
	 * our put).  NOTE(review): the raw kref_read()/--ref dance mirrors
	 * what the final kobject_put() below will do; the put performs the
	 * actual release.
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
3059
/*
 * Create the standard sysfs links for @dev: "of_node" (best effort),
 * "subsystem", "device" (to the parent) and the class-side link named
 * after the device.  On failure, previously created links are removed in
 * reverse order via the goto chain.  Returns 0 or a negative errno.
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories for the disks themselves (deprecated) */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	/* harmless if the of_node link was never created */
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
3111
/* Undo device_add_class_symlinks(): remove all sysfs links for @dev. */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	/* no class-side link was created for deprecated block layout */
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
3129
3130
3131
3132
3133
3134
3135int dev_set_name(struct device *dev, const char *fmt, ...)
3136{
3137 va_list vargs;
3138 int err;
3139
3140 va_start(vargs, fmt);
3141 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
3142 va_end(vargs);
3143 return err;
3144}
3145EXPORT_SYMBOL_GPL(dev_set_name);
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158static struct kobject *device_to_dev_kobj(struct device *dev)
3159{
3160 struct kobject *kobj;
3161
3162 if (dev->class)
3163 kobj = dev->class->dev_kobj;
3164 else
3165 kobj = sysfs_dev_char_kobj;
3166
3167 return kobj;
3168}
3169
3170static int device_create_sys_dev_entry(struct device *dev)
3171{
3172 struct kobject *kobj = device_to_dev_kobj(dev);
3173 int error = 0;
3174 char devt_str[15];
3175
3176 if (kobj) {
3177 format_dev_t(devt_str, dev->devt);
3178 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
3179 }
3180
3181 return error;
3182}
3183
3184static void device_remove_sys_dev_entry(struct device *dev)
3185{
3186 struct kobject *kobj = device_to_dev_kobj(dev);
3187 char devt_str[15];
3188
3189 if (kobj) {
3190 format_dev_t(devt_str, dev->devt);
3191 sysfs_remove_link(kobj, devt_str);
3192 }
3193}
3194
3195static int device_private_init(struct device *dev)
3196{
3197 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
3198 if (!dev->p)
3199 return -ENOMEM;
3200 dev->p->device = dev;
3201 klist_init(&dev->p->klist_children, klist_children_get,
3202 klist_children_put);
3203 INIT_LIST_HEAD(&dev->p->deferred_probe);
3204 return 0;
3205}
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * The first half of device_register(): adds an already-initialized
 * device to sysfs, creates the standard attributes and links, registers
 * it with its bus/class and triggers probing.  The caller's reference is
 * never consumed; on error the device must still be released with
 * put_device().  Returns 0 or a negative errno; errors are unwound in
 * strict reverse order through the goto chain at the bottom.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * For statically allocated devices, which should all be converted
	 * some day, we need to initialize the name.  We prevent reading
	 * back the name, and force the use of dev_name().
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	error = device_platform_notify(dev, KOBJ_ADD);
	if (error)
		goto platform_error;

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify clients of device addition.  This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);

	/*
	 * Link this device to its fwnode (first device to claim the fwnode
	 * wins) and create fw_devlink device links before probing, so
	 * suppliers/consumers are ordered correctly.
	 */
	if (dev->fwnode && !dev->fwnode->dev) {
		dev->fwnode->dev = dev;
		fw_devlink_link_device(dev);
	}

	bus_probe_device(dev);

	/*
	 * If all driver registration is done and no driver matched this
	 * device, assume it will never get one and unblock any consumers
	 * waiting on it as a supplier.
	 */
	if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
		fw_devlink_unblock_consumers(dev);

	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	device_platform_notify(dev, KOBJ_REMOVE);
platform_error:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * Convenience wrapper: device_initialize() followed by device_add().
 * NOTE: regardless of success, the caller must call put_device() to give
 * up the reference initialized here — never kfree(@dev) directly.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449struct device *get_device(struct device *dev)
3450{
3451 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
3452}
3453EXPORT_SYMBOL_GPL(get_device);
3454
3455
3456
3457
3458
3459void put_device(struct device *dev)
3460{
3461
3462 if (dev)
3463 kobject_put(&dev->kobj);
3464}
3465EXPORT_SYMBOL_GPL(put_device);
3466
/*
 * kill_device - mark @dev dead so no new probing/binding is attempted.
 *
 * Requires the device lock (asserted below) so the dead flag cannot race
 * with a concurrent probe.  Returns true if this call did the killing,
 * false if the device was already dead.
 */
bool kill_device(struct device *dev)
{
	device_lock_assert(dev);

	if (dev->p->dead)
		return false;
	dev->p->dead = true;
	return true;
}
EXPORT_SYMBOL_GPL(kill_device);
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * Removes the device from hierarchies and sysfs: the reverse of
 * device_add(), with teardown steps in the opposite order of setup.
 * The caller still holds (and must later drop) its reference; if the
 * device was created by device_register(), use device_unregister()
 * instead, which does the put_device() as well.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
	unsigned int noio_flag;

	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	if (dev->fwnode && dev->fwnode->dev == dev)
		dev->fwnode->dev = NULL;

	/*
	 * Hold the memalloc-noio scope over the whole teardown: block
	 * device removal paths must not recurse into I/O.
	 * NOTE(review): presumably guards against deadlock when removing
	 * a block device under memory pressure — confirm against the
	 * original commit message.
	 */
	noio_flag = memalloc_noio_save();
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify(dev, KOBJ_REMOVE);
	device_remove_properties(dev);
	device_links_purge(dev);

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	memalloc_noio_restore(noio_flag);
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * device_del() plus dropping the reference taken in device_register().
 * The device may linger until all other references are gone; its release
 * callback frees it then.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
3580
3581static struct device *prev_device(struct klist_iter *i)
3582{
3583 struct klist_node *n = klist_prev(i);
3584 struct device *dev = NULL;
3585 struct device_private *p;
3586
3587 if (n) {
3588 p = to_device_private_parent(n);
3589 dev = p->device;
3590 }
3591 return dev;
3592}
3593
3594static struct device *next_device(struct klist_iter *i)
3595{
3596 struct klist_node *n = klist_next(i);
3597 struct device *dev = NULL;
3598 struct device_private *p;
3599
3600 if (n) {
3601 p = to_device_private_parent(n);
3602 dev = p->device;
3603 }
3604 return dev;
3605}
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
/*
 * device_get_devnode - path of device node file (below /dev).
 *
 * Resolution order: device-type devnode(), class devnode(), plain
 * dev_name(), and finally dev_name() with '!' mangled to '/'.
 * Ownership: if *@tmp is set non-NULL on return, the caller must
 * kfree(*@tmp) after using the returned name; otherwise the returned
 * pointer is borrowed.  Returns NULL only on allocation failure.
 */
const char *device_get_devnode(struct device *dev,
			       umode_t *mode, kuid_t *uid, kgid_t *gid,
			       const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* the device type may provide a specific name */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode, uid, gid);
	if (*tmp)
		return *tmp;

	/* the class may provide a specific name */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* return name without allocation, tmp == NULL */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* replace '!' in the name with '/' */
	s = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!s)
		return NULL;
	strreplace(s, '!', '/');
	return *tmp = s;
}
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664int device_for_each_child(struct device *parent, void *data,
3665 int (*fn)(struct device *dev, void *data))
3666{
3667 struct klist_iter i;
3668 struct device *child;
3669 int error = 0;
3670
3671 if (!parent->p)
3672 return 0;
3673
3674 klist_iter_init(&parent->p->klist_children, &i);
3675 while (!error && (child = next_device(&i)))
3676 error = fn(child, data);
3677 klist_iter_exit(&i);
3678 return error;
3679}
3680EXPORT_SYMBOL_GPL(device_for_each_child);
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694int device_for_each_child_reverse(struct device *parent, void *data,
3695 int (*fn)(struct device *dev, void *data))
3696{
3697 struct klist_iter i;
3698 struct device *child;
3699 int error = 0;
3700
3701 if (!parent->p)
3702 return 0;
3703
3704 klist_iter_init(&parent->p->klist_children, &i);
3705 while ((child = prev_device(&i)) && !error)
3706 error = fn(child, data);
3707 klist_iter_exit(&i);
3708 return error;
3709}
3710EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
/**
 * device_find_child - find the first child matching @match.
 * @parent: parent struct device
 * @data: data passed to @match
 * @match: callback; non-zero return ends the search
 *
 * On a match, a reference is taken on the child (get_device() inside the
 * loop condition) before the iterator is released — the caller must drop
 * it with put_device().  Returns NULL when nothing matched.
 */
struct device *device_find_child(struct device *parent, void *data,
				 int (*match)(struct device *dev, void *data))
{
	struct klist_iter i;
	struct device *child;

	if (!parent)
		return NULL;

	klist_iter_init(&parent->p->klist_children, &i);
	while ((child = next_device(&i)))
		if (match(child, data) && get_device(child))
			break;
	klist_iter_exit(&i);
	return child;
}
EXPORT_SYMBOL_GPL(device_find_child);
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
/**
 * device_find_child_by_name - find the child of @parent named @name.
 * @parent: parent struct device
 * @name: name of the child to look up
 *
 * Returns the first matching child with a reference taken (caller must
 * put_device()), or NULL if none matches.
 */
struct device *device_find_child_by_name(struct device *parent,
					 const char *name)
{
	struct klist_iter i;
	struct device *child;

	if (!parent)
		return NULL;

	klist_iter_init(&parent->p->klist_children, &i);
	while ((child = next_device(&i)))
		if (sysfs_streq(dev_name(child), name) && get_device(child))
			break;
	klist_iter_exit(&i);
	return child;
}
EXPORT_SYMBOL_GPL(device_find_child_by_name);
3774
/*
 * devices_init - create the top-level /sys/devices kset and the
 * /sys/dev, /sys/dev/block and /sys/dev/char kobjects at boot.
 * Earlier objects are released in reverse order on failure.
 */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}
3800
3801static int device_check_offline(struct device *dev, void *not_used)
3802{
3803 int ret;
3804
3805 ret = device_for_each_child(dev, NULL, device_check_offline);
3806 if (ret)
3807 return ret;
3808
3809 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
3810}
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
/**
 * device_offline - put @dev offline via its bus type's callback.
 *
 * Fails with -EPERM if offlining is disabled and -EBUSY if any descendant
 * is still online.  Under the device lock, calls dev->bus->offline() and
 * emits KOBJ_OFFLINE on success.  Returns 1 if already offline, 0 on
 * success, negative errno otherwise.
 *
 * NOTE(review): the child check races with child hotplug — callers are
 * expected to hold device_hotplug_lock; confirm at call sites.
 */
int device_offline(struct device *dev)
{
	int ret;

	if (dev->offline_disabled)
		return -EPERM;

	ret = device_for_each_child(dev, NULL, device_check_offline);
	if (ret)
		return ret;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = 1;
		} else {
			ret = dev->bus->offline(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				dev->offline = true;
			}
		}
	}
	device_unlock(dev);

	return ret;
}
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
/**
 * device_online - put @dev back online via its bus type's callback.
 *
 * Under the device lock, calls dev->bus->online() and emits KOBJ_ONLINE
 * on success.  Returns 1 if the device was already online, 0 on success,
 * negative errno from the bus callback otherwise.
 */
int device_online(struct device *dev)
{
	int ret = 0;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = dev->bus->online(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
				dev->offline = false;
			}
		} else {
			ret = 1;
		}
	}
	device_unlock(dev);

	return ret;
}
3881
/* A "root" device: a parentless device used to group devices that have
 * no natural bus parent (see __root_device_register()). */
struct root_device {
	struct device dev;	/* embedded device, must be first */
	struct module *owner;	/* module owning the "module" symlink */
};

static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}

/* release callback: frees the containing root_device */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
/**
 * __root_device_register - allocate and register a root device
 * @name: root device name
 * @owner: owner module (a "module" symlink to it is created)
 *
 * Returns &root->dev or ERR_PTR().  Note the error-path asymmetry:
 * before device_register() the struct is freed with kfree(); after it,
 * put_device()/device_unregister() must be used so the embedded kobject's
 * release path (root_device_release) frees it.
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);
3958
3959
3960
3961
3962
3963
3964
3965
/**
 * root_device_unregister - unregister and free a root device
 * @dev: device returned by root_device_register()
 *
 * Removes the "module" symlink (if one was created) before unregistering.
 */
void root_device_unregister(struct device *dev)
{
	struct root_device *root = to_root_device(dev);

	if (root->owner)
		sysfs_remove_link(&root->dev.kobj, "module");

	device_unregister(dev);
}
EXPORT_SYMBOL_GPL(root_device_unregister);
3976
3977
/* release callback for devices allocated by device_create*() */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
3983
/*
 * Common backend for device_create() and device_create_with_groups():
 * allocate, initialize, name and add a class device.  Returns the new
 * device or ERR_PTR().  The single error label relies on put_device(NULL)
 * being a no-op and on the release callback freeing @dev once it has been
 * initialized.
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
			   dev_t devt, void *drvdata,
			   const struct attribute_group **groups,
			   const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049struct device *device_create(struct class *class, struct device *parent,
4050 dev_t devt, void *drvdata, const char *fmt, ...)
4051{
4052 va_list vargs;
4053 struct device *dev;
4054
4055 va_start(vargs, fmt);
4056 dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
4057 fmt, vargs);
4058 va_end(vargs);
4059 return dev;
4060}
4061EXPORT_SYMBOL_GPL(device_create);
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090struct device *device_create_with_groups(struct class *class,
4091 struct device *parent, dev_t devt,
4092 void *drvdata,
4093 const struct attribute_group **groups,
4094 const char *fmt, ...)
4095{
4096 va_list vargs;
4097 struct device *dev;
4098
4099 va_start(vargs, fmt);
4100 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
4101 fmt, vargs);
4102 va_end(vargs);
4103 return dev;
4104}
4105EXPORT_SYMBOL_GPL(device_create_with_groups);
4106
4107
4108
4109
4110
4111
4112
4113
4114
/**
 * device_destroy - remove the device created with device_create()
 * @class: the class of the device
 * @devt: dev_t of the device that was previously registered
 *
 * The put_device() releases only the lookup reference taken by
 * class_find_device_by_devt(); device_unregister() then drops the
 * original registration reference.
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device_by_devt(class, devt);
	if (dev) {
		put_device(dev);
		device_unregister(dev);
	}
}
EXPORT_SYMBOL_GPL(device_destroy);
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
/**
 * device_rename - renames a device
 * @dev: the pointer to the struct device to be renamed
 * @new_name: the new name of the device
 *
 * Renames the class-side symlink first, then the kobject itself.  This
 * is inherently racy against concurrent lookups by the old name and is
 * kept only for legacy users (e.g. network interface renaming) — do not
 * add new callers.  Returns 0 or a negative errno.
 */
int device_rename(struct device *dev, const char *new_name)
{
	struct kobject *kobj = &dev->kobj;
	char *old_device_name = NULL;
	int error;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	dev_dbg(dev, "renaming to %s\n", new_name);

	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!old_device_name) {
		error = -ENOMEM;
		goto out;
	}

	if (dev->class) {
		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
					     kobj, old_device_name,
					     new_name, kobject_namespace(kobj));
		if (error)
			goto out;
	}

	error = kobject_rename(kobj, new_name);
	if (error)
		goto out;

out:
	put_device(dev);

	kfree(old_device_name);

	return error;
}
EXPORT_SYMBOL_GPL(device_rename);
4204
4205static int device_move_class_links(struct device *dev,
4206 struct device *old_parent,
4207 struct device *new_parent)
4208{
4209 int error = 0;
4210
4211 if (old_parent)
4212 sysfs_remove_link(&dev->kobj, "device");
4213 if (new_parent)
4214 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
4215 "device");
4216 return error;
4217}
4218
4219
4220
4221
4222
4223
4224
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm list
 *
 * Under the PM lock: moves the kobject, swaps the klist parent links,
 * re-points the class "device" symlink, and finally reorders the PM and
 * devices lists.  If the symlink move fails, everything done so far is
 * rolled back (best effort — the inner kobject_move() back may itself
 * fail, in which case the device is left under the new parent kobject).
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			device_move_class_links(dev, new_parent, old_parent);
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);
4307
/*
 * Change the ownership of all sysfs attribute files belonging to @dev —
 * class groups, device-type groups, the device's own groups and, where
 * applicable, the "online" attribute.  Returns 0 or the first errno.
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
				     kgid_t kgid)
{
	struct kobject *kobj = &dev->kobj;
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		/*
		 * Change the device groups of the device class for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	if (type) {
		/*
		 * Change the device groups of the device type for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, type->groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	/* Change the device groups of @dev to @kuid/@kgid. */
	error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
	if (error)
		return error;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		/* Change online device attributes of @dev to @kuid/@kgid. */
		error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
						kuid, kgid);
		if (error)
			return error;
	}

	return 0;
}
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
/**
 * device_change_owner - change the owner of an existing device.
 * @dev: device.
 * @kuid: new owner's kuid
 * @kgid: new owner's kgid
 *
 * Changes the owner of @dev's kobject, its uevent file, all attribute
 * files, the PM attributes, and the class-side symlink.  Returns 0 or a
 * negative errno.
 */
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
	int error;
	struct kobject *kobj = &dev->kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	/*
	 * Change the kobject and the default attributes and groups of the
	 * ktype associated with it to @kuid/@kgid.
	 */
	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		goto out;

	/*
	 * Change the uevent file for @dev to @kuid/@kgid. The uevent file
	 * was created first and is not covered by the groups/attributes
	 * handled below.
	 */
	error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
					kgid);
	if (error)
		goto out;

	/*
	 * Change the device groups, the device groups associated with the
	 * device class, and the groups associated with the device type of
	 * @dev to @kuid/@kgid.
	 */
	error = device_attrs_change_owner(dev, kuid, kgid);
	if (error)
		goto out;

	error = dpm_sysfs_change_owner(dev, kuid, kgid);
	if (error)
		goto out;

#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class == &block_class)
		goto out;
#endif

	/*
	 * Change the owner of the symlink located in the class directory
	 * of the device class associated with @dev which points to the
	 * actual directory entry for @dev to @kuid/@kgid.
	 *
	 * NOTE(review): dev->class is dereferenced unconditionally here;
	 * this appears to assume all callers pass class devices — confirm,
	 * a NULL class would oops.
	 */
	error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
					dev_name(dev), kuid, kgid);
	if (error)
		goto out;

out:
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_change_owner);
4427
4428
4429
4430
/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 *
 * Walks the global devices list from the tail (children were added after
 * their parents, so they are shut down first).  The list lock is dropped
 * around each callback; each device is unlinked from the list and pinned
 * with a reference before the lock is released so removal can't race.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
4499
4500
4501
4502
4503
4504#ifdef CONFIG_PRINTK
/*
 * Fill @dev_info with the subsystem name and a device identifier for
 * structured printk records (/dev/kmsg dictionary).
 */
static void
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
	const char *subsys;

	memset(dev_info, 0, sizeof(*dev_info));

	if (dev->class)
		subsys = dev->class->name;
	else if (dev->bus)
		subsys = dev->bus->name;
	else
		return;

	strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));

	/*
	 * Add device identifier DEVICE=:
	 *   b12:8         block dev_t
	 *   c127:3        char dev_t
	 *   n8            netdev ifindex
	 *   +sound:card0  subsystem:devname
	 */
	if (MAJOR(dev->devt)) {
		char c;

		if (strcmp(subsys, "block") == 0)
			c = 'b';
		else
			c = 'c';

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
		struct net_device *net = to_net_dev(dev);

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "n%u", net->ifindex);
	} else {
		snprintf(dev_info->device, sizeof(dev_info->device),
			 "+%s:%s", subsys, dev_name(dev));
	}
}
4548
/* Emit a printk record tagged with @dev's subsystem/device info. */
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{
	struct dev_printk_info dev_info;

	set_dev_info(dev, &dev_info);

	return vprintk_emit(0, level, &dev_info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);
4559
4560int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
4561{
4562 va_list args;
4563 int r;
4564
4565 va_start(args, fmt);
4566
4567 r = dev_vprintk_emit(level, dev, fmt, args);
4568
4569 va_end(args);
4570
4571 return r;
4572}
4573EXPORT_SYMBOL(dev_printk_emit);
4574
/*
 * Common backend for dev_printk()/dev_<level>(): prefix the message with
 * driver and device name.  level is a KERN_<x> string like "\0014";
 * level[1] - '0' extracts the numeric loglevel from it.
 */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (dev)
		dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
				dev_driver_string(dev), dev_name(dev), vaf);
	else
		printk("%s(NULL device *): %pV", level, vaf);
}
4584
4585void dev_printk(const char *level, const struct device *dev,
4586 const char *fmt, ...)
4587{
4588 struct va_format vaf;
4589 va_list args;
4590
4591 va_start(args, fmt);
4592
4593 vaf.fmt = fmt;
4594 vaf.va = &args;
4595
4596 __dev_printk(level, dev, &vaf);
4597
4598 va_end(args);
4599}
4600EXPORT_SYMBOL(dev_printk);
4601
/* Generate one dev_<level>() function body forwarding to __dev_printk(). */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);
4618
/* Instantiate the per-loglevel _dev_*() helpers. */
define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);

#endif /* CONFIG_PRINTK */
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
/**
 * dev_err_probe - probe error check and log helper
 * @dev: the pointer to the struct device
 * @err: error value to test
 * @fmt: printf-style format string for the message
 *
 * Logs the message at err level, except for -EPROBE_DEFER where it is
 * logged at debug level and recorded as the deferred-probe reason
 * (visible in debugfs) instead.  Returns @err unchanged, enabling the
 * idiom:  return dev_err_probe(dev, err, "...");
 */
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (err != -EPROBE_DEFER) {
		dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	} else {
		device_set_deferred_probe_reason(dev, &vaf);
		dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	}

	va_end(args);

	return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
4677
4678static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
4679{
4680 return fwnode && !IS_ERR(fwnode->secondary);
4681}
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, but if a secondary
 * firmware node of the device is present, preserve it by chaining it as
 * @fwnode's secondary.
 *
 * Passing a NULL @fwnode removes the current primary node, promoting the
 * secondary (if any) to be the device's fwnode.
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* If the old node was a primary, keep only its secondary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			/* The new primary should not already have a secondary. */
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		if (fwnode_is_primary(fn)) {
			/* Primary removed: the secondary becomes the fwnode. */
			dev->fwnode = fn->secondary;

			/* Don't detach fn's secondary if fn is shared with the parent. */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = NULL;
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
4734{
4735 if (fwnode)
4736 fwnode->secondary = ERR_PTR(-ENODEV);
4737
4738 if (fwnode_is_primary(dev->fwnode))
4739 dev->fwnode->secondary = fwnode;
4740 else
4741 dev->fwnode = fwnode;
4742}
4743EXPORT_SYMBOL_GPL(set_secondary_fwnode);
4744
4745
4746
4747
4748
4749
4750
4751
4752
4753void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
4754{
4755 of_node_put(dev->of_node);
4756 dev->of_node = of_node_get(dev2->of_node);
4757 dev->of_node_reused = true;
4758}
4759EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
4760
4761void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
4762{
4763 dev->fwnode = fwnode;
4764 dev->of_node = to_of_node(fwnode);
4765}
4766EXPORT_SYMBOL_GPL(device_set_node);
4767
/* Match callback: true when the device's name equals @name (sysfs-style). */
int device_match_name(struct device *dev, const void *name)
{
	const char *wanted = name;

	return sysfs_streq(dev_name(dev), wanted);
}
EXPORT_SYMBOL_GPL(device_match_name);
4773
4774int device_match_of_node(struct device *dev, const void *np)
4775{
4776 return dev->of_node == np;
4777}
4778EXPORT_SYMBOL_GPL(device_match_of_node);
4779
/* Match callback: true when the device's fwnode is exactly @fwnode. */
int device_match_fwnode(struct device *dev, const void *fwnode)
{
	return fwnode == dev_fwnode(dev);
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
4785
4786int device_match_devt(struct device *dev, const void *pdevt)
4787{
4788 return dev->devt == *(dev_t *)pdevt;
4789}
4790EXPORT_SYMBOL_GPL(device_match_devt);
4791
/* Match callback: true when the device's ACPI companion is @adev. */
int device_match_acpi_dev(struct device *dev, const void *adev)
{
	return adev == ACPI_COMPANION(dev);
}
EXPORT_SYMBOL(device_match_acpi_dev);
4797
/* Match callback that accepts every device (for "find first/all" lookups). */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);
4803