// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);

struct scmi_requested_dev {
	const struct scmi_device_id *id_table;
	struct list_head node;
};
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle *handle;
	const struct scmi_protocol *proto;
	void *gid;
	refcount_t users;
	void *priv;
	struct scmi_protocol_handle ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	       this SCMI instance: populated on protocol's first attempted
 *	       usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *		      in the DT and confirmed as implemented by fw.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access only to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	void *notify_priv;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
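
/*
 * Worked example of the mapping above: a platform replying with status
 * SCMI_ERR_RANGE (-5) yields err_idx = 5, which indexes scmi_linux_errmap[]
 * to -ERANGE; any status outside the table (e.g. a bogus -42) falls through
 * to the conservative -EIO default.
 */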

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data are visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: A valid message on success, else a corresponding ERR_PTR.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (msg_type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, xfer_id, msg_type);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
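
/*
 * For reference, the 32-bit message header unpacked from msg_hdr above is
 * laid out as below (per the SCMI specification; see the MSG_* masks in
 * common.h), with the token doubling as the xfer sequence number used to
 * match responses to their originating request:
 *
 *	 31      28 27     18 17      10 9      8 7          0
 *	+----------+---------+----------+--------+------------+
 *	| reserved |  token  | protocol |  type  | message id |
 *	+----------+---------+----------+--------+------------+
 */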

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	return info->desc->ops->poll_done(cinfo, xfer) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for delayed response
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout))
			ret = -ETIMEDOUT;
		else if (xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);
	}

	xfer->async_done = NULL;
	return ret;
}
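
/*
 * Usage sketch (illustrative only, not code from this file): a protocol
 * command carrying a delayed response, e.g. an asynchronous reset, would go
 * through the two-completion flow above roughly as follows, where MSG_ID and
 * the payload layout are hypothetical placeholders:
 *
 *	struct scmi_xfer *t;
 *
 *	ret = ph->xops->xfer_get_init(ph, MSG_ID, tx_size, 0, &t);
 *	if (ret)
 *		return ret;
 *	// ... fill t->tx.buf with the command payload ...
 *	ret = ph->xops->do_xfer_with_response(ph, t);
 *	// on success the delayed response payload is in t->rx.buf
 *	ph->xops->xfer_put(ph, t);
 */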

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv() - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}

/**
 * scmi_get_protocol_priv() - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};
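
/*
 * These transport-level operations are exposed to each protocol
 * implementation through scmi_protocol_handle->xops; version_get() above is
 * the canonical synchronous pattern built on them:
 *
 *	xfer_get_init()  ->  do_xfer()  ->  xfer_put()
 *
 * with do_xfer_with_response() substituted for do_xfer() whenever a delayed
 * response is expected.
 */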

/**
 * scmi_revision_area_get() - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 * instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->handle->version;
}

/**
 * scmi_alloc_init_protocol_instance() - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * descriptor, against the specified SCMI instance @info, and initialize it;
 * all resources management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *	   or ERR_PTR on failure. On failure the @proto reference is at first
 *	   put using @scmi_protocol_put() before releasing all the devres group.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
				  const struct scmi_protocol *proto)
{
	int ret = -ENOMEM;
	void *gid;
	struct scmi_protocol_instance *pi;
	const struct scmi_handle *handle = &info->handle;

	/* Protocol specific devres group */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid) {
		scmi_protocol_put(proto->id);
		goto out;
	}

	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		goto clean;

	pi->gid = gid;
	pi->proto = proto;
	pi->handle = handle;
	pi->ph.dev = handle->dev;
	pi->ph.xops = &xfer_ops;
	pi->ph.set_priv = scmi_set_protocol_priv;
	pi->ph.get_priv = scmi_get_protocol_priv;
	refcount_set(&pi->users, 1);

	ret = pi->proto->instance_init(&pi->ph);
	if (ret)
		goto clean;

	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
			GFP_KERNEL);
	if (ret != proto->id)
		goto clean;

	/*
	 * Warn but ignore events registration errors since we do not want
	 * to skip whole protocols if their notifications are messed up.
	 */
	if (pi->proto->events) {
		ret = scmi_register_protocol_events(handle, pi->proto->id,
						    &pi->ph,
						    pi->proto->events);
		if (ret)
			dev_warn(handle->dev,
				 "Protocol:%X - Events Registration Failed - err:%d\n",
				 pi->proto->id, ret);
	}

	devres_close_group(handle->dev, pi->gid);
	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

	return pi;

clean:
	/* Take care to put the protocol module's owner before releasing all */
	scmi_protocol_put(proto->id);
	devres_release_group(handle->dev, gid);
out:
	return ERR_PTR(ret);
}

/**
 * scmi_get_protocol_instance() - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 *	   NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);

	if (pi) {
		refcount_inc(&pi->users);
	} else {
		const struct scmi_protocol *proto;

		/* Fails if protocol not registered on bus */
		proto = scmi_protocol_get(protocol_id);
		if (proto)
			pi = scmi_alloc_init_protocol_instance(info, proto);
		else
			pi = ERR_PTR(-EPROBE_DEFER);
	}
	mutex_unlock(&info->protocols_mtx);

	return pi;
}

/**
 * scmi_protocol_acquire() - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}

/**
 * scmi_protocol_release() - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and triggers de-initialization
 * and resources de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		if (pi->proto->instance_deinit)
			pi->proto->instance_deinit(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		scmi_protocol_put(protocol_id);

		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}

void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

struct scmi_protocol_devres {
	const struct scmi_handle *handle;
	u8 protocol_id;
};

static void scmi_devm_release_protocol(struct device *dev, void *res)
{
	struct scmi_protocol_devres *dres = res;

	scmi_protocol_release(dres->handle, dres->protocol_id);
}

/**
 * scmi_devm_protocol_get() - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, eventually triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
		       struct scmi_protocol_handle **ph)
{
	struct scmi_protocol_instance *pi;
	struct scmi_protocol_devres *dres;
	struct scmi_handle *handle = sdev->handle;

	if (!ph)
		return ERR_PTR(-EINVAL);

	dres = devres_alloc(scmi_devm_release_protocol,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return ERR_PTR(-ENOMEM);

	pi = scmi_get_protocol_instance(handle, protocol_id);
	if (IS_ERR(pi)) {
		devres_free(dres);
		return pi;
	}

	dres->handle = handle;
	dres->protocol_id = protocol_id;
	devres_add(&sdev->dev, dres);

	*ph = &pi->ph;

	return pi->proto->ops;
}
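
/*
 * Usage sketch (illustrative only): an SCMI driver probe routine would
 * typically grab its protocol operations and handle as below; the clock
 * protocol and the scmi_clk_probe() name are just examples here.
 *
 *	static int scmi_clk_probe(struct scmi_device *sdev)
 *	{
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_clk_proto_ops *clk_ops;
 *
 *		clk_ops = sdev->handle->devm_protocol_get(sdev,
 *					SCMI_PROTOCOL_CLOCK, &ph);
 *		if (IS_ERR(clk_ops))
 *			return PTR_ERR(clk_ops);
 *		...
 *	}
 *
 * The matching put happens automatically via devres on driver unbind.
 */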

static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
	struct scmi_protocol_devres *dres = res;

	if (WARN_ON(!dres || !data))
		return 0;

	return dres->protocol_id == *((u8 *)data);
}

/**
 * scmi_devm_protocol_put() - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained calling the above
 * @scmi_devm_protocol_get.
 */
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
	int ret;

	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
			     scmi_devm_protocol_match, &protocol_id);
	WARN_ON(ret);
}

static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{
	info->users++;
	return &info->handle;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = scmi_handle_get_from_info_unlocked(info);
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *	if NULL was passed, it returns -EINVAL
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
		dev_err(dev,
			"Invalid maximum messages %d, not in range [1 - %lu]\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);

	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

/**
 * scmi_get_protocol_device() - Helper to get/create an SCMI device.
 *
 * @np: A device node representing a valid active protocol for the referred
 * SCMI instance.
 * @info: The referred SCMI instance for which we are getting/creating this
 * device.
 * @prot_id: The protocol ID.
 * @name: The device name.
 *
 * Referring to the specific SCMI instance identified by @info, this helper
 * takes care to return a properly initialized device matching the requested
 * @prot_id and @name: if the device was still not existent it is created as a
 * child of the specified SCMI instance @info and its transport properly
 * initialized as usual.
 *
 * Return: A properly initialized scmi device, NULL otherwise.
 */
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
			 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	/* Already created for this parent SCMI instance ? */
	sdev = scmi_child_dev_find(info->dev, prot_id, name);
	if (sdev)
		return sdev;

	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return NULL;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return NULL;
	}

	return sdev;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_get_protocol_device(np, info, prot_id, name);
	if (!sdev)
		return;

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

/**
 * scmi_create_protocol_devices() - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 *
 * All devices previously requested for this instance (if any) are found and
 * created by scanning the proper @scmi_requested_devices entry.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info, int prot_id)
{
	struct list_head *phead;

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, prot_id);
	if (phead) {
		struct scmi_requested_dev *rdev;

		list_for_each_entry(rdev, phead, node)
			scmi_create_protocol_device(np, info, prot_id,
						    rdev->id_table->name);
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

/**
 * scmi_protocol_device_request() - Helper to request a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be created.
 *
 * This helper lets an SCMI driver request specific devices identified by the
 * @id_table to be created for each active SCMI instance.
 *
 * The requested device name MUST NOT be already existent for any protocol;
 * at first the freshly requested @id_table is annotated in the IDR table
 * @scmi_requested_devices, then the requested device is created for each
 * already active SCMI instance. (if any)
 *
 * This way the requested device is created straight-away for all the already
 * initialized(probed) SCMI instances while it is in any case annotated as
 * requested so that this device will be created also for possible future
 * SCMI instances.
 *
 * Return: 0 on Success
 */
int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
	int ret = 0;
	unsigned int id = 0;
	struct list_head *head, *phead = NULL;
	struct scmi_requested_dev *rdev;
	struct scmi_info *info;

	pr_debug("Requesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	/*
	 * Search for the matching protocol rdev list and then search
	 * of any existent equally named device...fails if any duplicate found.
	 */
	mutex_lock(&scmi_requested_devices_mtx);
	idr_for_each_entry(&scmi_requested_devices, head, id) {
		if (!phead) {
			/* A list found registered in the IDR is never empty */
			rdev = list_first_entry(head, struct scmi_requested_dev,
						node);
			if (rdev->id_table->protocol_id ==
			    id_table->protocol_id)
				phead = head;
		}
		list_for_each_entry(rdev, head, node) {
			if (!strcmp(rdev->id_table->name, id_table->name)) {
				pr_err("Ignoring duplicate request [%d] %s\n",
				       rdev->id_table->protocol_id,
				       rdev->id_table->name);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/*
	 * No duplicate found for requested id_table, so let's create a new
	 * requested device entry for this new valid request.
	 */
	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		ret = -ENOMEM;
		goto out;
	}
	rdev->id_table = id_table;

	/*
	 * Append the new requested device table descriptor to the head of the
	 * related protocol list, eventually creating such head if not already
	 * there.
	 */
	if (!phead) {
		phead = kzalloc(sizeof(*phead), GFP_KERNEL);
		if (!phead) {
			kfree(rdev);
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(phead);

		ret = idr_alloc(&scmi_requested_devices, (void *)phead,
				id_table->protocol_id,
				id_table->protocol_id + 1, GFP_KERNEL);
		if (ret != id_table->protocol_id) {
			pr_err("Failed to save SCMI device - ret:%d\n", ret);
			kfree(rdev);
			kfree(phead);
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
	}
	list_add(&rdev->node, phead);

	/*
	 * Now effectively create and initialize the requested device for every
	 * already initialized SCMI instance which has registered the requested
	 * protocol as a valid active one: i.e. defined in DT and supported by
	 * current platform FW.
	 */
	mutex_lock(&scmi_list_mutex);
	list_for_each_entry(info, &scmi_list, node) {
		struct device_node *child;

		child = idr_find(&info->active_protocols,
				 id_table->protocol_id);
		if (child) {
			struct scmi_device *sdev;

			sdev = scmi_get_protocol_device(child, info,
							id_table->protocol_id,
							id_table->name);
			/* Set handle if not already set: device existed */
			if (sdev && !sdev->handle)
				sdev->handle =
					scmi_handle_get_from_info_unlocked(info);
		} else {
			dev_err(info->dev,
				"Failed. SCMI protocol %d not active.\n",
				id_table->protocol_id);
		}
	}
	mutex_unlock(&scmi_list_mutex);

out:
	mutex_unlock(&scmi_requested_devices_mtx);

	return ret;
}
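
/*
 * Note: requests normally originate from scmi_driver_register() (see bus.c),
 * driven by the registering driver's id_table, e.g. for a hypothetical
 * clock driver:
 *
 *	static const struct scmi_device_id scmi_id_table[] = {
 *		{ SCMI_PROTOCOL_CLOCK, "clocks" },
 *		{ },
 *	};
 *
 * so that a "clocks" device bound to the clock protocol gets created on
 * every current and future SCMI platform instance exposing that protocol.
 */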

/**
 * scmi_protocol_device_unrequest() - Helper to unrequest a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
 *
 * A helper to let an SCMI driver release its request about devices; note that
 * devices are created and initialized once the first SCMI driver requests them
 * but they are destroyed only on SCMI core unloading/unbinding.
 *
 * The current SCMI transport layer uses such devices as internal references
 * and as such they could be shared as same transport between multiple drivers
 * so that they cannot be safely destroyed till the whole SCMI stack is removed.
 * (unless adequate refcounting is added to track multiple users)
 */
void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
{
	struct list_head *phead;

	pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
	if (phead) {
		struct scmi_requested_dev *victim, *tmp;

		list_for_each_entry_safe(victim, tmp, phead, node) {
			if (!strcmp(victim->id_table->name, id_table->name)) {
				list_del(&victim->node);
				kfree(victim);
				break;
			}
		}

		if (list_empty(phead)) {
			idr_remove(&scmi_requested_devices,
				   id_table->protocol_id);
			kfree(phead);
		}
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);
	idr_init(&info->protocols);
	mutex_init(&info->protocols_mtx);
	idr_init(&info->active_protocols);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;
	handle->devm_protocol_get = scmi_devm_protocol_get;
	handle->devm_protocol_put = scmi_devm_protocol_put;

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	/*
	 * Trigger SCMI Base protocol initialization.
	 * It's mandatory and won't be ever released/deinit until the
	 * SCMI stack is shutdown/unloaded as a whole.
	 */
	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI\n");
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		/*
		 * Save this valid DT protocol descriptor amongst
		 * @active_protocols for this SCMI instance.
		 */
		ret = idr_alloc(&info->active_protocols, child,
				prot_id, prot_id + 1, GFP_KERNEL);
		if (ret != prot_id) {
			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
				prot_id);
			continue;
		}

		of_node_get(child);
		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}

void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0, id;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;
	struct device_node *child;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	scmi_notification_exit(&info->handle);

	mutex_lock(&info->protocols_mtx);
	idr_destroy(&info->protocols);
	mutex_unlock(&info->protocols_mtx);

	idr_for_each_entry(&info->active_protocols, child, id)
		of_node_put(child);
	idr_destroy(&info->active_protocols);

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_MAILBOX
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc },
#endif
	{ /* Sentinel */ },
};
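
/*
 * A matching devicetree node selects one of the descriptors above and lists
 * the implemented protocols as children keyed by protocol id in "reg"; an
 * abridged sketch following the arm,scmi binding:
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mailbox 0 &mailbox 1>;
 *			mbox-names = "tx", "rx";
 *			shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
 *
 *			scmi_clk: protocol@14 {
 *				reg = <0x14>;
 *				#clock-cells = <1>;
 *			};
 *		};
 *	};
 */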

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

static int __init scmi_driver_init(void)
{
	scmi_bus_init();

	scmi_base_register();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();

	return platform_driver_register(&scmi_driver);
}
subsys_initcall(scmi_driver_init);

static void __exit scmi_driver_exit(void)
{
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();

	scmi_bus_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);
1634
1635MODULE_ALIAS("platform: arm-scmi");
1636MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
1637MODULE_DESCRIPTION("ARM SCMI protocol driver");
1638MODULE_LICENSE("GPL v2");
1639