/*
 * Universal Flash Storage Host controller driver
 */
#ifndef _UFSHCD_H
#define _UFSHCD_H

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/keyslot-manager.h>
#include "unipro.h"

#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "ufs.h"
#include "ufs_quirks.h"
#include "ufshci.h"

#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

struct ufs_hba;

enum dev_cmd_type {
	DEV_CMD_TYPE_NOP		= 0x0,
	DEV_CMD_TYPE_QUERY		= 0x1,
};

enum ufs_event_type {
	/* uic specific errors */
	UFS_EVT_PA_ERR = 0,
	UFS_EVT_DL_ERR,
	UFS_EVT_NL_ERR,
	UFS_EVT_TL_ERR,
	UFS_EVT_DME_ERR,

	/* fatal errors */
	UFS_EVT_AUTO_HIBERN8_ERR,
	UFS_EVT_FATAL_ERR,
	UFS_EVT_LINK_STARTUP_FAIL,
	UFS_EVT_RESUME_ERR,
	UFS_EVT_SUSPEND_ERR,
	UFS_EVT_WL_SUSP_ERR,
	UFS_EVT_WL_RES_ERR,

	/* abnormal events */
	UFS_EVT_DEV_RESET,
	UFS_EVT_HOST_RESET,
	UFS_EVT_ABORT,

	UFS_EVT_CNT,
};

/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: Indicate if UIC command is outstanding
 * @done: UIC command completion
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	struct completion done;
};

/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
	UIC_LINK_BROKEN_STATE	= 3, /* Link is in broken state */
};

#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
				   UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
				    UIC_LINK_BROKEN_STATE)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_set_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)

/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings, except DeepSleep
 * which is lower than PowerDown with link off.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_6,
	UFS_PM_LVL_MAX
};

struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
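
/*
 * Illustrative sketch (not part of this header): the driver pairs each
 * ufs_pm_level with a (device power mode, link state) tuple via the
 * ufs_pm_lvl_states[] table defined in ufshcd.c, conventionally along
 * these lines:
 *
 *	static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
 *		{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
 *		{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 *		{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
 *		{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 *		{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 *		{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
 *		{UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
 *	};
 */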

/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD dma address for debug
 * @ucd_req_dma_addr: UPIU request dma address for debug
 * @ucd_rsp_dma_addr: UPIU response dma address for debug
 * @ucd_prdt_dma_addr: PRDT dma address for debug
 * @cmd: pointer to SCSI command
 * @sense_buffer: pointer to sense buffer address of the SCSI command
 * @sense_bufflen: Length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes
 * @compl_time_stamp: time stamp for statistics
 * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
 * @data_unit_num: the data unit number for the first block for inline crypto
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun; /* UPIU LUN id field is only 8-bit wide */
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;
#ifdef CONFIG_SCSI_UFS_CRYPTO
	int crypto_key_slot;
	u64 data_unit_num;
#endif

	bool req_abort_skip;
};

/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};

/**
 * struct ufs_dev_cmd - all associated fields with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @query: Device management query information
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	struct ufs_query query;
};

/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @keep_link_active: indicates that the clk should not be disabled if
 *		      link is active
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool keep_link_active;
	bool enabled;
};

enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};

struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};

/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @hce_enable_notify: called before and after HCE enable bit is set to allow
 *                     variant specific Uni-Pro initialization.
 * @link_startup_notify: called before and after Link startup is carried out
 *                       to allow variant specific Uni-Pro initialization.
 * @pwr_change_notify: called before and after a power mode change
 *			is carried out to allow vendor specific capabilities
 *			to be set.
 * @setup_xfer_req: called before any transfer request is issued
 *                  to set some things
 * @setup_task_mgmt: called before any task management request is issued
 *                   to set some things
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @fixup_dev_quirks: called to modify device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 * @config_scaling_param: called to configure clock scaling parameters
 * @program_key: program or evict an inline encryption key
 * @event_notify: called to notify important events
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void	(*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status status,
				     struct ufs_pa_layer_attr *,
				     struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void	(*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
				  enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *hba);
	void	(*fixup_dev_quirks)(struct ufs_hba *hba);
	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	int	(*device_reset)(struct ufs_hba *hba);
	void	(*config_scaling_param)(struct ufs_hba *hba,
					struct devfreq_dev_profile *profile,
					void *data);
	int	(*program_key)(struct ufs_hba *hba,
			       const union ufs_crypto_cfg_entry *cfg, int slot);
	void	(*event_notify)(struct ufs_hba *hba,
				enum ufs_event_type evt, void *data);
};
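
/*
 * Illustrative sketch (hypothetical names, not part of this header): a
 * minimal variant driver fills in only the callbacks it needs and leaves
 * the rest NULL; the ufshcd_vops_*() wrappers below fall back to sane
 * defaults for missing callbacks.
 *
 *	static int my_ufs_init(struct ufs_hba *hba)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct ufs_hba_variant_ops my_ufs_hba_vops = {
 *		.name = "my-ufs",
 *		.init = my_ufs_init,
 *	};
 */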

/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,
	CLKS_ON,
	REQ_CLKS_OFF,
	REQ_CLKS_ON,
};

/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 * delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 * during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @is_initialized: Indicates whether clock gating is initialized or not
 * @active_reqs: number of requests that are pending and should be waited for
 * completion before gating clocks.
 * @clk_gating_workq: workqueue for clock gating work.
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	bool is_initialized;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};

struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};

/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 * devfreq ->target() function is called then schedule "suspend_work" to
 * suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 * one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @min_gear: lowest HS gear to scale down to
 * @is_enabled: tracks if scaling is currently enabled or not, controlled by
 *		clkscale_enable sysfs node
 * @is_allowed: tracks if scaling is currently allowed or not, used to block
 *		clock scaling which is not invoked from devfreq governor
 * @is_initialized: Indicates whether clock scaling is initialized or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	ktime_t window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	u32 min_gear;
	bool is_enabled;
	bool is_allowed;
	bool is_initialized;
	bool is_busy_started;
	bool is_suspended;
};

#define UFS_EVENT_HIST_LENGTH 8
/**
 * struct ufs_event_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @val: cyclic buffer of recorded values
 * @tstamp: cyclic buffer for time stamp
 * @cnt: error counter
 */
struct ufs_event_hist {
	int pos;
	u32 val[UFS_EVENT_HIST_LENGTH];
	ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
	unsigned long long cnt;
};

/**
 * struct ufs_stats - keeps usage/err statistics
 * @last_intr_status: record the last interrupt status.
 * @last_intr_ts: record the last interrupt timestamp.
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 *		reset this after link-startup.
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 *		Clear after the first successful command completion.
 * @event: array with event history.
 */
struct ufs_stats {
	u32 last_intr_status;
	ktime_t last_intr_ts;

	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;
	struct ufs_event_hist event[UFS_EVT_CNT];
};

enum ufshcd_quirks {
	/* Interrupt aggregation support is broken */
	UFSHCD_QUIRK_BROKEN_INTR_AGGR			= 1 << 0,

	/*
	 * Delay before each DME command is required as the UniPro
	 * layer has an issue with processing back-to-back DME commands.
	 */
	UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		= 1 << 1,

	/*
	 * If the UFS host controller has an issue processing LCC (Line
	 * Control Command) coming from the device, then enable this quirk.
	 * When this quirk is enabled, the host controller driver should
	 * disable LCC transmission on the UFS device (by clearing the
	 * TX_LCC_ENABLE attribute of the device to 0).
	 */
	UFSHCD_QUIRK_BROKEN_LCC				= 1 << 2,

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
	UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		= 1 << 3,

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer DME attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
	UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		= 1 << 4,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in the UFS_VER register. If this
	 * quirk is enabled, the standard UFS host driver will call the
	 * vendor specific ops (get_ufs_hci_version) to get the correct
	 * version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		= 1 << 5,

	/*
	 * Clear handling for transfer/task request list is just opposite.
	 */
	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		= 1 << 6,

	/*
	 * This quirk needs to be enabled if the host controller doesn't allow
	 * that the interrupt aggregation timer and counter are reset by s/w.
	 */
	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		= 1 << 7,

	/*
	 * This quirk needs to be enabled if the host controller cannot be
	 * enabled via the HCE register.
	 */
	UFSHCI_QUIRK_BROKEN_HCE				= 1 << 8,

	/*
	 * This quirk needs to be enabled if the host controller regards
	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
	 */
	UFSHCD_QUIRK_PRDT_BYTE_GRAN			= 1 << 9,

	/*
	 * This quirk needs to be enabled if the host controller reports
	 * OCS FATAL ERROR with device error through sense data.
	 */
	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR		= 1 << 10,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8		= 1 << 11,

	/*
	 * This quirk needs to disable manual flush for WriteBooster.
	 */
	UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL		= 1 << 12,

	/*
	 * This quirk needs to disable UniPro timeout values
	 * before power mode change.
	 */
	UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING	= 1 << 13,

	/*
	 * This quirk allows only sg entries aligned with page size.
	 */
	UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE		= 1 << 14,
};

enum ufshcd_caps {
	/* Allow dynamic clk gating */
	UFSHCD_CAP_CLK_GATING				= 1 << 0,

	/* Allow hibern8 with clk gating */
	UFSHCD_CAP_HIBERN8_WITH_CLK_GATING		= 1 << 1,

	/* Allow dynamic clk scaling */
	UFSHCD_CAP_CLK_SCALING				= 1 << 2,

	/* Allow auto bkops to be enabled during runtime suspend */
	UFSHCD_CAP_AUTO_BKOPS_SUSPEND			= 1 << 3,

	/*
	 * This capability allows the host controller driver to use the UFS
	 * HCI's interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
	UFSHCD_CAP_INTR_AGGR				= 1 << 4,

	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and system suspend).
	 * Enabling this capability means that the device will always be
	 * allowed to do background operations when it's active, but it might
	 * degrade the performance of ongoing read/write operations.
	 */
	UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,

	/*
	 * This capability allows the host controller driver to automatically
	 * enable runtime power management by itself instead of waiting
	 * for userspace to control the power management.
	 */
	UFSHCD_CAP_RPM_AUTOSUSPEND			= 1 << 6,

	/*
	 * This capability allows the host controller driver to turn on
	 * WriteBooster, if the underlying device supports it and is
	 * provisioned to be used. This would increase the write performance.
	 */
	UFSHCD_CAP_WB_EN				= 1 << 7,

	/*
	 * This capability allows the host controller driver to use the
	 * inline crypto engine, if it is present.
	 */
	UFSHCD_CAP_CRYPTO				= 1 << 8,

	/*
	 * This capability allows the controller regulators to be put into
	 * low power mode when the UFS device is in sleep state and the UFS
	 * link is inactive.
	 */
	UFSHCD_CAP_AGGR_POWER_COLLAPSE			= 1 << 9,

	/*
	 * This capability allows the host controller driver to use DeepSleep,
	 * if it is supported by the UFS device. The host controller driver
	 * must support device hardware reset via the hba->device_reset()
	 * callback, in order to exit DeepSleep state.
	 */
	UFSHCD_CAP_DEEPSLEEP				= 1 << 10,
};
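
/*
 * Illustrative sketch (not code from this header): quirks and capabilities
 * are normally OR-ed into hba->quirks and hba->caps from a variant driver's
 * ->init() callback, before the controller is enabled:
 *
 *	hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC |
 *		       UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
 *	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_WB_EN;
 */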

struct ufs_hba_variant_params {
	struct devfreq_dev_profile devfreq_profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	u16 hba_enable_delay_us;
	u32 wb_flush_threshold;
};

struct ufs_hba_monitor {
	unsigned long chunk_size;

	unsigned long nr_sec_rw[2];
	ktime_t total_busy[2];

	unsigned long nr_req[2];
	/* latencies */
	ktime_t lat_sum[2];
	ktime_t lat_max[2];
	ktime_t lat_min[2];

	u32 nr_queued[2];
	ktime_t busy_start_ts[2];

	ktime_t enabled_ts;
	bool enabled;
};

/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
 * @lrb: local reference block
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @priv: pointer to variant specific private data
 * @irq: Irq number of the controller
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for UIC command
 * @tmf_tag_set: TMF tag set
 * @tmf_queue: Used to allocate TMF tags
 * @ufshcd_state: UFSHCD state
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @is_powered: flag to check if HBA is powered
 * @shutting_down: flag to check if shutdown has been invoked
 * @host_sem: semaphore used to serialize concurrent contexts
 * @eh_wq: Workqueue that eh_work works on
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @ufs_stats: various error counters
 * @force_reset: flag to force eh_work perform a full reset
 * @force_pmc: flag to force a power mode change
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @clk_gating: information related to clock gating
 * @caps: bitmask with information about driver capabilities
 * @devfreq: frequency scaling information owned by the devfreq core
 * @clk_scaling: frequency scaling information owned by the UFS driver
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *  device is known or not
 * @desc_size: descriptor sizes reported by device
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 * @monitor: statistics about UFS commands
 * @crypto_capabilities: Content of crypto capabilities register (0x100)
 * @crypto_cap_array: Array of crypto capabilities
 * @crypto_cfg_register: Start of the crypto cfg array
 * @ksm: the keyslot manager tied to this hba
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct request_queue *cmd_queue;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;
	struct scsi_device *sdev_rpmb;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	struct ufs_hba_variant_params *vps;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;	/* Exception event mask */
	u16 ee_drv_mask;	/* Exception event mask for driver */
	u16 ee_usr_mask;	/* Exception event mask for user (via debugfs) */
	struct mutex ee_ctrl_mutex;
	bool is_powered;
	bool shutting_down;
	struct semaphore host_sem;

	/* Work Queues */
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool force_reset;
	bool force_pmc;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;
	bool wlun_rpmb_clr_ua;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	unsigned char desc_size[QUERY_DESC_IDN_MAX];
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
	struct delayed_work rpm_dev_flush_recheck_work;

	struct ufs_hba_monitor monitor;

#ifdef CONFIG_SCSI_UFS_CRYPTO
	union ufs_crypto_capabilities crypto_capabilities;
	union ufs_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	struct blk_keyslot_manager ksm;
#endif
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_root;
	struct delayed_work debugfs_ee_work;
	u32 debugfs_ee_rate_limit_ms;
#endif
	u32 luns_avail;
	bool complete_put;
	bool rpmb_complete_put;
};

/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
}

static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
/* DWC UFS Core has the Interrupt aggregation feature but is not detectable */
#ifndef CONFIG_SCSI_UFS_DWC
	if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
	    !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
		return true;
	else
		return false;
#else
	return true;
#endif
}

static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
{
	return !!(ufshcd_is_link_hibern8(hba) &&
		  (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
}

static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
		!(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
}

static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
{
	return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}

static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_WB_EN;
}

static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
{
	return !hba->shutting_down;
}

#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))

/**
 * ufshcd_rmwl - read modify write into a register
 * @hba: per adapter instance
 * @mask: mask to apply on read value
 * @val: actual value to write
 * @reg: register address
 */
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 tmp;

	tmp = ufshcd_readl(hba, reg);
	tmp &= ~mask;
	tmp |= (val & mask);
	ufshcd_writel(hba, tmp, reg);
}
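
/*
 * Illustrative use of ufshcd_rmwl() (a sketch, not code from this header):
 * set only the CRYPTO_GENERAL_ENABLE bit in REG_CONTROLLER_ENABLE while
 * preserving the other bits; the register and bit names come from ufshci.h.
 *
 *	ufshcd_rmwl(hba, CRYPTO_GENERAL_ENABLE, CRYPTO_GENERAL_ENABLE,
 *		    REG_CONTROLLER_ENABLE);
 */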

int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);

static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}

/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}

/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
							struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}

static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
{
	if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
		return hba->dev_info.wb_dedicated_lu;
	return 0;
}

extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
				      int agreed_gear,
				      int adapt_val);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */

static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
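
/*
 * Illustrative sketch of the DME accessors above (not code from this
 * header): program a local PA attribute and read one back from the peer.
 * PA_TXTERMINATION and PA_GRANULARITY are UniPro attributes from unipro.h;
 * "granularity" is a hypothetical local variable.
 *
 *	u32 granularity;
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 *	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
 */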

static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
	return (pwr_info->pwr_rx == FAST_MODE ||
		pwr_info->pwr_rx == FASTAUTO_MODE) &&
	       (pwr_info->pwr_tx == FAST_MODE ||
		pwr_info->pwr_tx == FASTAUTO_MODE);
}

static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}

/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, u8 index, bool *flag_res);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);
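
/*
 * Illustrative sketch of the Query API (not code from this header): read
 * the fDeviceInit flag after device initialization. The opcode and flag
 * IDN come from ufs.h; "flag_res" is a hypothetical local variable.
 *
 *	bool flag_res = true;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 */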

int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				  int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);

int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
void ufshcd_resume_complete(struct device *dev);

/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
	if (hba->vops)
		return hba->vops->name;
	return "";
}

static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}

static inline void ufshcd_vops_exit(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->exit)
		return hba->vops->exit(hba);
}

static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
{
	return (hba->ufs_version >= ufshci_version(3, 0));
}

static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
			bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}

static inline void ufshcd_vops_event_notify(struct ufs_hba *hba,
					    enum ufs_event_type evt,
					    void *data)
{
	if (hba->vops && hba->vops->event_notify)
		hba->vops->event_notify(hba, evt, data);
}

static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on, status);
	return 0;
}

static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->hce_enable_notify)
		return hba->vops->hce_enable_notify(hba, status);

	return 0;
}
static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
						  bool status)
{
	if (hba->vops && hba->vops->link_startup_notify)
		return hba->vops->link_startup_notify(hba, status);

	return 0;
}

static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->phy_initialization)
		return hba->vops->phy_initialization(hba);

	return 0;
}

static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
				  enum ufs_notify_change_status status,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	if (hba->vops && hba->vops->pwr_change_notify)
		return hba->vops->pwr_change_notify(hba, status,
					dev_max_params, dev_req_params);

	return -ENOTSUPP;
}

static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
					      bool is_scsi_cmd)
{
	if (hba->vops && hba->vops->setup_xfer_req) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}

static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
					int tag, u8 tm_function)
{
	if (hba->vops && hba->vops->setup_task_mgmt)
		return hba->vops->setup_task_mgmt(hba, tag, tm_function);
}

static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
					enum uic_cmd_dme cmd,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->hibern8_notify)
		return hba->vops->hibern8_notify(hba, cmd, status);
}

static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->apply_dev_quirks)
		return hba->vops->apply_dev_quirks(hba);
	return 0;
}

static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->fixup_dev_quirks)
		hba->vops->fixup_dev_quirks(hba);
}

static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}

static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}

static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->device_reset)
		return hba->vops->device_reset(hba);

	return -EOPNOTSUPP;
}

static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
						    struct devfreq_dev_profile
						    *profile, void *data)
{
	if (hba->vops && hba->vops->config_scaling_param)
		hba->vops->config_scaling_param(hba, profile, data);
}

extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
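
/*
 * Worked example (a sketch; the constants are from ufs.h and scsi/scsi.h):
 * the UFS device well-known LU, UFS_UPIU_UFS_DEVICE_WLUN (0x50), is
 * reported to the SCSI layer as W-LUN 0xc150 (SCSI_W_LUN_BASE | 0x50).
 * ufshcd_scsi_to_upiu_lun(0xc150) keeps the low 7 bits (0x50) and sets
 * UFS_UPIU_WLUN_ID (bit 7), yielding the UPIU LUN 0xd0.
 */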

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);

int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
			     u16 set, u16 clr);

static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
					    u16 set, u16 clr)
{
	return ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
					&hba->ee_usr_mask, set, clr);
}

static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
					    u16 set, u16 clr)
{
	return ufshcd_update_ee_control(hba, &hba->ee_usr_mask,
					&hba->ee_drv_mask, set, clr);
}

static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
{
	return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
}

static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
{
	return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
}

static inline int ufshcd_rpm_put(struct ufs_hba *hba)
{
	return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
}

static inline int ufshcd_rpmb_rpm_get_sync(struct ufs_hba *hba)
{
	return pm_runtime_get_sync(&hba->sdev_rpmb->sdev_gendev);
}

static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba)
{
	return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev);
}

#endif