linux/drivers/scsi/ufs/ufshcd.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Universal Flash Storage Host controller driver Core
   4 * Copyright (C) 2011-2013 Samsung India Software Operations
   5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
   6 *
   7 * Authors:
   8 *      Santosh Yaraganavi <santosh.sy@samsung.com>
   9 *      Vinayak Holikatti <h.vinayak@samsung.com>
  10 */
  11
  12#include <linux/async.h>
  13#include <linux/devfreq.h>
  14#include <linux/nls.h>
  15#include <linux/of.h>
  16#include <linux/bitfield.h>
  17#include <linux/blk-pm.h>
  18#include <linux/blkdev.h>
  19#include <scsi/scsi_driver.h>
  20#include "ufshcd.h"
  21#include "ufs_quirks.h"
  22#include "unipro.h"
  23#include "ufs-sysfs.h"
  24#include "ufs-debugfs.h"
  25#include "ufs_bsg.h"
  26#include "ufshcd-crypto.h"
  27#include <asm/unaligned.h>
  28#include "../sd.h"
  29
  30#define CREATE_TRACE_POINTS
  31#include <trace/events/ufs.h>
  32
  33#define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
  34                                 UTP_TASK_REQ_COMPL |\
  35                                 UFSHCD_ERROR_MASK)
  36/* UIC command timeout, unit: ms */
  37#define UIC_CMD_TIMEOUT 500
  38
  39/* NOP OUT retries waiting for NOP IN response */
  40#define NOP_OUT_RETRIES    10
  41/* Timeout after 50 msecs if NOP OUT hangs without response */
  42#define NOP_OUT_TIMEOUT    50 /* msecs */
  43
  44/* Query request retries */
  45#define QUERY_REQ_RETRIES 3
  46/* Query request timeout */
  47#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
  48
  49/* Task management command timeout */
  50#define TM_CMD_TIMEOUT  100 /* msecs */
  51
  52/* maximum number of retries for a general UIC command  */
  53#define UFS_UIC_COMMAND_RETRIES 3
  54
  55/* maximum number of link-startup retries */
  56#define DME_LINKSTARTUP_RETRIES 3
  57
  58/* Maximum retries for Hibern8 enter */
  59#define UIC_HIBERN8_ENTER_RETRIES 3
  60
  61/* maximum number of reset retries before giving up */
  62#define MAX_HOST_RESET_RETRIES 5
  63
  64/* Expose the flag value from utp_upiu_query.value */
  65#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
  66
  67/* Interrupt aggregation default timeout, unit: 40us */
  68#define INT_AGGR_DEF_TO 0x02
  69
  70/* default delay of autosuspend: 2000 ms */
  71#define RPM_AUTOSUSPEND_DELAY_MS 2000
  72
  73/* Default delay of RPM device flush delayed work */
  74#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
  75
  76/* Default value of wait time before gating device ref clock */
  77#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
  78
  79/* Polling time to wait for fDeviceInit */
  80#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
  81
  82#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)
  83
  84#define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
  85        ({                                                              \
  86                int _ret;                                               \
  87                if (_on)                                                \
  88                        _ret = ufshcd_enable_vreg(_dev, _vreg);         \
  89                else                                                    \
  90                        _ret = ufshcd_disable_vreg(_dev, _vreg);        \
  91                _ret;                                                   \
  92        })
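/*
 * Illustrative usage (a sketch, not a call site in this hunk): the regulator
 * setup paths are expected to toggle a supply like this, with
 * hba->vreg_info.vcc being the VCC supply descriptor:
 *
 *        ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 */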
  93
  94#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
  95        size_t __len = (len);                                            \
  96        print_hex_dump(KERN_ERR, prefix_str,                             \
  97                       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
  98                       16, 4, buf, __len, false);                        \
  99} while (0)
 100
 101int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 102                     const char *prefix)
 103{
 104        u32 *regs;
 105        size_t pos;
 106
 107        if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
 108                return -EINVAL;
 109
 110        regs = kzalloc(len, GFP_ATOMIC);
 111        if (!regs)
 112                return -ENOMEM;
 113
 114        for (pos = 0; pos < len; pos += 4)
 115                regs[pos / 4] = ufshcd_readl(hba, offset + pos);
 116
 117        ufshcd_hex_dump(prefix, regs, len);
 118        kfree(regs);
 119
 120        return 0;
 121}
 122EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
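/*
 * Illustrative usage (a sketch mirroring the call in ufshcd_print_evt_hist()
 * below): dump the whole standard UFSHCI register space from an error path.
 * Both offset and length must be 4-byte aligned, per the -EINVAL check above.
 *
 *        if (ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "))
 *                dev_err(hba->dev, "register dump failed\n");
 */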
 123
 124enum {
 125        UFSHCD_MAX_CHANNEL      = 0,
 126        UFSHCD_MAX_ID           = 1,
 127        UFSHCD_CMD_PER_LUN      = 32,
 128        UFSHCD_CAN_QUEUE        = 32,
 129};
 130
 131/* UFSHCD states */
 132enum {
 133        UFSHCD_STATE_RESET,
 134        UFSHCD_STATE_ERROR,
 135        UFSHCD_STATE_OPERATIONAL,
 136        UFSHCD_STATE_EH_SCHEDULED_FATAL,
 137        UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
 138};
 139
 140/* UFSHCD error handling flags */
 141enum {
 142        UFSHCD_EH_IN_PROGRESS = (1 << 0),
 143};
 144
 145/* UFSHCD UIC layer error flags */
 146enum {
 147        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
 148        UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
 149        UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
 150        UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
 151        UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
 152        UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
 153        UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
 154};
 155
 156#define ufshcd_set_eh_in_progress(h) \
 157        ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
 158#define ufshcd_eh_in_progress(h) \
 159        ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
 160#define ufshcd_clear_eh_in_progress(h) \
 161        ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
 162
 163struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
 164        [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
 165        [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 166        [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
 167        [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 168        [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 169        [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
 170        /*
 171         * For DeepSleep, the link is first put in hibern8 and then off.
 172         * Leaving the link in hibern8 is not supported.
 173         */
 174        [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
 175};
 176
 177static inline enum ufs_dev_pwr_mode
 178ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
 179{
 180        return ufs_pm_lvl_states[lvl].dev_state;
 181}
 182
 183static inline enum uic_link_state
 184ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
 185{
 186        return ufs_pm_lvl_states[lvl].link_state;
 187}
 188
 189static inline enum ufs_pm_level
 190ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 191                                        enum uic_link_state link_state)
 192{
 193        enum ufs_pm_level lvl;
 194
 195        for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
 196                if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
 197                        (ufs_pm_lvl_states[lvl].link_state == link_state))
 198                        return lvl;
 199        }
 200
 201        /* if no match is found, return level 0 */
 202        return UFS_PM_LVL_0;
 203}
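/*
 * Worked example (derived from the ufs_pm_lvl_states table above): a sleeping
 * device with the link kept in hibern8 resolves to UFS_PM_LVL_3, and any
 * combination not listed in the table falls back to UFS_PM_LVL_0.
 *
 *        lvl = ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *                                                        UIC_LINK_HIBERN8_STATE);
 *        -> lvl == UFS_PM_LVL_3
 */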
 204
 205static struct ufs_dev_fix ufs_fixups[] = {
 206        /* UFS cards deviations table */
 207        UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
 208                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 209        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 210                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
 211                UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
 212                UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
 213        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
 214                UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
 215        UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
 216                UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
 217        UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
 218                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 219        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
 220                UFS_DEVICE_QUIRK_PA_TACTIVATE),
 221        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
 222                UFS_DEVICE_QUIRK_PA_TACTIVATE),
 223        END_FIX
 224};
 225
 226static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
 227static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 228static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 229static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 230static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 231static void ufshcd_hba_exit(struct ufs_hba *hba);
 232static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
 233static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
 234static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 235static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 236static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 237static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 238static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
 239static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 240static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 241static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 242static irqreturn_t ufshcd_intr(int irq, void *__hba);
 243static int ufshcd_change_power_mode(struct ufs_hba *hba,
 244                             struct ufs_pa_layer_attr *pwr_mode);
 245static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 246static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 247static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 248static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 249                                         struct ufs_vreg *vreg);
 250static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
 251static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
 252static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
 253static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 254static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 255
 256static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 257{
 258        return tag >= 0 && tag < hba->nutrs;
 259}
 260
 261static inline void ufshcd_enable_irq(struct ufs_hba *hba)
 262{
 263        if (!hba->is_irq_enabled) {
 264                enable_irq(hba->irq);
 265                hba->is_irq_enabled = true;
 266        }
 267}
 268
 269static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 270{
 271        if (hba->is_irq_enabled) {
 272                disable_irq(hba->irq);
 273                hba->is_irq_enabled = false;
 274        }
 275}
 276
 277static inline void ufshcd_wb_config(struct ufs_hba *hba)
 278{
 279        if (!ufshcd_is_wb_allowed(hba))
 280                return;
 281
 282        ufshcd_wb_toggle(hba, true);
 283
 284        ufshcd_wb_toggle_flush_during_h8(hba, true);
 285        if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
 286                ufshcd_wb_toggle_flush(hba, true);
 287}
 288
 289static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
 290{
 291        if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
 292                scsi_unblock_requests(hba->host);
 293}
 294
 295static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
 296{
 297        if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
 298                scsi_block_requests(hba->host);
 299}
 300
 301static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 302                                      enum ufs_trace_str_t str_t)
 303{
 304        struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
 305        struct utp_upiu_header *header;
 306
 307        if (!trace_ufshcd_upiu_enabled())
 308                return;
 309
 310        if (str_t == UFS_CMD_SEND)
 311                header = &rq->header;
 312        else
 313                header = &hba->lrb[tag].ucd_rsp_ptr->header;
 314
 315        trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
 316                          UFS_TSF_CDB);
 317}
 318
 319static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
 320                                        enum ufs_trace_str_t str_t,
 321                                        struct utp_upiu_req *rq_rsp)
 322{
 323        if (!trace_ufshcd_upiu_enabled())
 324                return;
 325
 326        trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
 327                          &rq_rsp->qr, UFS_TSF_OSF);
 328}
 329
 330static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 331                                     enum ufs_trace_str_t str_t)
 332{
 333        int off = (int)tag - hba->nutrs;
 334        struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
 335
 336        if (!trace_ufshcd_upiu_enabled())
 337                return;
 338
 339        if (str_t == UFS_TM_SEND)
 340                trace_ufshcd_upiu(dev_name(hba->dev), str_t,
 341                                  &descp->upiu_req.req_header,
 342                                  &descp->upiu_req.input_param1,
 343                                  UFS_TSF_TM_INPUT);
 344        else
 345                trace_ufshcd_upiu(dev_name(hba->dev), str_t,
 346                                  &descp->upiu_rsp.rsp_header,
 347                                  &descp->upiu_rsp.output_param1,
 348                                  UFS_TSF_TM_OUTPUT);
 349}
 350
 351static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
 352                                         struct uic_command *ucmd,
 353                                         enum ufs_trace_str_t str_t)
 354{
 355        u32 cmd;
 356
 357        if (!trace_ufshcd_uic_command_enabled())
 358                return;
 359
 360        if (str_t == UFS_CMD_SEND)
 361                cmd = ucmd->command;
 362        else
 363                cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
 364
 365        trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
 366                                 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
 367                                 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
 368                                 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
 369}
 370
 371static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
 372                                     enum ufs_trace_str_t str_t)
 373{
 374        u64 lba = -1;
 375        u8 opcode = 0, group_id = 0;
 376        u32 intr, doorbell;
 377        struct ufshcd_lrb *lrbp = &hba->lrb[tag];
 378        struct scsi_cmnd *cmd = lrbp->cmd;
 379        int transfer_len = -1;
 380
 381        if (!cmd)
 382                return;
 383
 384        if (!trace_ufshcd_command_enabled()) {
 385                /* trace the UPIU without tracing the command */
 386                ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
 387                return;
 388        }
 389
 390        /* trace UPIU also */
 391        ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
 392        opcode = cmd->cmnd[0];
 393        lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
 394
 395        if (opcode == READ_10 || opcode == WRITE_10) {
 396                /*
 397                 * Currently we only fully trace read(10) and write(10) commands
 398                 */
 399                transfer_len =
 400                       be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
 401                if (opcode == WRITE_10)
 402                        group_id = lrbp->cmd->cmnd[6];
 403        } else if (opcode == UNMAP) {
 404                /*
 405                 * The number of bytes to be unmapped, beginning at the LBA.
 406                 */
 407                transfer_len = blk_rq_bytes(cmd->request);
 408        }
 409
 410        intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 411        doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 412        trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
 413                        doorbell, transfer_len, intr, lba, opcode, group_id);
 414}
 415
 416static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
 417{
 418        struct ufs_clk_info *clki;
 419        struct list_head *head = &hba->clk_list_head;
 420
 421        if (list_empty(head))
 422                return;
 423
 424        list_for_each_entry(clki, head, list) {
 425                if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
 426                                clki->max_freq)
 427                        dev_err(hba->dev, "clk: %s, rate: %u\n",
 428                                        clki->name, clki->curr_freq);
 429        }
 430}
 431
 432static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
 433                             char *err_name)
 434{
 435        int i;
 436        bool found = false;
 437        struct ufs_event_hist *e;
 438
 439        if (id >= UFS_EVT_CNT)
 440                return;
 441
 442        e = &hba->ufs_stats.event[id];
 443
 444        for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
 445                int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
 446
 447                if (e->tstamp[p] == 0)
 448                        continue;
 449                dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
 450                        e->val[p], ktime_to_us(e->tstamp[p]));
 451                found = true;
 452        }
 453
 454        if (!found)
 455                dev_err(hba->dev, "No record of %s\n", err_name);
 456        else
 457                dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
 458}
 459
 460static void ufshcd_print_evt_hist(struct ufs_hba *hba)
 461{
 462        ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 463
 464        ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
 465        ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
 466        ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
 467        ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
 468        ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
 469        ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
 470                         "auto_hibern8_err");
 471        ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
 472        ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
 473                         "link_startup_fail");
 474        ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
 475        ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
 476                         "suspend_fail");
 477        ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
 478        ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
 479        ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
 480
 481        ufshcd_vops_dbg_register_dump(hba);
 482}
 483
 484static
 485void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
 486{
 487        struct ufshcd_lrb *lrbp;
 488        int prdt_length;
 489        int tag;
 490
 491        for_each_set_bit(tag, &bitmap, hba->nutrs) {
 492                lrbp = &hba->lrb[tag];
 493
 494                dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
 495                                tag, ktime_to_us(lrbp->issue_time_stamp));
 496                dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
 497                                tag, ktime_to_us(lrbp->compl_time_stamp));
 498                dev_err(hba->dev,
 499                        "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
 500                        tag, (u64)lrbp->utrd_dma_addr);
 501
 502                ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
 503                                sizeof(struct utp_transfer_req_desc));
 504                dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
 505                        (u64)lrbp->ucd_req_dma_addr);
 506                ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
 507                                sizeof(struct utp_upiu_req));
 508                dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
 509                        (u64)lrbp->ucd_rsp_dma_addr);
 510                ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
 511                                sizeof(struct utp_upiu_rsp));
 512
 513                prdt_length = le16_to_cpu(
 514                        lrbp->utr_descriptor_ptr->prd_table_length);
 515                if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
 516                        prdt_length /= sizeof(struct ufshcd_sg_entry);
 517
 518                dev_err(hba->dev,
 519                        "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
 520                        tag, prdt_length,
 521                        (u64)lrbp->ucd_prdt_dma_addr);
 522
 523                if (pr_prdt)
 524                        ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
 525                                sizeof(struct ufshcd_sg_entry) * prdt_length);
 526        }
 527}
 528
 529static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 530{
 531        int tag;
 532
 533        for_each_set_bit(tag, &bitmap, hba->nutmrs) {
 534                struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
 535
 536                dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
 537                ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
 538        }
 539}
 540
 541static void ufshcd_print_host_state(struct ufs_hba *hba)
 542{
 543        struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
 544
 545        dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
 546        dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
 547                hba->outstanding_reqs, hba->outstanding_tasks);
 548        dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
 549                hba->saved_err, hba->saved_uic_err);
 550        dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
 551                hba->curr_dev_pwr_mode, hba->uic_link_state);
 552        dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
 553                hba->pm_op_in_progress, hba->is_sys_suspended);
 554        dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
 555                hba->auto_bkops_enabled, hba->host->host_self_blocked);
 556        dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
 557        dev_err(hba->dev,
 558                "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
 559                ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
 560                hba->ufs_stats.hibern8_exit_cnt);
 561        dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
 562                ktime_to_us(hba->ufs_stats.last_intr_ts),
 563                hba->ufs_stats.last_intr_status);
 564        dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
 565                hba->eh_flags, hba->req_abort_count);
 566        dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
 567                hba->ufs_version, hba->capabilities, hba->caps);
 568        dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
 569                hba->dev_quirks);
 570        if (sdev_ufs)
 571                dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
 572                        sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
 573
 574        ufshcd_print_clk_freqs(hba);
 575}
 576
 577/**
 578 * ufshcd_print_pwr_info - print power params as saved in hba's
 579 * power info (hba->pwr_info)
 580 * @hba: per-adapter instance
 581 */
 582static void ufshcd_print_pwr_info(struct ufs_hba *hba)
 583{
 584        static const char * const names[] = {
 585                "INVALID MODE",
 586                "FAST MODE",
 587                "SLOW_MODE",
 588                "INVALID MODE",
 589                "FASTAUTO_MODE",
 590                "SLOWAUTO_MODE",
 591                "INVALID MODE",
 592        };
 593
 594        dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
 595                 __func__,
 596                 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
 597                 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
 598                 names[hba->pwr_info.pwr_rx],
 599                 names[hba->pwr_info.pwr_tx],
 600                 hba->pwr_info.hs_rate);
 601}
 602
 603static void ufshcd_device_reset(struct ufs_hba *hba)
 604{
 605        int err;
 606
 607        err = ufshcd_vops_device_reset(hba);
 608
 609        if (!err) {
 610                ufshcd_set_ufs_dev_active(hba);
 611                if (ufshcd_is_wb_allowed(hba)) {
 612                        hba->dev_info.wb_enabled = false;
 613                        hba->dev_info.wb_buf_flush_enabled = false;
 614                }
 615        }
 616        if (err != -EOPNOTSUPP)
 617                ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
 618}
 619
 620void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
 621{
 622        if (!us)
 623                return;
 624
 625        if (us < 10)
 626                udelay(us);
 627        else
 628                usleep_range(us, us + tolerance);
 629}
 630EXPORT_SYMBOL_GPL(ufshcd_delay_us);
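/*
 * Illustrative usage (a sketch, not a specific call site): honour the default
 * ref-clk gating wait defined above with a 10 us tolerance. Delays shorter
 * than 10 us busy-wait via udelay(); longer ones sleep via usleep_range().
 *
 *        ufshcd_delay_us(UFSHCD_REF_CLK_GATING_WAIT_US, 10);
 */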
 631
 632/**
 633 * ufshcd_wait_for_register - wait for a register to reach the expected value
 634 * @hba: per-adapter instance
 635 * @reg: mmio register offset
 636 * @mask: mask to apply to the read register value
 637 * @val: value to wait for
 638 * @interval_us: polling interval in microseconds
 639 * @timeout_ms: timeout in milliseconds
 640 *
 641 * Return:
 642 * -ETIMEDOUT on error, zero on success.
 643 */
 644int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 645                                u32 val, unsigned long interval_us,
 646                                unsigned long timeout_ms)
 647{
 648        int err = 0;
 649        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 650
 651        /* ignore bits that we don't intend to wait on */
 652        val = val & mask;
 653
 654        while ((ufshcd_readl(hba, reg) & mask) != val) {
 655                usleep_range(interval_us, interval_us + 50);
 656                if (time_after(jiffies, timeout)) {
 657                        if ((ufshcd_readl(hba, reg) & mask) != val)
 658                                err = -ETIMEDOUT;
 659                        break;
 660                }
 661        }
 662
 663        return err;
 664}
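/*
 * Illustrative usage (a sketch of an assumed caller, similar to the driver's
 * command-clear path): poll the transfer request doorbell until the bit for
 * @tag reads back as 0, checking every 1000 us for up to 1000 ms.
 *
 *        u32 mask = 1U << tag;
 *        int err;
 *
 *        err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                       mask, ~mask, 1000, 1000);
 */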
 665
 666/**
 667 * ufshcd_get_intr_mask - Get the interrupt bit mask
 668 * @hba: Pointer to adapter instance
 669 *
 670 * Returns interrupt bit mask per version
 671 */
 672static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 673{
 674        if (hba->ufs_version == ufshci_version(1, 0))
 675                return INTERRUPT_MASK_ALL_VER_10;
 676        if (hba->ufs_version <= ufshci_version(2, 0))
 677                return INTERRUPT_MASK_ALL_VER_11;
 678
 679        return INTERRUPT_MASK_ALL_VER_21;
 680}
 681
 682/**
 683 * ufshcd_get_ufs_version - Get the UFSHCI version supported by the HBA
 684 * @hba: Pointer to adapter instance
 685 *
 686 * Returns UFSHCI version supported by the controller
 687 */
 688static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 689{
 690        u32 ufshci_ver;
 691
 692        if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
 693                ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
 694        else
 695                ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
 696
 697        /*
 698         * UFSHCI v1.x uses a different version scheme; to allow
 699         * comparisons via the ufshci_version() helper, convert it
 700         * to the same scheme used by UFSHCI 2.0 and later.
 701         */
 702        if (ufshci_ver & 0x00010000)
 703                return ufshci_version(1, ufshci_ver & 0x00000100);
 704
 705        return ufshci_ver;
 706}
 707
 708/**
 709 * ufshcd_is_device_present - Check if any device is connected to
 710 *                            the host controller
 711 * @hba: pointer to adapter instance
 712 *
 713 * Returns true if device present, false if no device detected
 714 */
 715static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
 716{
 717        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 718                                                DEVICE_PRESENT) ? true : false;
 719}
 720
 721/**
 722 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 723 * @lrbp: pointer to local command reference block
 724 *
 725 * This function is used to get the OCS field from the UTRD.
 726 * Returns the OCS field in the UTRD.
 727 */
 728static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 729{
 730        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 731}
 732
 733/**
 734 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 735 * @hba: per adapter instance
 736 * @pos: position of the bit to be cleared
 737 */
 738static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 739{
 740        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
 741                ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 742        else
 743                ufshcd_writel(hba, ~(1 << pos),
 744                                REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 745}
 746
 747/**
 748 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 749 * @hba: per adapter instance
 750 * @pos: position of the bit to be cleared
 751 */
 752static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
 753{
 754        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
 755                ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
 756        else
 757                ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
 758}
 759
 760/**
 761 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 762 * @hba: per adapter instance
 763 * @tag: position of the bit to be cleared
 764 */
 765static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
 766{
 767        clear_bit(tag, &hba->outstanding_reqs);
 768}
 769
 770/**
 771 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 772 * @reg: Register value of host controller status
 773 *
 774 * Returns 0 on success and a positive value on failure
 775 */
 776static inline int ufshcd_get_lists_status(u32 reg)
 777{
 778        return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
 779}
 780
 781/**
 782 * ufshcd_get_uic_cmd_result - Get the UIC command result
 783 * @hba: Pointer to adapter instance
 784 *
 785 * This function gets the result of UIC command completion.
 786 * Returns 0 on success, a non-zero value on error.
 787 */
 788static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 789{
 790        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 791               MASK_UIC_COMMAND_RESULT;
 792}
 793
 794/**
 795 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 796 * @hba: Pointer to adapter instance
 797 *
 798 * This function gets UIC command argument 3.
 799 * Returns the attribute value read from UIC command argument 3.
 800 */
 801static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
 802{
 803        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
 804}
 805
 806/**
 807 * ufshcd_get_req_rsp - returns the TR response transaction type
 808 * @ucd_rsp_ptr: pointer to response UPIU
 809 */
 810static inline int
 811ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
 812{
 813        return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
 814}
 815
 816/**
 817 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 818 * @ucd_rsp_ptr: pointer to response UPIU
 819 *
 820 * This function gets the response status and scsi_status from response UPIU
 821 * Returns the response result code.
 822 */
 823static inline int
 824ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 825{
 826        return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 827}
 828
 829/**
 830 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 831 *                              from response UPIU
 832 * @ucd_rsp_ptr: pointer to response UPIU
 833 *
 834 * Return the data segment length.
 835 */
 836static inline unsigned int
 837ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 838{
 839        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 840                MASK_RSP_UPIU_DATA_SEG_LEN;
 841}
 842
 843/**
 844 * ufshcd_is_exception_event - Check if the device raised an exception event
 845 * @ucd_rsp_ptr: pointer to response UPIU
 846 *
 847 * The function checks if the device raised an exception event indicated in
 848 * the Device Information field of response UPIU.
 849 *
 850 * Returns true if exception is raised, false otherwise.
 851 */
 852static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 853{
 854        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 855                        MASK_RSP_EXCEPTION_EVENT ? true : false;
 856}
 857
 858/**
 859 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 860 * @hba: per adapter instance
 861 */
 862static inline void
 863ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 864{
 865        ufshcd_writel(hba, INT_AGGR_ENABLE |
 866                      INT_AGGR_COUNTER_AND_TIMER_RESET,
 867                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 868}
 869
 870/**
 871 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 872 * @hba: per adapter instance
 873 * @cnt: Interrupt aggregation counter threshold
 874 * @tmout: Interrupt aggregation timeout value
 875 */
 876static inline void
 877ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
 878{
 879        ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
 880                      INT_AGGR_COUNTER_THLD_VAL(cnt) |
 881                      INT_AGGR_TIMEOUT_VAL(tmout),
 882                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 883}
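/*
 * Example (a sketch matching the defaults defined above): enable interrupt
 * aggregation with the counter threshold set to the full transfer request
 * queue depth and the default timeout of 2 x 40 us.
 *
 *        ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */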
 884
 885/**
 886 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 887 * @hba: per adapter instance
 888 */
 889static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
 890{
 891        ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 892}
 893
 894/**
 895 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 896 *                      Setting the run-stop registers to 1 indicates to the
 897 *                      host controller that it can process requests
 898 * @hba: per adapter instance
 899 */
 900static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 901{
 902        ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
 903                      REG_UTP_TASK_REQ_LIST_RUN_STOP);
 904        ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
 905                      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
 906}
 907
 908/**
 909 * ufshcd_hba_start - Start controller initialization sequence
 910 * @hba: per adapter instance
 911 */
 912static inline void ufshcd_hba_start(struct ufs_hba *hba)
 913{
 914        u32 val = CONTROLLER_ENABLE;
 915
 916        if (ufshcd_crypto_enable(hba))
 917                val |= CRYPTO_GENERAL_ENABLE;
 918
 919        ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
 920}
 921
 922/**
 923 * ufshcd_is_hba_active - Get controller state
 924 * @hba: per adapter instance
 925 *
 926 * Returns false if controller is active, true otherwise
 927 */
 928static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
 929{
 930        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
 931                ? false : true;
 932}
 933
 934u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
 935{
 936        /* HCI versions 1.0 and 1.1 support UniPro 1.41 */
 937        if (hba->ufs_version <= ufshci_version(1, 1))
 938                return UFS_UNIPRO_VER_1_41;
 939        else
 940                return UFS_UNIPRO_VER_1_6;
 941}
 942EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
 943
 944static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
 945{
 946        /*
 947         * If both host and device support UniPro ver 1.6 or later, PA layer
 948         * parameter tuning happens during link startup itself.
 949         *
 950         * We could manually tune PA layer parameters if either host or device
 951         * doesn't support UniPro ver 1.6 or later. But to keep the manual tuning
 952         * logic simple, we only do manual tuning when the local UniPro version
 953         * doesn't support ver 1.6 or later.
 954         */
 955        if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
 956                return true;
 957        else
 958                return false;
 959}
 960
 961/**
 962 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 963 * @hba: per adapter instance
 964 * @scale_up: If true, set the max possible frequency, otherwise set the low frequency
 965 *
 966 * Returns 0 if successful
 967 * Returns < 0 for any other errors
 968 */
 969static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
 970{
 971        int ret = 0;
 972        struct ufs_clk_info *clki;
 973        struct list_head *head = &hba->clk_list_head;
 974
 975        if (list_empty(head))
 976                goto out;
 977
 978        list_for_each_entry(clki, head, list) {
 979                if (!IS_ERR_OR_NULL(clki->clk)) {
 980                        if (scale_up && clki->max_freq) {
 981                                if (clki->curr_freq == clki->max_freq)
 982                                        continue;
 983
 984                                ret = clk_set_rate(clki->clk, clki->max_freq);
 985                                if (ret) {
 986                                        dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 987                                                __func__, clki->name,
 988                                                clki->max_freq, ret);
 989                                        break;
 990                                }
 991                                trace_ufshcd_clk_scaling(dev_name(hba->dev),
 992                                                "scaled up", clki->name,
 993                                                clki->curr_freq,
 994                                                clki->max_freq);
 995
 996                                clki->curr_freq = clki->max_freq;
 997
 998                        } else if (!scale_up && clki->min_freq) {
 999                                if (clki->curr_freq == clki->min_freq)
1000                                        continue;
1001
1002                                ret = clk_set_rate(clki->clk, clki->min_freq);
1003                                if (ret) {
1004                                        dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1005                                                __func__, clki->name,
1006                                                clki->min_freq, ret);
1007                                        break;
1008                                }
1009                                trace_ufshcd_clk_scaling(dev_name(hba->dev),
1010                                                "scaled down", clki->name,
1011                                                clki->curr_freq,
1012                                                clki->min_freq);
1013                                clki->curr_freq = clki->min_freq;
1014                        }
1015                }
1016                dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1017                                clki->name, clk_get_rate(clki->clk));
1018        }
1019
1020out:
1021        return ret;
1022}
1023
1024/**
1025 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1026 * @hba: per adapter instance
1027 * @scale_up: True if scaling up and false if scaling down
1028 *
1029 * Returns 0 if successful
1030 * Returns < 0 for any other errors
1031 */
1032static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1033{
1034        int ret = 0;
1035        ktime_t start = ktime_get();
1036
1037        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1038        if (ret)
1039                goto out;
1040
1041        ret = ufshcd_set_clk_freq(hba, scale_up);
1042        if (ret)
1043                goto out;
1044
1045        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1046        if (ret)
1047                ufshcd_set_clk_freq(hba, !scale_up);
1048
1049out:
1050        trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1051                        (scale_up ? "up" : "down"),
1052                        ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1053        return ret;
1054}
1055
1056/**
1057 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1058 * @hba: per adapter instance
1059 * @scale_up: True if scaling up and false if scaling down
1060 *
1061 * Returns true if scaling is required, false otherwise.
1062 */
1063static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1064                                               bool scale_up)
1065{
1066        struct ufs_clk_info *clki;
1067        struct list_head *head = &hba->clk_list_head;
1068
1069        if (list_empty(head))
1070                return false;
1071
1072        list_for_each_entry(clki, head, list) {
1073                if (!IS_ERR_OR_NULL(clki->clk)) {
1074                        if (scale_up && clki->max_freq) {
1075                                if (clki->curr_freq == clki->max_freq)
1076                                        continue;
1077                                return true;
1078                        } else if (!scale_up && clki->min_freq) {
1079                                if (clki->curr_freq == clki->min_freq)
1080                                        continue;
1081                                return true;
1082                        }
1083                }
1084        }
1085
1086        return false;
1087}
1088
1089static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1090                                        u64 wait_timeout_us)
1091{
1092        unsigned long flags;
1093        int ret = 0;
1094        u32 tm_doorbell;
1095        u32 tr_doorbell;
1096        bool timeout = false, do_last_check = false;
1097        ktime_t start;
1098
1099        ufshcd_hold(hba, false);
1100        spin_lock_irqsave(hba->host->host_lock, flags);
1101        /*
1102         * Wait for all the outstanding tasks/transfer requests.
1103         * Verify by checking the doorbell registers are clear.
1104         */
1105        start = ktime_get();
1106        do {
1107                if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1108                        ret = -EBUSY;
1109                        goto out;
1110                }
1111
1112                tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1113                tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1114                if (!tm_doorbell && !tr_doorbell) {
1115                        timeout = false;
1116                        break;
1117                } else if (do_last_check) {
1118                        break;
1119                }
1120
1121                spin_unlock_irqrestore(hba->host->host_lock, flags);
1122                schedule();
1123                if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1124                    wait_timeout_us) {
1125                        timeout = true;
1126                        /*
1127                         * We might have been scheduled out for a long time, so
1128                         * make sure to check whether the doorbells have been
1129                         * cleared by now.
1130                         */
1131                        do_last_check = true;
1132                }
1133                spin_lock_irqsave(hba->host->host_lock, flags);
1134        } while (tm_doorbell || tr_doorbell);
1135
1136        if (timeout) {
1137                dev_err(hba->dev,
1138                        "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1139                        __func__, tm_doorbell, tr_doorbell);
1140                ret = -EBUSY;
1141        }
1142out:
1143        spin_unlock_irqrestore(hba->host->host_lock, flags);
1144        ufshcd_release(hba);
1145        return ret;
1146}
1147
1148/**
1149 * ufshcd_scale_gear - scale up/down UFS gear
1150 * @hba: per adapter instance
1151 * @scale_up: True for scaling up gear and false for scaling down
1152 *
1153 * Returns 0 for success,
1154 * Returns -EBUSY if scaling can't happen at this time
1155 * Returns non-zero for any other errors
1156 */
1157static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1158{
1159        int ret = 0;
1160        struct ufs_pa_layer_attr new_pwr_info;
1161
1162        if (scale_up) {
1163                memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1164                       sizeof(struct ufs_pa_layer_attr));
1165        } else {
1166                memcpy(&new_pwr_info, &hba->pwr_info,
1167                       sizeof(struct ufs_pa_layer_attr));
1168
1169                if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1170                    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1171                        /* save the current power mode */
1172                        memcpy(&hba->clk_scaling.saved_pwr_info.info,
1173                                &hba->pwr_info,
1174                                sizeof(struct ufs_pa_layer_attr));
1175
1176                        /* scale down gear */
1177                        new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1178                        new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1179                }
1180        }
1181
1182        /* check if the power mode needs to be changed */
1183        ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1184        if (ret)
1185                dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1186                        __func__, ret,
1187                        hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1188                        new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1189
1190        return ret;
1191}
1192
1193static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1194{
1195        #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
1196        int ret = 0;
1197        /*
1198         * make sure that there are no outstanding requests when
1199         * clock scaling is in progress
1200         */
1201        ufshcd_scsi_block_requests(hba);
1202        down_write(&hba->clk_scaling_lock);
1203
1204        if (!hba->clk_scaling.is_allowed ||
1205            ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1206                ret = -EBUSY;
1207                up_write(&hba->clk_scaling_lock);
1208                ufshcd_scsi_unblock_requests(hba);
1209                goto out;
1210        }
1211
1212        /* let's not get into low power until clock scaling is completed */
1213        ufshcd_hold(hba, false);
1214
1215out:
1216        return ret;
1217}
1218
1219static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
1220{
1221        if (writelock)
1222                up_write(&hba->clk_scaling_lock);
1223        else
1224                up_read(&hba->clk_scaling_lock);
1225        ufshcd_scsi_unblock_requests(hba);
1226        ufshcd_release(hba);
1227}
1228
1229/**
1230 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1231 * @hba: per adapter instance
1232 * @scale_up: True for scaling up and false for scaling down
1233 *
1234 * Returns 0 for success,
1235 * Returns -EBUSY if scaling can't happen at this time
1236 * Returns non-zero for any other errors
1237 */
1238static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1239{
1240        int ret = 0;
1241        bool is_writelock = true;
1242
1243        ret = ufshcd_clock_scaling_prepare(hba);
1244        if (ret)
1245                return ret;
1246
1247        /* scale down the gear before scaling down clocks */
1248        if (!scale_up) {
1249                ret = ufshcd_scale_gear(hba, false);
1250                if (ret)
1251                        goto out_unprepare;
1252        }
1253
1254        ret = ufshcd_scale_clks(hba, scale_up);
1255        if (ret) {
1256                if (!scale_up)
1257                        ufshcd_scale_gear(hba, true);
1258                goto out_unprepare;
1259        }
1260
1261        /* scale up the gear after scaling up clocks */
1262        if (scale_up) {
1263                ret = ufshcd_scale_gear(hba, true);
1264                if (ret) {
1265                        ufshcd_scale_clks(hba, false);
1266                        goto out_unprepare;
1267                }
1268        }
1269
1270        /* Enable Write Booster if we have scaled up else disable it */
1271        downgrade_write(&hba->clk_scaling_lock);
1272        is_writelock = false;
1273        ufshcd_wb_toggle(hba, scale_up);
1274
1275out_unprepare:
1276        ufshcd_clock_scaling_unprepare(hba, is_writelock);
1277        return ret;
1278}
1279
1280static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1281{
1282        struct ufs_hba *hba = container_of(work, struct ufs_hba,
1283                                           clk_scaling.suspend_work);
1284        unsigned long irq_flags;
1285
1286        spin_lock_irqsave(hba->host->host_lock, irq_flags);
1287        if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1288                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1289                return;
1290        }
1291        hba->clk_scaling.is_suspended = true;
1292        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1293
1294        __ufshcd_suspend_clkscaling(hba);
1295}
1296
1297static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1298{
1299        struct ufs_hba *hba = container_of(work, struct ufs_hba,
1300                                           clk_scaling.resume_work);
1301        unsigned long irq_flags;
1302
1303        spin_lock_irqsave(hba->host->host_lock, irq_flags);
1304        if (!hba->clk_scaling.is_suspended) {
1305                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1306                return;
1307        }
1308        hba->clk_scaling.is_suspended = false;
1309        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1310
1311        devfreq_resume_device(hba->devfreq);
1312}
1313
1314static int ufshcd_devfreq_target(struct device *dev,
1315                                unsigned long *freq, u32 flags)
1316{
1317        int ret = 0;
1318        struct ufs_hba *hba = dev_get_drvdata(dev);
1319        ktime_t start;
1320        bool scale_up, sched_clk_scaling_suspend_work = false;
1321        struct list_head *clk_list = &hba->clk_list_head;
1322        struct ufs_clk_info *clki;
1323        unsigned long irq_flags;
1324
1325        if (!ufshcd_is_clkscaling_supported(hba))
1326                return -EINVAL;
1327
1328        clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1329        /* Override with the closest supported frequency */
1330        *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1331        spin_lock_irqsave(hba->host->host_lock, irq_flags);
1332        if (ufshcd_eh_in_progress(hba)) {
1333                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1334                return 0;
1335        }
1336
1337        if (!hba->clk_scaling.active_reqs)
1338                sched_clk_scaling_suspend_work = true;
1339
1340        if (list_empty(clk_list)) {
1341                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1342                goto out;
1343        }
1344
1345        /* Decide based on the rounded-off frequency and update */
1346        scale_up = (*freq == clki->max_freq) ? true : false;
1347        if (!scale_up)
1348                *freq = clki->min_freq;
1349        /* Update the frequency */
1350        if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1351                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1352                ret = 0;
1353                goto out; /* no state change required */
1354        }
1355        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1356
1357        start = ktime_get();
1358        ret = ufshcd_devfreq_scale(hba, scale_up);
1359
1360        trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1361                (scale_up ? "up" : "down"),
1362                ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1363
1364out:
1365        if (sched_clk_scaling_suspend_work)
1366                queue_work(hba->clk_scaling.workq,
1367                           &hba->clk_scaling.suspend_work);
1368
1369        return ret;
1370}
1371
1372static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1373{
1374        int *busy = priv;
1375
1376        WARN_ON_ONCE(reserved);
1377        (*busy)++;
1378        return false;
1379}
1380
1381/* Whether or not any tag is in use by a request that is in progress. */
1382static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1383{
1384        struct request_queue *q = hba->cmd_queue;
1385        int busy = 0;
1386
1387        blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1388        return busy;
1389}
1390
1391static int ufshcd_devfreq_get_dev_status(struct device *dev,
1392                struct devfreq_dev_status *stat)
1393{
1394        struct ufs_hba *hba = dev_get_drvdata(dev);
1395        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1396        unsigned long flags;
1397        struct list_head *clk_list = &hba->clk_list_head;
1398        struct ufs_clk_info *clki;
1399        ktime_t curr_t;
1400
1401        if (!ufshcd_is_clkscaling_supported(hba))
1402                return -EINVAL;
1403
1404        memset(stat, 0, sizeof(*stat));
1405
1406        spin_lock_irqsave(hba->host->host_lock, flags);
1407        curr_t = ktime_get();
1408        if (!scaling->window_start_t)
1409                goto start_window;
1410
1411        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1412        /*
1413         * If the current frequency is 0, the ondemand governor assumes no
1414         * initial frequency has been set and always requests the maximum
1415         * frequency.
1416         */
1417        stat->current_frequency = clki->curr_freq;
1418        if (scaling->is_busy_started)
1419                scaling->tot_busy_t += ktime_us_delta(curr_t,
1420                                scaling->busy_start_t);
1421
1422        stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1423        stat->busy_time = scaling->tot_busy_t;
1424start_window:
1425        scaling->window_start_t = curr_t;
1426        scaling->tot_busy_t = 0;
1427
1428        if (hba->outstanding_reqs) {
1429                scaling->busy_start_t = curr_t;
1430                scaling->is_busy_started = true;
1431        } else {
1432                scaling->busy_start_t = 0;
1433                scaling->is_busy_started = false;
1434        }
1435        spin_unlock_irqrestore(hba->host->host_lock, flags);
1436        return 0;
1437}
1438
1439static int ufshcd_devfreq_init(struct ufs_hba *hba)
1440{
1441        struct list_head *clk_list = &hba->clk_list_head;
1442        struct ufs_clk_info *clki;
1443        struct devfreq *devfreq;
1444        int ret;
1445
1446        /* Skip devfreq if we don't have any clocks in the list */
1447        if (list_empty(clk_list))
1448                return 0;
1449
1450        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1451        dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1452        dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1453
1454        ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1455                                         &hba->vps->ondemand_data);
1456        devfreq = devfreq_add_device(hba->dev,
1457                        &hba->vps->devfreq_profile,
1458                        DEVFREQ_GOV_SIMPLE_ONDEMAND,
1459                        &hba->vps->ondemand_data);
1460        if (IS_ERR(devfreq)) {
1461                ret = PTR_ERR(devfreq);
1462                dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1463
1464                dev_pm_opp_remove(hba->dev, clki->min_freq);
1465                dev_pm_opp_remove(hba->dev, clki->max_freq);
1466                return ret;
1467        }
1468
1469        hba->devfreq = devfreq;
1470
1471        return 0;
1472}
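
/*
 * A minimal sketch (not taken verbatim from this driver) of how the two
 * devfreq callbacks used above are typically wired into the profile that is
 * passed to devfreq_add_device(). The polling interval is an assumed example
 * value:
 *
 *	static struct devfreq_dev_profile example_devfreq_profile = {
 *		.polling_ms	= 100,		// assumed polling interval
 *		.target		= ufshcd_devfreq_target,
 *		.get_dev_status	= ufshcd_devfreq_get_dev_status,
 *	};
 */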
1473
1474static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1475{
1476        struct list_head *clk_list = &hba->clk_list_head;
1477        struct ufs_clk_info *clki;
1478
1479        if (!hba->devfreq)
1480                return;
1481
1482        devfreq_remove_device(hba->devfreq);
1483        hba->devfreq = NULL;
1484
1485        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1486        dev_pm_opp_remove(hba->dev, clki->min_freq);
1487        dev_pm_opp_remove(hba->dev, clki->max_freq);
1488}
1489
1490static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1491{
1492        unsigned long flags;
1493
1494        devfreq_suspend_device(hba->devfreq);
1495        spin_lock_irqsave(hba->host->host_lock, flags);
1496        hba->clk_scaling.window_start_t = 0;
1497        spin_unlock_irqrestore(hba->host->host_lock, flags);
1498}
1499
1500static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1501{
1502        unsigned long flags;
1503        bool suspend = false;
1504
1505        cancel_work_sync(&hba->clk_scaling.suspend_work);
1506        cancel_work_sync(&hba->clk_scaling.resume_work);
1507
1508        spin_lock_irqsave(hba->host->host_lock, flags);
1509        if (!hba->clk_scaling.is_suspended) {
1510                suspend = true;
1511                hba->clk_scaling.is_suspended = true;
1512        }
1513        spin_unlock_irqrestore(hba->host->host_lock, flags);
1514
1515        if (suspend)
1516                __ufshcd_suspend_clkscaling(hba);
1517}
1518
1519static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1520{
1521        unsigned long flags;
1522        bool resume = false;
1523
1524        spin_lock_irqsave(hba->host->host_lock, flags);
1525        if (hba->clk_scaling.is_suspended) {
1526                resume = true;
1527                hba->clk_scaling.is_suspended = false;
1528        }
1529        spin_unlock_irqrestore(hba->host->host_lock, flags);
1530
1531        if (resume)
1532                devfreq_resume_device(hba->devfreq);
1533}
1534
1535static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1536                struct device_attribute *attr, char *buf)
1537{
1538        struct ufs_hba *hba = dev_get_drvdata(dev);
1539
1540        return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1541}
1542
1543static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1544                struct device_attribute *attr, const char *buf, size_t count)
1545{
1546        struct ufs_hba *hba = dev_get_drvdata(dev);
1547        u32 value;
1548        int err = 0;
1549
1550        if (kstrtou32(buf, 0, &value))
1551                return -EINVAL;
1552
1553        down(&hba->host_sem);
1554        if (!ufshcd_is_user_access_allowed(hba)) {
1555                err = -EBUSY;
1556                goto out;
1557        }
1558
1559        value = !!value;
1560        if (value == hba->clk_scaling.is_enabled)
1561                goto out;
1562
1563        ufshcd_rpm_get_sync(hba);
1564        ufshcd_hold(hba, false);
1565
1566        hba->clk_scaling.is_enabled = value;
1567
1568        if (value) {
1569                ufshcd_resume_clkscaling(hba);
1570        } else {
1571                ufshcd_suspend_clkscaling(hba);
1572                err = ufshcd_devfreq_scale(hba, true);
1573                if (err)
1574                        dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1575                                        __func__, err);
1576        }
1577
1578        ufshcd_release(hba);
1579        ufshcd_rpm_put_sync(hba);
1580out:
1581        up(&hba->host_sem);
1582        return err ? err : count;
1583}
1584
1585static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1586{
1587        hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1588        hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1589        sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1590        hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1591        hba->clk_scaling.enable_attr.attr.mode = 0644;
1592        if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1593                dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1594}
1595
1596static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1597{
1598        if (hba->clk_scaling.enable_attr.attr.name)
1599                device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1600}
1601
1602static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1603{
1604        char wq_name[sizeof("ufs_clkscaling_00")];
1605
1606        if (!ufshcd_is_clkscaling_supported(hba))
1607                return;
1608
1609        if (!hba->clk_scaling.min_gear)
1610                hba->clk_scaling.min_gear = UFS_HS_G1;
1611
1612        INIT_WORK(&hba->clk_scaling.suspend_work,
1613                  ufshcd_clk_scaling_suspend_work);
1614        INIT_WORK(&hba->clk_scaling.resume_work,
1615                  ufshcd_clk_scaling_resume_work);
1616
1617        snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1618                 hba->host->host_no);
1619        hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1620
1621        hba->clk_scaling.is_initialized = true;
1622}
1623
1624static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1625{
1626        if (!hba->clk_scaling.is_initialized)
1627                return;
1628
1629        ufshcd_remove_clk_scaling_sysfs(hba);
1630        destroy_workqueue(hba->clk_scaling.workq);
1631        ufshcd_devfreq_remove(hba);
1632        hba->clk_scaling.is_initialized = false;
1633}
1634
1635static void ufshcd_ungate_work(struct work_struct *work)
1636{
1637        int ret;
1638        unsigned long flags;
1639        struct ufs_hba *hba = container_of(work, struct ufs_hba,
1640                        clk_gating.ungate_work);
1641
1642        cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1643
1644        spin_lock_irqsave(hba->host->host_lock, flags);
1645        if (hba->clk_gating.state == CLKS_ON) {
1646                spin_unlock_irqrestore(hba->host->host_lock, flags);
1647                goto unblock_reqs;
1648        }
1649
1650        spin_unlock_irqrestore(hba->host->host_lock, flags);
1651        ufshcd_hba_vreg_set_hpm(hba);
1652        ufshcd_setup_clocks(hba, true);
1653
1654        ufshcd_enable_irq(hba);
1655
1656        /* Exit from hibern8 */
1657        if (ufshcd_can_hibern8_during_gating(hba)) {
1658                /* Prevent gating in this path */
1659                hba->clk_gating.is_suspended = true;
1660                if (ufshcd_is_link_hibern8(hba)) {
1661                        ret = ufshcd_uic_hibern8_exit(hba);
1662                        if (ret)
1663                                dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1664                                        __func__, ret);
1665                        else
1666                                ufshcd_set_link_active(hba);
1667                }
1668                hba->clk_gating.is_suspended = false;
1669        }
1670unblock_reqs:
1671        ufshcd_scsi_unblock_requests(hba);
1672}
1673
1674/**
1675 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release,
1676 * exit hibern8 (if entered) and mark the link as active.
1677 * @hba: per adapter instance
1678 * @async: if true, do not block; return -EAGAIN while ungating is in progress.
1679 */
1680int ufshcd_hold(struct ufs_hba *hba, bool async)
1681{
1682        int rc = 0;
1683        bool flush_result;
1684        unsigned long flags;
1685
1686        if (!ufshcd_is_clkgating_allowed(hba))
1687                goto out;
1688        spin_lock_irqsave(hba->host->host_lock, flags);
1689        hba->clk_gating.active_reqs++;
1690
1691start:
1692        switch (hba->clk_gating.state) {
1693        case CLKS_ON:
1694                /*
1695                 * Wait for the ungate work to complete if it is in progress.
1696                 * Even though the clocks may already be ON, the link could
1697                 * still be in hibern8 if hibern8 is allowed during clock
1698                 * gating.
1699                 * Make sure the link has exited hibern8 in addition to the
1700                 * clocks being ON.
1701                 */
1702                if (ufshcd_can_hibern8_during_gating(hba) &&
1703                    ufshcd_is_link_hibern8(hba)) {
1704                        if (async) {
1705                                rc = -EAGAIN;
1706                                hba->clk_gating.active_reqs--;
1707                                break;
1708                        }
1709                        spin_unlock_irqrestore(hba->host->host_lock, flags);
1710                        flush_result = flush_work(&hba->clk_gating.ungate_work);
1711                        if (hba->clk_gating.is_suspended && !flush_result)
1712                                goto out;
1713                        spin_lock_irqsave(hba->host->host_lock, flags);
1714                        goto start;
1715                }
1716                break;
1717        case REQ_CLKS_OFF:
1718                if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1719                        hba->clk_gating.state = CLKS_ON;
1720                        trace_ufshcd_clk_gating(dev_name(hba->dev),
1721                                                hba->clk_gating.state);
1722                        break;
1723                }
1724                /*
1725                 * If we are here, it means gating work is either done or
1726                 * currently running. Hence, fall through to cancel gating
1727                 * work and to enable clocks.
1728                 */
1729                fallthrough;
1730        case CLKS_OFF:
1731                hba->clk_gating.state = REQ_CLKS_ON;
1732                trace_ufshcd_clk_gating(dev_name(hba->dev),
1733                                        hba->clk_gating.state);
1734                if (queue_work(hba->clk_gating.clk_gating_workq,
1735                               &hba->clk_gating.ungate_work))
1736                        ufshcd_scsi_block_requests(hba);
1737                /*
1738                 * fall through to check if we should wait for this
1739                 * work to be done or not.
1740                 */
1741                fallthrough;
1742        case REQ_CLKS_ON:
1743                if (async) {
1744                        rc = -EAGAIN;
1745                        hba->clk_gating.active_reqs--;
1746                        break;
1747                }
1748
1749                spin_unlock_irqrestore(hba->host->host_lock, flags);
1750                flush_work(&hba->clk_gating.ungate_work);
1751                /* Make sure state is CLKS_ON before returning */
1752                spin_lock_irqsave(hba->host->host_lock, flags);
1753                goto start;
1754        default:
1755                dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1756                                __func__, hba->clk_gating.state);
1757                break;
1758        }
1759        spin_unlock_irqrestore(hba->host->host_lock, flags);
1760out:
1761        return rc;
1762}
1763EXPORT_SYMBOL_GPL(ufshcd_hold);
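
/*
 * A minimal usage sketch of the hold/release pairing, modelled on callers in
 * this file (ufshcd_send_uic_cmd() and ufshcd_queuecommand()); the helper
 * named in the first example is hypothetical:
 *
 *	// Blocking caller: wait until clocks are ungated and the link is active.
 *	ufshcd_hold(hba, false);
 *	issue_something_on_the_link(hba);	// hypothetical helper
 *	ufshcd_release(hba);
 *
 *	// Non-blocking caller (e.g. queuecommand): back off and retry later.
 *	if (ufshcd_hold(hba, true))
 *		return SCSI_MLQUEUE_HOST_BUSY;
 */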
1764
1765static void ufshcd_gate_work(struct work_struct *work)
1766{
1767        struct ufs_hba *hba = container_of(work, struct ufs_hba,
1768                        clk_gating.gate_work.work);
1769        unsigned long flags;
1770        int ret;
1771
1772        spin_lock_irqsave(hba->host->host_lock, flags);
1773        /*
1774         * If a request to turn the clocks back on arrived before this work
1775         * ran, the gating state will have been changed to REQ_CLKS_ON. In
1776         * that case save time by skipping the gating work and exit after
1777         * changing the clock state to CLKS_ON.
1778         */
1779        if (hba->clk_gating.is_suspended ||
1780                (hba->clk_gating.state != REQ_CLKS_OFF)) {
1781                hba->clk_gating.state = CLKS_ON;
1782                trace_ufshcd_clk_gating(dev_name(hba->dev),
1783                                        hba->clk_gating.state);
1784                goto rel_lock;
1785        }
1786
1787        if (hba->clk_gating.active_reqs
1788                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1789                || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1790                || hba->active_uic_cmd || hba->uic_async_done)
1791                goto rel_lock;
1792
1793        spin_unlock_irqrestore(hba->host->host_lock, flags);
1794
1795        /* put the link into hibern8 mode before turning off clocks */
1796        if (ufshcd_can_hibern8_during_gating(hba)) {
1797                ret = ufshcd_uic_hibern8_enter(hba);
1798                if (ret) {
1799                        hba->clk_gating.state = CLKS_ON;
1800                        dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1801                                        __func__, ret);
1802                        trace_ufshcd_clk_gating(dev_name(hba->dev),
1803                                                hba->clk_gating.state);
1804                        goto out;
1805                }
1806                ufshcd_set_link_hibern8(hba);
1807        }
1808
1809        ufshcd_disable_irq(hba);
1810
1811        ufshcd_setup_clocks(hba, false);
1812
1813        /* Put the host controller in low power mode if possible */
1814        ufshcd_hba_vreg_set_lpm(hba);
1815        /*
1816         * If a request to turn the clocks back on arrived while they were
1817         * being gated, the state will have been changed to REQ_CLKS_ON.
1818         * In that case keep the state as REQ_CLKS_ON, which correctly
1819         * implies that the clocks are off and a request to turn them on is
1820         * pending. This keeps the state machine intact and ultimately
1821         * prevents the cancel work from being run multiple times when new
1822         * requests arrive before the current cancel work is done.
1823         */
1824        spin_lock_irqsave(hba->host->host_lock, flags);
1825        if (hba->clk_gating.state == REQ_CLKS_OFF) {
1826                hba->clk_gating.state = CLKS_OFF;
1827                trace_ufshcd_clk_gating(dev_name(hba->dev),
1828                                        hba->clk_gating.state);
1829        }
1830rel_lock:
1831        spin_unlock_irqrestore(hba->host->host_lock, flags);
1832out:
1833        return;
1834}
1835
1836/* host lock must be held before calling this variant */
1837static void __ufshcd_release(struct ufs_hba *hba)
1838{
1839        if (!ufshcd_is_clkgating_allowed(hba))
1840                return;
1841
1842        hba->clk_gating.active_reqs--;
1843
1844        if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1845            hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1846            hba->outstanding_tasks ||
1847            hba->active_uic_cmd || hba->uic_async_done ||
1848            hba->clk_gating.state == CLKS_OFF)
1849                return;
1850
1851        hba->clk_gating.state = REQ_CLKS_OFF;
1852        trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1853        queue_delayed_work(hba->clk_gating.clk_gating_workq,
1854                           &hba->clk_gating.gate_work,
1855                           msecs_to_jiffies(hba->clk_gating.delay_ms));
1856}
1857
1858void ufshcd_release(struct ufs_hba *hba)
1859{
1860        unsigned long flags;
1861
1862        spin_lock_irqsave(hba->host->host_lock, flags);
1863        __ufshcd_release(hba);
1864        spin_unlock_irqrestore(hba->host->host_lock, flags);
1865}
1866EXPORT_SYMBOL_GPL(ufshcd_release);
1867
1868static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1869                struct device_attribute *attr, char *buf)
1870{
1871        struct ufs_hba *hba = dev_get_drvdata(dev);
1872
1873        return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1874}
1875
1876static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1877                struct device_attribute *attr, const char *buf, size_t count)
1878{
1879        struct ufs_hba *hba = dev_get_drvdata(dev);
1880        unsigned long flags, value;
1881
1882        if (kstrtoul(buf, 0, &value))
1883                return -EINVAL;
1884
1885        spin_lock_irqsave(hba->host->host_lock, flags);
1886        hba->clk_gating.delay_ms = value;
1887        spin_unlock_irqrestore(hba->host->host_lock, flags);
1888        return count;
1889}
1890
1891static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1892                struct device_attribute *attr, char *buf)
1893{
1894        struct ufs_hba *hba = dev_get_drvdata(dev);
1895
1896        return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
1897}
1898
1899static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1900                struct device_attribute *attr, const char *buf, size_t count)
1901{
1902        struct ufs_hba *hba = dev_get_drvdata(dev);
1903        unsigned long flags;
1904        u32 value;
1905
1906        if (kstrtou32(buf, 0, &value))
1907                return -EINVAL;
1908
1909        value = !!value;
1910
1911        spin_lock_irqsave(hba->host->host_lock, flags);
1912        if (value == hba->clk_gating.is_enabled)
1913                goto out;
1914
1915        if (value)
1916                __ufshcd_release(hba);
1917        else
1918                hba->clk_gating.active_reqs++;
1919
1920        hba->clk_gating.is_enabled = value;
1921out:
1922        spin_unlock_irqrestore(hba->host->host_lock, flags);
1923        return count;
1924}
1925
1926static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
1927{
1928        hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1929        hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1930        sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1931        hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1932        hba->clk_gating.delay_attr.attr.mode = 0644;
1933        if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1934                dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1935
1936        hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1937        hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1938        sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1939        hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1940        hba->clk_gating.enable_attr.attr.mode = 0644;
1941        if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1942                dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1943}
1944
1945static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
1946{
1947        if (hba->clk_gating.delay_attr.attr.name)
1948                device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1949        if (hba->clk_gating.enable_attr.attr.name)
1950                device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1951}
1952
1953static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1954{
1955        char wq_name[sizeof("ufs_clk_gating_00")];
1956
1957        if (!ufshcd_is_clkgating_allowed(hba))
1958                return;
1959
1960        hba->clk_gating.state = CLKS_ON;
1961
1962        hba->clk_gating.delay_ms = 150;
1963        INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1964        INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1965
1966        snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1967                 hba->host->host_no);
1968        hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1969                                        WQ_MEM_RECLAIM | WQ_HIGHPRI);
1970
1971        ufshcd_init_clk_gating_sysfs(hba);
1972
1973        hba->clk_gating.is_enabled = true;
1974        hba->clk_gating.is_initialized = true;
1975}
1976
1977static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1978{
1979        if (!hba->clk_gating.is_initialized)
1980                return;
1981        ufshcd_remove_clk_gating_sysfs(hba);
1982        cancel_work_sync(&hba->clk_gating.ungate_work);
1983        cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1984        destroy_workqueue(hba->clk_gating.clk_gating_workq);
1985        hba->clk_gating.is_initialized = false;
1986}
1987
1988/* Takes and releases the host lock internally; do not call with it held. */
1989static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1990{
1991        bool queue_resume_work = false;
1992        ktime_t curr_t = ktime_get();
1993        unsigned long flags;
1994
1995        if (!ufshcd_is_clkscaling_supported(hba))
1996                return;
1997
1998        spin_lock_irqsave(hba->host->host_lock, flags);
1999        if (!hba->clk_scaling.active_reqs++)
2000                queue_resume_work = true;
2001
2002        if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2003                spin_unlock_irqrestore(hba->host->host_lock, flags);
2004                return;
2005        }
2006
2007        if (queue_resume_work)
2008                queue_work(hba->clk_scaling.workq,
2009                           &hba->clk_scaling.resume_work);
2010
2011        if (!hba->clk_scaling.window_start_t) {
2012                hba->clk_scaling.window_start_t = curr_t;
2013                hba->clk_scaling.tot_busy_t = 0;
2014                hba->clk_scaling.is_busy_started = false;
2015        }
2016
2017        if (!hba->clk_scaling.is_busy_started) {
2018                hba->clk_scaling.busy_start_t = curr_t;
2019                hba->clk_scaling.is_busy_started = true;
2020        }
2021        spin_unlock_irqrestore(hba->host->host_lock, flags);
2022}
2023
2024static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2025{
2026        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2027        unsigned long flags;
2028
2029        if (!ufshcd_is_clkscaling_supported(hba))
2030                return;
2031
2032        spin_lock_irqsave(hba->host->host_lock, flags);
2033        hba->clk_scaling.active_reqs--;
2034        if (!hba->outstanding_reqs && scaling->is_busy_started) {
2035                scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2036                                        scaling->busy_start_t));
2037                scaling->busy_start_t = 0;
2038                scaling->is_busy_started = false;
2039        }
2040        spin_unlock_irqrestore(hba->host->host_lock, flags);
2041}
2042
2043static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2044{
2045        if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2046                return READ;
2047        else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2048                return WRITE;
2049        else
2050                return -EINVAL;
2051}
2052
2053static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2054                                                struct ufshcd_lrb *lrbp)
2055{
2056        struct ufs_hba_monitor *m = &hba->monitor;
2057
2058        return (m->enabled && lrbp && lrbp->cmd &&
2059                (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2060                ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2061}
2062
2063static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2064{
2065        int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2066        unsigned long flags;
2067
2068        spin_lock_irqsave(hba->host->host_lock, flags);
2069        if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2070                hba->monitor.busy_start_ts[dir] = ktime_get();
2071        spin_unlock_irqrestore(hba->host->host_lock, flags);
2072}
2073
2074static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2075{
2076        int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2077        unsigned long flags;
2078
2079        spin_lock_irqsave(hba->host->host_lock, flags);
2080        if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2081                struct request *req = lrbp->cmd->request;
2082                struct ufs_hba_monitor *m = &hba->monitor;
2083                ktime_t now, inc, lat;
2084
2085                now = lrbp->compl_time_stamp;
2086                inc = ktime_sub(now, m->busy_start_ts[dir]);
2087                m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2088                m->nr_sec_rw[dir] += blk_rq_sectors(req);
2089
2090                /* Update latencies */
2091                m->nr_req[dir]++;
2092                lat = ktime_sub(now, lrbp->issue_time_stamp);
2093                m->lat_sum[dir] += lat;
2094                if (m->lat_max[dir] < lat || !m->lat_max[dir])
2095                        m->lat_max[dir] = lat;
2096                if (m->lat_min[dir] > lat || !m->lat_min[dir])
2097                        m->lat_min[dir] = lat;
2098
2099                m->nr_queued[dir]--;
2100                /* Push forward the busy start of monitor */
2101                m->busy_start_ts[dir] = now;
2102        }
2103        spin_unlock_irqrestore(hba->host->host_lock, flags);
2104}
2105
2106/**
2107 * ufshcd_send_command - Send SCSI or device management commands
2108 * @hba: per adapter instance
2109 * @task_tag: Task tag of the command
2110 */
2111static inline
2112void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2113{
2114        struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2115
2116        lrbp->issue_time_stamp = ktime_get();
2117        lrbp->compl_time_stamp = ktime_set(0, 0);
2118        ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
2119        ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2120        ufshcd_clk_scaling_start_busy(hba);
2121        if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2122                ufshcd_start_monitor(hba, lrbp);
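        /*
         * With a controller that exposes the UTP transfer request list
         * completion notification register, completions are acknowledged
         * through that register instead of being derived from the doorbell,
         * so the doorbell can be rung without taking the host lock. Legacy
         * controllers still need the lock to serialize against the
         * completion path.
         */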
2123        if (ufshcd_has_utrlcnr(hba)) {
2124                set_bit(task_tag, &hba->outstanding_reqs);
2125                ufshcd_writel(hba, 1 << task_tag,
2126                              REG_UTP_TRANSFER_REQ_DOOR_BELL);
2127        } else {
2128                unsigned long flags;
2129
2130                spin_lock_irqsave(hba->host->host_lock, flags);
2131                set_bit(task_tag, &hba->outstanding_reqs);
2132                ufshcd_writel(hba, 1 << task_tag,
2133                              REG_UTP_TRANSFER_REQ_DOOR_BELL);
2134                spin_unlock_irqrestore(hba->host->host_lock, flags);
2135        }
2136        /* Make sure that doorbell is committed immediately */
2137        wmb();
2138}
2139
2140/**
2141 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2142 * @lrbp: pointer to local reference block
2143 */
2144static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2145{
2146        int len;
2147        if (lrbp->sense_buffer &&
2148            ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2149                int len_to_copy;
2150
2151                len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2152                len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2153
2154                memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2155                       len_to_copy);
2156        }
2157}
2158
2159/**
2160 * ufshcd_copy_query_response() - Copy the Query Response and the data
2161 * descriptor
2162 * @hba: per adapter instance
2163 * @lrbp: pointer to local reference block
2164 */
2165static
2166int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2167{
2168        struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2169
2170        memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2171
2172        /* Get the descriptor */
2173        if (hba->dev_cmd.query.descriptor &&
2174            lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2175                u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2176                                GENERAL_UPIU_REQUEST_SIZE;
2177                u16 resp_len;
2178                u16 buf_len;
2179
2180                /* data segment length */
2181                resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2182                                                MASK_QUERY_DATA_SEG_LEN;
2183                buf_len = be16_to_cpu(
2184                                hba->dev_cmd.query.request.upiu_req.length);
2185                if (likely(buf_len >= resp_len)) {
2186                        memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2187                } else {
2188                        dev_warn(hba->dev,
2189                                 "%s: rsp size %d is bigger than buffer size %d",
2190                                 __func__, resp_len, buf_len);
2191                        return -EINVAL;
2192                }
2193        }
2194
2195        return 0;
2196}
2197
2198/**
2199 * ufshcd_hba_capabilities - Read controller capabilities
2200 * @hba: per adapter instance
2201 *
2202 * Return: 0 on success, negative on error.
2203 */
2204static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2205{
2206        int err;
2207
2208        hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2209
2210        /* nutrs and nutmrs are 0 based values */
2211        hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2212        hba->nutmrs =
2213        ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2214
2215        /* Read crypto capabilities */
2216        err = ufshcd_hba_init_crypto_capabilities(hba);
2217        if (err)
2218                dev_err(hba->dev, "crypto setup failed\n");
2219
2220        return err;
2221}
2222
2223/**
2224 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2225 *                            to accept UIC commands
2226 * @hba: per adapter instance
2227 * Return true on success, else false
2228 */
2229static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2230{
2231        if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2232                return true;
2233        else
2234                return false;
2235}
2236
2237/**
2238 * ufshcd_get_upmcrs - Get the power mode change request status
2239 * @hba: Pointer to adapter instance
2240 *
2241 * This function gets the UPMCRS field of the HCS register.
2242 * Returns the value of the UPMCRS field.
2243 */
2244static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2245{
2246        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2247}
2248
2249/**
2250 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2251 * @hba: per adapter instance
2252 * @uic_cmd: UIC command
2253 *
2254 * Mutex must be held.
2255 */
2256static inline void
2257ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2258{
2259        WARN_ON(hba->active_uic_cmd);
2260
2261        hba->active_uic_cmd = uic_cmd;
2262
2263        /* Write Args */
2264        ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2265        ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2266        ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2267
2268        ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2269
2270        /* Write UIC Cmd */
2271        ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2272                      REG_UIC_COMMAND);
2273}
2274
2275/**
2276 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2277 * @hba: per adapter instance
2278 * @uic_cmd: UIC command
2279 *
2280 * Must be called with mutex held.
2281 * Returns 0 only on success.
2282 */
2283static int
2284ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2285{
2286        int ret;
2287        unsigned long flags;
2288
2289        if (wait_for_completion_timeout(&uic_cmd->done,
2290                                        msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2291                ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2292        } else {
2293                ret = -ETIMEDOUT;
2294                dev_err(hba->dev,
2295                        "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2296                        uic_cmd->command, uic_cmd->argument3);
2297
2298                if (!uic_cmd->cmd_active) {
2299                        dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2300                                __func__);
2301                        ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2302                }
2303        }
2304
2305        spin_lock_irqsave(hba->host->host_lock, flags);
2306        hba->active_uic_cmd = NULL;
2307        spin_unlock_irqrestore(hba->host->host_lock, flags);
2308
2309        return ret;
2310}
2311
2312/**
2313 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2314 * @hba: per adapter instance
2315 * @uic_cmd: UIC command
2316 * @completion: initialize the completion only if this is set to true
2317 *
2318 * Identical to ufshcd_send_uic_cmd() except that the caller must already
2319 * hold the uic_cmd_mutex and the host_lock.
2320 * Returns 0 only on success.
2321 */
2322static int
2323__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2324                      bool completion)
2325{
2326        if (!ufshcd_ready_for_uic_cmd(hba)) {
2327                dev_err(hba->dev,
2328                        "Controller not ready to accept UIC commands\n");
2329                return -EIO;
2330        }
2331
2332        if (completion)
2333                init_completion(&uic_cmd->done);
2334
2335        uic_cmd->cmd_active = 1;
2336        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2337
2338        return 0;
2339}
2340
2341/**
2342 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2343 * @hba: per adapter instance
2344 * @uic_cmd: UIC command
2345 *
2346 * Returns 0 only on success.
2347 */
2348int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2349{
2350        int ret;
2351        unsigned long flags;
2352
2353        ufshcd_hold(hba, false);
2354        mutex_lock(&hba->uic_cmd_mutex);
2355        ufshcd_add_delay_before_dme_cmd(hba);
2356
2357        spin_lock_irqsave(hba->host->host_lock, flags);
2358        ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2359        spin_unlock_irqrestore(hba->host->host_lock, flags);
2360        if (!ret)
2361                ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2362
2363        mutex_unlock(&hba->uic_cmd_mutex);
2364
2365        ufshcd_release(hba);
2366        return ret;
2367}
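
/*
 * A minimal sketch of how a caller drives this helper, loosely modelled on
 * the DME attribute accessors elsewhere in this file. The attribute selector
 * value is an arbitrary example:
 *
 *	struct uic_command uic_cmd = {0};
 *	u32 mib_val;
 *	int ret;
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(0x15A8);	// arbitrary attribute selector
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		mib_val = uic_cmd.argument3;	// DME_GET result comes back in argument3
 */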
2368
2369/**
2370 * ufshcd_map_sg - Map scatter-gather list to prdt
2371 * @hba: per adapter instance
2372 * @lrbp: pointer to local reference block
2373 *
2374 * Returns 0 in case of success, non-zero value in case of failure
2375 */
2376static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2377{
2378        struct ufshcd_sg_entry *prd_table;
2379        struct scatterlist *sg;
2380        struct scsi_cmnd *cmd;
2381        int sg_segments;
2382        int i;
2383
2384        cmd = lrbp->cmd;
2385        sg_segments = scsi_dma_map(cmd);
2386        if (sg_segments < 0)
2387                return sg_segments;
2388
2389        if (sg_segments) {
2390
2391                if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2392                        lrbp->utr_descriptor_ptr->prd_table_length =
2393                                cpu_to_le16((sg_segments *
2394                                        sizeof(struct ufshcd_sg_entry)));
2395                else
2396                        lrbp->utr_descriptor_ptr->prd_table_length =
2397                                cpu_to_le16((u16) (sg_segments));
2398
2399                prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2400
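                /*
                 * Each PRDT entry carries the data byte count as a zero-based
                 * value (hence the "- 1" below) and the DMA address split into
                 * lower and upper 32-bit halves.
                 */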
2401                scsi_for_each_sg(cmd, sg, sg_segments, i) {
2402                        prd_table[i].size  =
2403                                cpu_to_le32(((u32) sg_dma_len(sg))-1);
2404                        prd_table[i].base_addr =
2405                                cpu_to_le32(lower_32_bits(sg->dma_address));
2406                        prd_table[i].upper_addr =
2407                                cpu_to_le32(upper_32_bits(sg->dma_address));
2408                        prd_table[i].reserved = 0;
2409                }
2410        } else {
2411                lrbp->utr_descriptor_ptr->prd_table_length = 0;
2412        }
2413
2414        return 0;
2415}
2416
2417/**
2418 * ufshcd_enable_intr - enable interrupts
2419 * @hba: per adapter instance
2420 * @intrs: interrupt bits
2421 */
2422static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2423{
2424        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2425
2426        if (hba->ufs_version == ufshci_version(1, 0)) {
2427                u32 rw;
2428                rw = set & INTERRUPT_MASK_RW_VER_10;
2429                set = rw | ((set ^ intrs) & intrs);
2430        } else {
2431                set |= intrs;
2432        }
2433
2434        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2435}
2436
2437/**
2438 * ufshcd_disable_intr - disable interrupts
2439 * @hba: per adapter instance
2440 * @intrs: interrupt bits
2441 */
2442static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2443{
2444        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2445
2446        if (hba->ufs_version == ufshci_version(1, 0)) {
2447                u32 rw;
2448                rw = (set & INTERRUPT_MASK_RW_VER_10) &
2449                        ~(intrs & INTERRUPT_MASK_RW_VER_10);
2450                set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2451
2452        } else {
2453                set &= ~intrs;
2454        }
2455
2456        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2457}
2458
2459/**
2460 * ufshcd_prepare_req_desc_hdr() - Fill the UTP transfer request descriptor
2461 * header according to the request
2462 * @lrbp: pointer to local reference block
2463 * @upiu_flags: flags required in the header
2464 * @cmd_dir: data direction of the request
2465 */
2466static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2467                        u8 *upiu_flags, enum dma_data_direction cmd_dir)
2468{
2469        struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2470        u32 data_direction;
2471        u32 dword_0;
2472        u32 dword_1 = 0;
2473        u32 dword_3 = 0;
2474
2475        if (cmd_dir == DMA_FROM_DEVICE) {
2476                data_direction = UTP_DEVICE_TO_HOST;
2477                *upiu_flags = UPIU_CMD_FLAGS_READ;
2478        } else if (cmd_dir == DMA_TO_DEVICE) {
2479                data_direction = UTP_HOST_TO_DEVICE;
2480                *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2481        } else {
2482                data_direction = UTP_NO_DATA_TRANSFER;
2483                *upiu_flags = UPIU_CMD_FLAGS_NONE;
2484        }
2485
2486        dword_0 = data_direction | (lrbp->command_type
2487                                << UPIU_COMMAND_TYPE_OFFSET);
2488        if (lrbp->intr_cmd)
2489                dword_0 |= UTP_REQ_DESC_INT_CMD;
2490
2491        /* Prepare crypto related dwords */
2492        ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2493
2494        /* Transfer request descriptor header fields */
2495        req_desc->header.dword_0 = cpu_to_le32(dword_0);
2496        req_desc->header.dword_1 = cpu_to_le32(dword_1);
2497        /*
2498         * Assign an invalid value to the command status. The controller
2499         * updates the OCS field with the actual command status on
2500         * completion.
2501         */
2502        req_desc->header.dword_2 =
2503                cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2504        req_desc->header.dword_3 = cpu_to_le32(dword_3);
2505
2506        req_desc->prd_table_length = 0;
2507}
2508
2509/**
2510 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2511 * for SCSI commands
2512 * @lrbp: local reference block pointer
2513 * @upiu_flags: flags
2514 */
2515static
2516void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2517{
2518        struct scsi_cmnd *cmd = lrbp->cmd;
2519        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2520        unsigned short cdb_len;
2521
2522        /* command descriptor fields */
2523        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2524                                UPIU_TRANSACTION_COMMAND, upiu_flags,
2525                                lrbp->lun, lrbp->task_tag);
2526        ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2527                                UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2528
2529        /* Total EHS length and Data segment length will be zero */
2530        ucd_req_ptr->header.dword_2 = 0;
2531
2532        ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2533
2534        cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2535        memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2536        memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2537
2538        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2539}
2540
2541/**
2542 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
2543 * for query requests
2544 * @hba: UFS hba
2545 * @lrbp: local reference block pointer
2546 * @upiu_flags: flags
2547 */
2548static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2549                                struct ufshcd_lrb *lrbp, u8 upiu_flags)
2550{
2551        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2552        struct ufs_query *query = &hba->dev_cmd.query;
2553        u16 len = be16_to_cpu(query->request.upiu_req.length);
2554
2555        /* Query request header */
2556        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2557                        UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2558                        lrbp->lun, lrbp->task_tag);
2559        ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2560                        0, query->request.query_func, 0, 0);
2561
2562        /* Data segment length is only needed for WRITE_DESC */
2563        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2564                ucd_req_ptr->header.dword_2 =
2565                        UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2566        else
2567                ucd_req_ptr->header.dword_2 = 0;
2568
2569        /* Copy the Query Request buffer as is */
2570        memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2571                        QUERY_OSF_SIZE);
2572
2573        /* Copy the Descriptor */
2574        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2575                memcpy(ucd_req_ptr + 1, query->descriptor, len);
2576
2577        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2578}
2579
2580static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2581{
2582        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2583
2584        memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2585
2586        /* command descriptor fields */
2587        ucd_req_ptr->header.dword_0 =
2588                UPIU_HEADER_DWORD(
2589                        UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2590        /* clear rest of the fields of basic header */
2591        ucd_req_ptr->header.dword_1 = 0;
2592        ucd_req_ptr->header.dword_2 = 0;
2593
2594        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2595}
2596
2597/**
2598 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
2599 *                           for Device Management Purposes
2600 * @hba: per adapter instance
2601 * @lrbp: pointer to local reference block
2602 */
2603static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2604                                      struct ufshcd_lrb *lrbp)
2605{
2606        u8 upiu_flags;
2607        int ret = 0;
2608
2609        if (hba->ufs_version <= ufshci_version(1, 1))
2610                lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2611        else
2612                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2613
2614        ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2615        if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2616                ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2617        else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2618                ufshcd_prepare_utp_nop_upiu(lrbp);
2619        else
2620                ret = -EINVAL;
2621
2622        return ret;
2623}
2624
2625/**
2626 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2627 *                         for SCSI Purposes
2628 * @hba: per adapter instance
2629 * @lrbp: pointer to local reference block
2630 */
2631static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2632{
2633        u8 upiu_flags;
2634        int ret = 0;
2635
2636        if (hba->ufs_version <= ufshci_version(1, 1))
2637                lrbp->command_type = UTP_CMD_TYPE_SCSI;
2638        else
2639                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2640
2641        if (likely(lrbp->cmd)) {
2642                ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2643                                                lrbp->cmd->sc_data_direction);
2644                ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2645        } else {
2646                ret = -EINVAL;
2647        }
2648
2649        return ret;
2650}
2651
2652/**
2653 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2654 * @upiu_wlun_id: UPIU W-LUN id
2655 *
2656 * Returns SCSI W-LUN id
2657 */
2658static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2659{
2660        return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2661}
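
/*
 * A worked example of the mapping above, assuming the usual definitions of
 * UFS_UPIU_WLUN_ID (bit 7) and SCSI_W_LUN_BASE (0xc100): a UPIU W-LUN id of
 * 0xC4 would map to (0xC4 & ~0x80) | 0xc100 = 0xc144 on the SCSI side. The
 * numbers are for illustration only.
 */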
2662
2663static inline bool is_rpmb_wlun(struct scsi_device *sdev)
2664{
2665        return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
2666}
2667
2668static inline bool is_device_wlun(struct scsi_device *sdev)
2669{
2670        return sdev->lun ==
2671                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2672}
2673
2674static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2675{
2676        struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2677        struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2678        dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2679                i * sizeof(struct utp_transfer_cmd_desc);
2680        u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2681                                       response_upiu);
2682        u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2683
2684        lrb->utr_descriptor_ptr = utrdlp + i;
2685        lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2686                i * sizeof(struct utp_transfer_req_desc);
2687        lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2688        lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2689        lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2690        lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2691        lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2692        lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2693}
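
/*
 * For reference, the per-tag command descriptor that ufshcd_init_lrb() carves
 * up is laid out as follows (offsets taken from struct utp_transfer_cmd_desc):
 *
 *	+---------------------------+  <- cmd_desc_element_addr
 *	| command UPIU              |     lrb->ucd_req_ptr / ucd_req_dma_addr
 *	+---------------------------+  <- + response_offset
 *	| response UPIU             |     lrb->ucd_rsp_ptr / ucd_rsp_dma_addr
 *	+---------------------------+  <- + prdt_offset
 *	| PRDT (scatter/gather)     |     lrb->ucd_prdt_ptr / ucd_prdt_dma_addr
 *	+---------------------------+
 */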
2694
2695/**
2696 * ufshcd_queuecommand - main entry point for SCSI requests
2697 * @host: SCSI host pointer
2698 * @cmd: command from SCSI Midlayer
2699 *
2700 * Returns 0 for success, non-zero in case of failure
2701 */
2702static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2703{
2704        struct ufshcd_lrb *lrbp;
2705        struct ufs_hba *hba;
2706        int tag;
2707        int err = 0;
2708
2709        hba = shost_priv(host);
2710
2711        tag = cmd->request->tag;
2712        if (!ufshcd_valid_tag(hba, tag)) {
2713                dev_err(hba->dev,
2714                        "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2715                        __func__, tag, cmd, cmd->request);
2716                BUG();
2717        }
2718
2719        if (!down_read_trylock(&hba->clk_scaling_lock))
2720                return SCSI_MLQUEUE_HOST_BUSY;
2721
2722        switch (hba->ufshcd_state) {
2723        case UFSHCD_STATE_OPERATIONAL:
2724        case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2725                break;
2726        case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2727                /*
2728                 * pm_runtime_get_sync() is used at error handling preparation
2729                 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2730                 * PM ops, it can never be finished if we let SCSI layer keep
2731                 * retrying it, which gets err handler stuck forever. Neither
2732                 * can we let the scsi cmd pass through, because UFS is in bad
2733                 * state, the scsi cmd may eventually time out, which will get
2734                 * err handler blocked for too long. So, just fail the scsi cmd
2735                 * sent from PM ops, err handler can recover PM error anyways.
2736                 */
2737                if (hba->pm_op_in_progress) {
2738                        hba->force_reset = true;
2739                        set_host_byte(cmd, DID_BAD_TARGET);
2740                        cmd->scsi_done(cmd);
2741                        goto out;
2742                }
2743                fallthrough;
2744        case UFSHCD_STATE_RESET:
2745                err = SCSI_MLQUEUE_HOST_BUSY;
2746                goto out;
2747        case UFSHCD_STATE_ERROR:
2748                set_host_byte(cmd, DID_ERROR);
2749                cmd->scsi_done(cmd);
2750                goto out;
2751        default:
2752                dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2753                                __func__, hba->ufshcd_state);
2754                set_host_byte(cmd, DID_BAD_TARGET);
2755                cmd->scsi_done(cmd);
2756                goto out;
2757        }
2758
2759        hba->req_abort_count = 0;
2760
2761        err = ufshcd_hold(hba, true);
2762        if (err) {
2763                err = SCSI_MLQUEUE_HOST_BUSY;
2764                goto out;
2765        }
2766        WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2767                (hba->clk_gating.state != CLKS_ON));
2768
2769        if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
2770                if (hba->pm_op_in_progress)
2771                        set_host_byte(cmd, DID_BAD_TARGET);
2772                else
2773                        err = SCSI_MLQUEUE_HOST_BUSY;
2774                ufshcd_release(hba);
2775                goto out;
2776        }
2777
2778        lrbp = &hba->lrb[tag];
2779        WARN_ON(lrbp->cmd);
2780        lrbp->cmd = cmd;
2781        lrbp->sense_bufflen = UFS_SENSE_SIZE;
2782        lrbp->sense_buffer = cmd->sense_buffer;
2783        lrbp->task_tag = tag;
2784        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2785        lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2786
2787        ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
2788
2789        lrbp->req_abort_skip = false;
2790
2791        ufshcd_comp_scsi_upiu(hba, lrbp);
2792
2793        err = ufshcd_map_sg(hba, lrbp);
2794        if (err) {
2795                lrbp->cmd = NULL;
2796                ufshcd_release(hba);
2797                goto out;
2798        }
2799        /* Make sure descriptors are ready before ringing the doorbell */
2800        wmb();
2801
2802        ufshcd_send_command(hba, tag);
2803out:
2804        up_read(&hba->clk_scaling_lock);
2805        return err;
2806}
2807
2808static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2809                struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2810{
2811        lrbp->cmd = NULL;
2812        lrbp->sense_bufflen = 0;
2813        lrbp->sense_buffer = NULL;
2814        lrbp->task_tag = tag;
2815        lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2816        lrbp->intr_cmd = true; /* No interrupt aggregation */
2817        ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2818        hba->dev_cmd.type = cmd_type;
2819
2820        return ufshcd_compose_devman_upiu(hba, lrbp);
2821}
2822
2823static int
2824ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2825{
2826        int err = 0;
2827        unsigned long flags;
2828        u32 mask = 1 << tag;
2829
2830        /* clear outstanding transaction before retry */
2831        spin_lock_irqsave(hba->host->host_lock, flags);
2832        ufshcd_utrl_clear(hba, tag);
2833        spin_unlock_irqrestore(hba->host->host_lock, flags);
2834
2835        /*
2836         * wait for h/w to clear corresponding bit in door-bell.
2837         * max. wait is 1 sec.
2838         */
2839        err = ufshcd_wait_for_register(hba,
2840                        REG_UTP_TRANSFER_REQ_DOOR_BELL,
2841                        mask, ~mask, 1000, 1000);
2842
2843        return err;
2844}
2845
2846static int
2847ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2848{
2849        struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2850
2851        /* Get the UPIU response */
2852        query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2853                                UPIU_RSP_CODE_OFFSET;
2854        return query_res->response;
2855}
2856
2857/**
2858 * ufshcd_dev_cmd_completion() - handles device management command responses
2859 * @hba: per adapter instance
2860 * @lrbp: pointer to local reference block
2861 */
2862static int
2863ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2864{
2865        int resp;
2866        int err = 0;
2867
2868        hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2869        resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2870
2871        switch (resp) {
2872        case UPIU_TRANSACTION_NOP_IN:
2873                if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2874                        err = -EINVAL;
2875                        dev_err(hba->dev, "%s: unexpected response %x\n",
2876                                        __func__, resp);
2877                }
2878                break;
2879        case UPIU_TRANSACTION_QUERY_RSP:
2880                err = ufshcd_check_query_response(hba, lrbp);
2881                if (!err)
2882                        err = ufshcd_copy_query_response(hba, lrbp);
2883                break;
2884        case UPIU_TRANSACTION_REJECT_UPIU:
2885                /* TODO: handle Reject UPIU Response */
2886                err = -EPERM;
2887                dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2888                                __func__);
2889                break;
2890        default:
2891                err = -EINVAL;
2892                dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2893                                __func__, resp);
2894                break;
2895        }
2896
2897        return err;
2898}
2899
2900static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2901                struct ufshcd_lrb *lrbp, int max_timeout)
2902{
2903        int err = 0;
2904        unsigned long time_left;
2905        unsigned long flags;
2906
2907        time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2908                        msecs_to_jiffies(max_timeout));
2909
2910        /* Make sure descriptors are ready before ringing the doorbell */
2911        wmb();
2912        spin_lock_irqsave(hba->host->host_lock, flags);
2913        hba->dev_cmd.complete = NULL;
2914        if (likely(time_left)) {
2915                err = ufshcd_get_tr_ocs(lrbp);
2916                if (!err)
2917                        err = ufshcd_dev_cmd_completion(hba, lrbp);
2918        }
2919        spin_unlock_irqrestore(hba->host->host_lock, flags);
2920
2921        if (!time_left) {
2922                err = -ETIMEDOUT;
2923                dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2924                        __func__, lrbp->task_tag);
2925                if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2926                        /* successfully cleared the command, retry if needed */
2927                        err = -EAGAIN;
2928                /*
2929                 * in case of an error, after clearing the doorbell,
2930                 * we also need to clear the outstanding_request
2931                 * field in hba
2932                 */
2933                ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2934        }
2935
2936        return err;
2937}
2938
2939/**
2940 * ufshcd_exec_dev_cmd - API for sending device management requests
2941 * @hba: UFS hba
2942 * @cmd_type: specifies the type (NOP, Query...)
2943 * @timeout: timeout in milliseconds
2944 *
2945 * NOTE: Since there is only one available tag for device management commands,
2946 * the caller is expected to hold the hba->dev_cmd.lock mutex.
2947 */
2948static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2949                enum dev_cmd_type cmd_type, int timeout)
2950{
2951        struct request_queue *q = hba->cmd_queue;
2952        struct request *req;
2953        struct ufshcd_lrb *lrbp;
2954        int err;
2955        int tag;
2956        struct completion wait;
2957
2958        down_read(&hba->clk_scaling_lock);
2959
2960        /*
2961         * Get a free slot; blk_get_request() sleeps if no tag is
2962         * available, but the maximum wait time is still bounded by the
2963         * SCSI request timeout.
2964         */
2965        req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2966        if (IS_ERR(req)) {
2967                err = PTR_ERR(req);
2968                goto out_unlock;
2969        }
2970        tag = req->tag;
2971        WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2972        /* Set the timeout such that the SCSI error handler is not activated. */
2973        req->timeout = msecs_to_jiffies(2 * timeout);
2974        blk_mq_start_request(req);
2975
2976        if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
2977                err = -EBUSY;
2978                goto out;
2979        }
2980
2981        init_completion(&wait);
2982        lrbp = &hba->lrb[tag];
2983        WARN_ON(lrbp->cmd);
2984        err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2985        if (unlikely(err))
2986                goto out;
2987
2988        hba->dev_cmd.complete = &wait;
2989
2990        ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
2991        /* Make sure descriptors are ready before ringing the doorbell */
2992        wmb();
2993
2994        ufshcd_send_command(hba, tag);
2995        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2996        ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
2997                                    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
2998
2999out:
3000        blk_put_request(req);
3001out_unlock:
3002        up_read(&hba->clk_scaling_lock);
3003        return err;
3004}
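
/*
 * Illustrative sketch (not a verbatim copy of any caller in this file):
 * how ufshcd_exec_dev_cmd() is typically used to send a NOP OUT device
 * management command, assuming a valid hba and a sleepable context.
 *
 *	int err;
 *
 *	ufshcd_hold(hba, false);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */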
3005
3006/**
3007 * ufshcd_init_query() - init the query response and request parameters
3008 * @hba: per-adapter instance
3009 * @request: address of the request pointer to be initialized
3010 * @response: address of the response pointer to be initialized
3011 * @opcode: operation to perform
3012 * @idn: flag/attribute/descriptor idn to access
3013 * @index: index field (e.g. LU number) to access
3014 * @selector: query/flag/descriptor further identification
3015 */
3016static inline void ufshcd_init_query(struct ufs_hba *hba,
3017                struct ufs_query_req **request, struct ufs_query_res **response,
3018                enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3019{
3020        *request = &hba->dev_cmd.query.request;
3021        *response = &hba->dev_cmd.query.response;
3022        memset(*request, 0, sizeof(struct ufs_query_req));
3023        memset(*response, 0, sizeof(struct ufs_query_res));
3024        (*request)->upiu_req.opcode = opcode;
3025        (*request)->upiu_req.idn = idn;
3026        (*request)->upiu_req.index = index;
3027        (*request)->upiu_req.selector = selector;
3028}
3029
3030static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3031        enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3032{
3033        int ret;
3034        int retries;
3035
3036        for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3037                ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3038                if (ret)
3039                        dev_dbg(hba->dev,
3040                                "%s: failed with error %d, retries %d\n",
3041                                __func__, ret, retries);
3042                else
3043                        break;
3044        }
3045
3046        if (ret)
3047                dev_err(hba->dev,
3048                        "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3049                        __func__, opcode, idn, ret, retries);
3050        return ret;
3051}
3052
3053/**
3054 * ufshcd_query_flag() - API function for sending flag query requests
3055 * @hba: per-adapter instance
3056 * @opcode: flag query to perform
3057 * @idn: flag idn to access
3058 * @index: flag index to access
3059 * @flag_res: the flag value after the query request completes
3060 *
3061 * Returns 0 for success, non-zero in case of failure
3062 */
3063int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3064                        enum flag_idn idn, u8 index, bool *flag_res)
3065{
3066        struct ufs_query_req *request = NULL;
3067        struct ufs_query_res *response = NULL;
3068        int err, selector = 0;
3069        int timeout = QUERY_REQ_TIMEOUT;
3070
3071        BUG_ON(!hba);
3072
3073        ufshcd_hold(hba, false);
3074        mutex_lock(&hba->dev_cmd.lock);
3075        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3076                        selector);
3077
3078        switch (opcode) {
3079        case UPIU_QUERY_OPCODE_SET_FLAG:
3080        case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3081        case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3082                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3083                break;
3084        case UPIU_QUERY_OPCODE_READ_FLAG:
3085                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3086                if (!flag_res) {
3087                        /* No dummy reads */
3088                        dev_err(hba->dev, "%s: Invalid argument for read request\n",
3089                                        __func__);
3090                        err = -EINVAL;
3091                        goto out_unlock;
3092                }
3093                break;
3094        default:
3095                dev_err(hba->dev,
3096                        "%s: Expected query flag opcode but got = %d\n",
3097                        __func__, opcode);
3098                err = -EINVAL;
3099                goto out_unlock;
3100        }
3101
3102        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3103
3104        if (err) {
3105                dev_err(hba->dev,
3106                        "%s: Sending flag query for idn %d failed, err = %d\n",
3107                        __func__, idn, err);
3108                goto out_unlock;
3109        }
3110
3111        if (flag_res)
3112                *flag_res = (be32_to_cpu(response->upiu_res.value) &
3113                                MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3114
3115out_unlock:
3116        mutex_unlock(&hba->dev_cmd.lock);
3117        ufshcd_release(hba);
3118        return err;
3119}
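
/*
 * Illustrative sketch of a flag query, assuming a valid hba (flag IDN
 * taken from ufs.h; not a verbatim copy of existing callers):
 *
 *	bool flag_res;
 *	int err;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 *	if (!err && !flag_res)
 *		dev_dbg(hba->dev, "fDeviceInit already cleared\n");
 */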
3120
3121/**
3122 * ufshcd_query_attr - API function for sending attribute requests
3123 * @hba: per-adapter instance
3124 * @opcode: attribute opcode
3125 * @idn: attribute idn to access
3126 * @index: index field
3127 * @selector: selector field
3128 * @attr_val: the attribute value after the query request completes
3129 *
3130 * Returns 0 for success, non-zero in case of failure
3131 */
3132int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3133                      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3134{
3135        struct ufs_query_req *request = NULL;
3136        struct ufs_query_res *response = NULL;
3137        int err;
3138
3139        BUG_ON(!hba);
3140
3141        if (!attr_val) {
3142                dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3143                                __func__, opcode);
3144                return -EINVAL;
3145        }
3146
3147        ufshcd_hold(hba, false);
3148
3149        mutex_lock(&hba->dev_cmd.lock);
3150        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3151                        selector);
3152
3153        switch (opcode) {
3154        case UPIU_QUERY_OPCODE_WRITE_ATTR:
3155                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3156                request->upiu_req.value = cpu_to_be32(*attr_val);
3157                break;
3158        case UPIU_QUERY_OPCODE_READ_ATTR:
3159                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3160                break;
3161        default:
3162                dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3163                                __func__, opcode);
3164                err = -EINVAL;
3165                goto out_unlock;
3166        }
3167
3168        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3169
3170        if (err) {
3171                dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3172                                __func__, opcode, idn, index, err);
3173                goto out_unlock;
3174        }
3175
3176        *attr_val = be32_to_cpu(response->upiu_res.value);
3177
3178out_unlock:
3179        mutex_unlock(&hba->dev_cmd.lock);
3180        ufshcd_release(hba);
3181        return err;
3182}
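
/*
 * Illustrative sketch of an attribute query, assuming a valid hba
 * (attribute IDN taken from ufs.h; not a verbatim copy of existing
 * callers):
 *
 *	u32 icc_level = 0;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 *				&icc_level);
 *	if (!err)
 *		dev_dbg(hba->dev, "bActiveICCLevel = %u\n", icc_level);
 */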
3183
3184/**
3185 * ufshcd_query_attr_retry() - API function for sending a query
3186 * attribute request with retries
3187 * @hba: per-adapter instance
3188 * @opcode: attribute opcode
3189 * @idn: attribute idn to access
3190 * @index: index field
3191 * @selector: selector field
3192 * @attr_val: the attribute value after the query request
3193 * completes
3194 *
3195 * Returns 0 for success, non-zero in case of failure
3196 */
3197static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3198        enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3199        u32 *attr_val)
3200{
3201        int ret = 0;
3202        u32 retries;
3203
3204        for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3205                ret = ufshcd_query_attr(hba, opcode, idn, index,
3206                                                selector, attr_val);
3207                if (ret)
3208                        dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3209                                __func__, ret, retries);
3210                else
3211                        break;
3212        }
3213
3214        if (ret)
3215                dev_err(hba->dev,
3216                        "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3217                        __func__, idn, ret, QUERY_REQ_RETRIES);
3218        return ret;
3219}
3220
3221static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3222                        enum query_opcode opcode, enum desc_idn idn, u8 index,
3223                        u8 selector, u8 *desc_buf, int *buf_len)
3224{
3225        struct ufs_query_req *request = NULL;
3226        struct ufs_query_res *response = NULL;
3227        int err;
3228
3229        BUG_ON(!hba);
3230
3231        if (!desc_buf) {
3232                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3233                                __func__, opcode);
3234                return -EINVAL;
3235        }
3236
3237        if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3238                dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3239                                __func__, *buf_len);
3240                return -EINVAL;
3241        }
3242
3243        ufshcd_hold(hba, false);
3244
3245        mutex_lock(&hba->dev_cmd.lock);
3246        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3247                        selector);
3248        hba->dev_cmd.query.descriptor = desc_buf;
3249        request->upiu_req.length = cpu_to_be16(*buf_len);
3250
3251        switch (opcode) {
3252        case UPIU_QUERY_OPCODE_WRITE_DESC:
3253                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3254                break;
3255        case UPIU_QUERY_OPCODE_READ_DESC:
3256                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3257                break;
3258        default:
3259                dev_err(hba->dev,
3260                                "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3261                                __func__, opcode);
3262                err = -EINVAL;
3263                goto out_unlock;
3264        }
3265
3266        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3267
3268        if (err) {
3269                dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3270                                __func__, opcode, idn, index, err);
3271                goto out_unlock;
3272        }
3273
3274        *buf_len = be16_to_cpu(response->upiu_res.length);
3275
3276out_unlock:
3277        hba->dev_cmd.query.descriptor = NULL;
3278        mutex_unlock(&hba->dev_cmd.lock);
3279        ufshcd_release(hba);
3280        return err;
3281}
3282
3283/**
3284 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3285 * @hba: per-adapter instance
3286 * @opcode: descriptor query opcode
3287 * @idn: descriptor idn to access
3288 * @index: index field
3289 * @selector: selector field
3290 * @desc_buf: the buffer that contains the descriptor
3291 * @buf_len: length parameter passed to the device
3292 *
3293 * Returns 0 for success, non-zero in case of failure.
3294 * The buf_len parameter will contain, on return, the length parameter
3295 * received in the response.
3296 */
3297int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3298                                  enum query_opcode opcode,
3299                                  enum desc_idn idn, u8 index,
3300                                  u8 selector,
3301                                  u8 *desc_buf, int *buf_len)
3302{
3303        int err;
3304        int retries;
3305
3306        for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3307                err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3308                                                selector, desc_buf, buf_len);
3309                if (!err || err == -EINVAL)
3310                        break;
3311        }
3312
3313        return err;
3314}
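
/*
 * Illustrative sketch of a descriptor read, assuming a valid hba and a
 * sleepable context (descriptor IDN taken from ufs.h; not a verbatim
 * copy of existing callers):
 *
 *	u8 *desc_buf;
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err;
 *
 *	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
 *	if (!desc_buf)
 *		return -ENOMEM;
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);
 *	kfree(desc_buf);
 */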
3315
3316/**
3317 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3318 * @hba: Pointer to adapter instance
3319 * @desc_id: descriptor idn value
3320 * @desc_len: mapped desc length (out)
3321 */
3322void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3323                                  int *desc_len)
3324{
3325        if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3326            desc_id == QUERY_DESC_IDN_RFU_1)
3327                *desc_len = 0;
3328        else
3329                *desc_len = hba->desc_size[desc_id];
3330}
3331EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3332
3333static void ufshcd_update_desc_length(struct ufs_hba *hba,
3334                                      enum desc_idn desc_id, int desc_index,
3335                                      unsigned char desc_len)
3336{
3337        if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3338            desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3339                /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3340                 * than the RPMB unit descriptor; however, both share the same
3341                 * desc_idn. To cover both with one length, pick the normal
3342                 * unit descriptor length based on desc_index.
3343                 */
3344                hba->desc_size[desc_id] = desc_len;
3345}
3346
3347/**
3348 * ufshcd_read_desc_param - read the specified descriptor parameter
3349 * @hba: Pointer to adapter instance
3350 * @desc_id: descriptor idn value
3351 * @desc_index: descriptor index
3352 * @param_offset: offset of the parameter to read
3353 * @param_read_buf: pointer to buffer where parameter would be read
3354 * @param_size: sizeof(param_read_buf)
3355 *
3356 * Return 0 in case of success, non-zero otherwise
3357 */
3358int ufshcd_read_desc_param(struct ufs_hba *hba,
3359                           enum desc_idn desc_id,
3360                           int desc_index,
3361                           u8 param_offset,
3362                           u8 *param_read_buf,
3363                           u8 param_size)
3364{
3365        int ret;
3366        u8 *desc_buf;
3367        int buff_len;
3368        bool is_kmalloc = true;
3369
3370        /* Safety check */
3371        if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3372                return -EINVAL;
3373
3374        /* Get the length of descriptor */
3375        ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3376        if (!buff_len) {
3377                dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3378                return -EINVAL;
3379        }
3380
3381        if (param_offset >= buff_len) {
3382                dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3383                        __func__, param_offset, desc_id, buff_len);
3384                return -EINVAL;
3385        }
3386
3387        /* Check whether we need temp memory */
3388        if (param_offset != 0 || param_size < buff_len) {
3389                desc_buf = kzalloc(buff_len, GFP_KERNEL);
3390                if (!desc_buf)
3391                        return -ENOMEM;
3392        } else {
3393                desc_buf = param_read_buf;
3394                is_kmalloc = false;
3395        }
3396
3397        /* Request for full descriptor */
3398        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3399                                        desc_id, desc_index, 0,
3400                                        desc_buf, &buff_len);
3401
3402        if (ret) {
3403                dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3404                        __func__, desc_id, desc_index, param_offset, ret);
3405                goto out;
3406        }
3407
3408        /* Sanity check */
3409        if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3410                dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3411                        __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3412                ret = -EINVAL;
3413                goto out;
3414        }
3415
3416        /* Update descriptor length */
3417        buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3418        ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3419
3420        if (is_kmalloc) {
3421                /* Make sure we don't copy more data than available */
3422                if (param_offset + param_size > buff_len)
3423                        param_size = buff_len - param_offset;
3424                memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3425        }
3426out:
3427        if (is_kmalloc)
3428                kfree(desc_buf);
3429        return ret;
3430}
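
/*
 * Illustrative sketch: reading a single device descriptor parameter,
 * assuming a valid hba (parameter offset taken from ufs.h):
 *
 *	u8 spec_ver[2];
 *	int err;
 *
 *	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *				     DEVICE_DESC_PARAM_SPEC_VER,
 *				     spec_ver, sizeof(spec_ver));
 *	if (!err)
 *		dev_dbg(hba->dev, "wSpecVersion = 0x%x\n",
 *			get_unaligned_be16(spec_ver));
 */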
3431
3432/**
3433 * struct uc_string_id - unicode string
3434 *
3435 * @len: size of this descriptor inclusive
3436 * @type: descriptor type
3437 * @uc: unicode string character
3438 */
3439struct uc_string_id {
3440        u8 len;
3441        u8 type;
3442        wchar_t uc[];
3443} __packed;
3444
3445/* replace non-printable or non-ASCII characters with spaces */
3446static inline char ufshcd_remove_non_printable(u8 ch)
3447{
3448        return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3449}
3450
3451/**
3452 * ufshcd_read_string_desc - read string descriptor
3453 * @hba: pointer to adapter instance
3454 * @desc_index: descriptor index
3455 * @buf: pointer to buffer where descriptor would be read,
3456 *       the caller should free the memory.
3457 * @ascii: if true convert from unicode to ascii characters
3458 *         null terminated string.
3459 *
3460 * Return:
3461 * *      string size on success.
3462 * *      -ENOMEM: on allocation failure
3463 * *      -EINVAL: on a wrong parameter
3464 */
3465int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3466                            u8 **buf, bool ascii)
3467{
3468        struct uc_string_id *uc_str;
3469        u8 *str;
3470        int ret;
3471
3472        if (!buf)
3473                return -EINVAL;
3474
3475        uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3476        if (!uc_str)
3477                return -ENOMEM;
3478
3479        ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3480                                     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3481        if (ret < 0) {
3482                dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3483                        QUERY_REQ_RETRIES, ret);
3484                str = NULL;
3485                goto out;
3486        }
3487
3488        if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3489                dev_dbg(hba->dev, "String Desc is of zero length\n");
3490                str = NULL;
3491                ret = 0;
3492                goto out;
3493        }
3494
3495        if (ascii) {
3496                ssize_t ascii_len;
3497                int i;
3498                /* strip the header, halve (UTF-16 -> UTF-8) and add room for '\0' */
3499                ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3500                str = kzalloc(ascii_len, GFP_KERNEL);
3501                if (!str) {
3502                        ret = -ENOMEM;
3503                        goto out;
3504                }
3505
3506                /*
3507                 * The descriptor contains the string in UTF-16 format;
3508                 * convert it to UTF-8 so it can be displayed.
3509                 */
3510                ret = utf16s_to_utf8s(uc_str->uc,
3511                                      uc_str->len - QUERY_DESC_HDR_SIZE,
3512                                      UTF16_BIG_ENDIAN, str, ascii_len);
3513
3514                /* replace non-printable or non-ASCII characters with spaces */
3515                for (i = 0; i < ret; i++)
3516                        str[i] = ufshcd_remove_non_printable(str[i]);
3517
3518                str[ret++] = '\0';
3519
3520        } else {
3521                str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3522                if (!str) {
3523                        ret = -ENOMEM;
3524                        goto out;
3525                }
3526                ret = uc_str->len;
3527        }
3528out:
3529        *buf = str;
3530        kfree(uc_str);
3531        return ret;
3532}
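
/*
 * Illustrative sketch, assuming a valid hba and a string descriptor index
 * (model_index here is a hypothetical value previously read from the
 * device descriptor):
 *
 *	u8 *model;
 *	int ret;
 *
 *	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
 *	if (ret > 0)
 *		dev_dbg(hba->dev, "product name: %s\n", model);
 *	kfree(model);
 */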
3533
3534/**
3535 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3536 * @hba: Pointer to adapter instance
3537 * @lun: lun id
3538 * @param_offset: offset of the parameter to read
3539 * @param_read_buf: pointer to buffer where parameter would be read
3540 * @param_size: sizeof(param_read_buf)
3541 *
3542 * Return 0 in case of success, non-zero otherwise
3543 */
3544static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3545                                              int lun,
3546                                              enum unit_desc_param param_offset,
3547                                              u8 *param_read_buf,
3548                                              u32 param_size)
3549{
3550        /*
3551         * Unit descriptors are only available for general purpose LUs (LUN id
3552         * from 0 to 7) and RPMB Well known LU.
3553         */
3554        if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
3555                return -EOPNOTSUPP;
3556
3557        return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3558                                      param_offset, param_read_buf, param_size);
3559}
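
/*
 * Illustrative sketch: reading the queue depth of a logical unit, assuming
 * a valid hba and lun (parameter offset taken from ufs.h):
 *
 *	u8 lun_qdepth;
 *	int err;
 *
 *	err = ufshcd_read_unit_desc_param(hba, lun, UNIT_DESC_PARAM_LU_Q_DEPTH,
 *					  &lun_qdepth, sizeof(lun_qdepth));
 *	if (!err)
 *		dev_dbg(hba->dev, "bLUQueueDepth = %u\n", lun_qdepth);
 */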
3560
3561static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3562{
3563        int err = 0;
3564        u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3565
3566        if (hba->dev_info.wspecversion >= 0x300) {
3567                err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3568                                QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3569                                &gating_wait);
3570                if (err)
3571                        dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3572                                         err, gating_wait);
3573
3574                if (gating_wait == 0) {
3575                        gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3576                        dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3577                                         gating_wait);
3578                }
3579
3580                hba->dev_info.clk_gating_wait_us = gating_wait;
3581        }
3582
3583        return err;
3584}
3585
3586/**
3587 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3588 * @hba: per adapter instance
3589 *
3590 * 1. Allocate DMA memory for Command Descriptor array
3591 *      Each command descriptor consists of a Command UPIU, a Response UPIU and a PRDT
3592 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3593 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3594 *      (UTMRDL)
3595 * 4. Allocate memory for local reference block (lrb).
3596 *
3597 * Returns 0 for success, non-zero in case of failure
3598 */
3599static int ufshcd_memory_alloc(struct ufs_hba *hba)
3600{
3601        size_t utmrdl_size, utrdl_size, ucdl_size;
3602
3603        /* Allocate memory for UTP command descriptors */
3604        ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3605        hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3606                                                  ucdl_size,
3607                                                  &hba->ucdl_dma_addr,
3608                                                  GFP_KERNEL);
3609
3610        /*
3611         * UFSHCI requires UTP command descriptors to be 128-byte aligned.
3612         * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is,
3613         * it is also guaranteed to be aligned to 128 bytes, since
3614         * PAGE_SIZE is a multiple of 128.
3615         */
3616        if (!hba->ucdl_base_addr ||
3617            WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3618                dev_err(hba->dev,
3619                        "Command Descriptor Memory allocation failed\n");
3620                goto out;
3621        }
3622
3623        /*
3624         * Allocate memory for UTP Transfer descriptors
3625         * UFSHCI requires 1024 byte alignment of UTRD
3626         */
3627        utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3628        hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3629                                                   utrdl_size,
3630                                                   &hba->utrdl_dma_addr,
3631                                                   GFP_KERNEL);
3632        if (!hba->utrdl_base_addr ||
3633            WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3634                dev_err(hba->dev,
3635                        "Transfer Descriptor Memory allocation failed\n");
3636                goto out;
3637        }
3638
3639        /*
3640         * Allocate memory for UTP Task Management descriptors
3641         * UFSHCI requires 1024 byte alignment of UTMRD
3642         */
3643        utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3644        hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3645                                                    utmrdl_size,
3646                                                    &hba->utmrdl_dma_addr,
3647                                                    GFP_KERNEL);
3648        if (!hba->utmrdl_base_addr ||
3649            WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3650                dev_err(hba->dev,
3651                "Task Management Descriptor Memory allocation failed\n");
3652                goto out;
3653        }
3654
3655        /* Allocate memory for local reference block */
3656        hba->lrb = devm_kcalloc(hba->dev,
3657                                hba->nutrs, sizeof(struct ufshcd_lrb),
3658                                GFP_KERNEL);
3659        if (!hba->lrb) {
3660                dev_err(hba->dev, "LRB Memory allocation failed\n");
3661                goto out;
3662        }
3663        return 0;
3664out:
3665        return -ENOMEM;
3666}
3667
3668/**
3669 * ufshcd_host_memory_configure - configure local reference block with
3670 *                              memory offsets
3671 * @hba: per adapter instance
3672 *
3673 * Configure Host memory space
3674 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3675 * address.
3676 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3677 * and PRDT offset.
3678 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3679 * into local reference block.
3680 */
3681static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3682{
3683        struct utp_transfer_req_desc *utrdlp;
3684        dma_addr_t cmd_desc_dma_addr;
3685        dma_addr_t cmd_desc_element_addr;
3686        u16 response_offset;
3687        u16 prdt_offset;
3688        int cmd_desc_size;
3689        int i;
3690
3691        utrdlp = hba->utrdl_base_addr;
3692
3693        response_offset =
3694                offsetof(struct utp_transfer_cmd_desc, response_upiu);
3695        prdt_offset =
3696                offsetof(struct utp_transfer_cmd_desc, prd_table);
3697
3698        cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3699        cmd_desc_dma_addr = hba->ucdl_dma_addr;
3700
3701        for (i = 0; i < hba->nutrs; i++) {
3702                /* Configure UTRD with command descriptor base address */
3703                cmd_desc_element_addr =
3704                                (cmd_desc_dma_addr + (cmd_desc_size * i));
3705                utrdlp[i].command_desc_base_addr_lo =
3706                                cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3707                utrdlp[i].command_desc_base_addr_hi =
3708                                cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3709
3710                /* Response upiu and prdt offset should be in double words */
3711                if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3712                        utrdlp[i].response_upiu_offset =
3713                                cpu_to_le16(response_offset);
3714                        utrdlp[i].prd_table_offset =
3715                                cpu_to_le16(prdt_offset);
3716                        utrdlp[i].response_upiu_length =
3717                                cpu_to_le16(ALIGNED_UPIU_SIZE);
3718                } else {
3719                        utrdlp[i].response_upiu_offset =
3720                                cpu_to_le16(response_offset >> 2);
3721                        utrdlp[i].prd_table_offset =
3722                                cpu_to_le16(prdt_offset >> 2);
3723                        utrdlp[i].response_upiu_length =
3724                                cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3725                }
3726
3727                ufshcd_init_lrb(hba, &hba->lrb[i], i);
3728        }
3729}
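
/*
 * Resulting per-tag layout (descriptive sketch): each UTRD i points at
 * command descriptor i in the UCD array; within that descriptor the
 * command UPIU sits at offset 0, the response UPIU at
 * offsetof(struct utp_transfer_cmd_desc, response_upiu) and the PRDT at
 * offsetof(struct utp_transfer_cmd_desc, prd_table).  The offsets and the
 * response length are stored in the UTRD in dwords, or in bytes when
 * UFSHCD_QUIRK_PRDT_BYTE_GRAN is set.
 */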
3730
3731/**
3732 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3733 * @hba: per adapter instance
3734 *
3735 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3736 * in order to initialize the Unipro link startup procedure.
3737 * Once the Unipro links are up, the device connected to the controller
3738 * is detected.
3739 *
3740 * Returns 0 on success, non-zero value on failure
3741 */
3742static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3743{
3744        struct uic_command uic_cmd = {0};
3745        int ret;
3746
3747        uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3748
3749        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3750        if (ret)
3751                dev_dbg(hba->dev,
3752                        "dme-link-startup: error code %d\n", ret);
3753        return ret;
3754}
3755/**
3756 * ufshcd_dme_reset - UIC command for DME_RESET
3757 * @hba: per adapter instance
3758 *
3759 * DME_RESET command is issued in order to reset UniPro stack.
3760 * This function now deals with cold reset.
3761 *
3762 * Returns 0 on success, non-zero value on failure
3763 */
3764static int ufshcd_dme_reset(struct ufs_hba *hba)
3765{
3766        struct uic_command uic_cmd = {0};
3767        int ret;
3768
3769        uic_cmd.command = UIC_CMD_DME_RESET;
3770
3771        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3772        if (ret)
3773                dev_err(hba->dev,
3774                        "dme-reset: error code %d\n", ret);
3775
3776        return ret;
3777}
3778
3779int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3780                               int agreed_gear,
3781                               int adapt_val)
3782{
3783        int ret;
3784
3785        if (agreed_gear != UFS_HS_G4)
3786                adapt_val = PA_NO_ADAPT;
3787
3788        ret = ufshcd_dme_set(hba,
3789                             UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3790                             adapt_val);
3791        return ret;
3792}
3793EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
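
/*
 * Illustrative sketch, assuming a valid hba; pwr_mode is a hypothetical
 * struct ufs_pa_layer_attr pointer and PA_INITIAL_ADAPT is assumed to
 * come from unipro.h:
 *
 *	int err;
 *
 *	err = ufshcd_dme_configure_adapt(hba, pwr_mode->gear_tx,
 *					 PA_INITIAL_ADAPT);
 */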
3794
3795/**
3796 * ufshcd_dme_enable - UIC command for DME_ENABLE
3797 * @hba: per adapter instance
3798 *
3799 * DME_ENABLE command is issued in order to enable UniPro stack.
3800 *
3801 * Returns 0 on success, non-zero value on failure
3802 */
3803static int ufshcd_dme_enable(struct ufs_hba *hba)
3804{
3805        struct uic_command uic_cmd = {0};
3806        int ret;
3807
3808        uic_cmd.command = UIC_CMD_DME_ENABLE;
3809
3810        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3811        if (ret)
3812                dev_err(hba->dev,
3813                        "dme-enable: error code %d\n", ret);
3814
3815        return ret;
3816}
3817
3818static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3819{
3820        #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3821        unsigned long min_sleep_time_us;
3822
3823        if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3824                return;
3825
3826        /*
3827         * last_dme_cmd_tstamp will be 0 only for 1st call to
3828         * this function
3829         */
3830        if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3831                min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3832        } else {
3833                unsigned long delta =
3834                        (unsigned long) ktime_to_us(
3835                                ktime_sub(ktime_get(),
3836                                hba->last_dme_cmd_tstamp));
3837
3838                if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3839                        min_sleep_time_us =
3840                                MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3841                else
3842                        return; /* no more delay required */
3843        }
3844
3845        /* allow sleep for extra 50us if needed */
3846        usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3847}
3848
3849/**
3850 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3851 * @hba: per adapter instance
3852 * @attr_sel: uic command argument1
3853 * @attr_set: attribute set type as uic command argument2
3854 * @mib_val: setting value as uic command argument3
3855 * @peer: indicate whether peer or local
3856 *
3857 * Returns 0 on success, non-zero value on failure
3858 */
3859int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3860                        u8 attr_set, u32 mib_val, u8 peer)
3861{
3862        struct uic_command uic_cmd = {0};
3863        static const char *const action[] = {
3864                "dme-set",
3865                "dme-peer-set"
3866        };
3867        const char *set = action[!!peer];
3868        int ret;
3869        int retries = UFS_UIC_COMMAND_RETRIES;
3870
3871        uic_cmd.command = peer ?
3872                UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3873        uic_cmd.argument1 = attr_sel;
3874        uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3875        uic_cmd.argument3 = mib_val;
3876
3877        do {
3878                /* for peer attributes we retry upon failure */
3879                ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3880                if (ret)
3881                        dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3882                                set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3883        } while (ret && peer && --retries);
3884
3885        if (ret)
3886                dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3887                        set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3888                        UFS_UIC_COMMAND_RETRIES - retries);
3889
3890        return ret;
3891}
3892EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
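
/*
 * Illustrative sketch, assuming a valid hba; ufshcd_dme_set() and
 * ufshcd_dme_peer_set() are the convenience wrappers declared in ufshcd.h
 * around ufshcd_dme_set_attr():
 *
 *	int err;
 *
 *	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
 *	if (!err)
 *		err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_RXTERMINATION),
 *					  TRUE);
 */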
3893
3894/**
3895 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3896 * @hba: per adapter instance
3897 * @attr_sel: uic command argument1
3898 * @mib_val: the value of the attribute as returned by the UIC command
3899 * @peer: indicate whether peer or local
3900 *
3901 * Returns 0 on success, non-zero value on failure
3902 */
3903int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3904                        u32 *mib_val, u8 peer)
3905{
3906        struct uic_command uic_cmd = {0};
3907        static const char *const action[] = {
3908                "dme-get",
3909                "dme-peer-get"
3910        };
3911        const char *get = action[!!peer];
3912        int ret;
3913        int retries = UFS_UIC_COMMAND_RETRIES;
3914        struct ufs_pa_layer_attr orig_pwr_info;
3915        struct ufs_pa_layer_attr temp_pwr_info;
3916        bool pwr_mode_change = false;
3917
3918        if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3919                orig_pwr_info = hba->pwr_info;
3920                temp_pwr_info = orig_pwr_info;
3921
3922                if (orig_pwr_info.pwr_tx == FAST_MODE ||
3923                    orig_pwr_info.pwr_rx == FAST_MODE) {
3924                        temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3925                        temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3926                        pwr_mode_change = true;
3927                } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3928                    orig_pwr_info.pwr_rx == SLOW_MODE) {
3929                        temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3930                        temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3931                        pwr_mode_change = true;
3932                }
3933                if (pwr_mode_change) {
3934                        ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3935                        if (ret)
3936                                goto out;
3937                }
3938        }
3939
3940        uic_cmd.command = peer ?
3941                UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3942        uic_cmd.argument1 = attr_sel;
3943
3944        do {
3945                /* for peer attributes we retry upon failure */
3946                ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3947                if (ret)
3948                        dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3949                                get, UIC_GET_ATTR_ID(attr_sel), ret);
3950        } while (ret && peer && --retries);
3951
3952        if (ret)
3953                dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3954                        get, UIC_GET_ATTR_ID(attr_sel),
3955                        UFS_UIC_COMMAND_RETRIES - retries);
3956
3957        if (mib_val && !ret)
3958                *mib_val = uic_cmd.argument3;
3959
3960        if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3961            && pwr_mode_change)
3962                ufshcd_change_power_mode(hba, &orig_pwr_info);
3963out:
3964        return ret;
3965}
3966EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
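
/*
 * Illustrative sketch, assuming a valid hba; ufshcd_dme_get() and
 * ufshcd_dme_peer_get() are the wrappers declared in ufshcd.h around
 * ufshcd_dme_get_attr():
 *
 *	u32 rx_lanes = 0;
 *	u32 peer_gear = 0;
 *	int err;
 *
 *	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
 *			     &rx_lanes);
 *	if (!err)
 *		err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
 *					  &peer_gear);
 */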
3967
3968/**
3969 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3970 * state) and waits for it to take effect.
3971 *
3972 * @hba: per adapter instance
3973 * @cmd: UIC command to execute
3974 *
3975 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3976 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3977 * and device UniPro links, and hence their final completion is indicated by
3978 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3979 * addition to normal UIC command completion Status (UCCS). This function only
3980 * returns after the relevant status bits indicate the completion.
3981 *
3982 * Returns 0 on success, non-zero value on failure
3983 */
3984static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3985{
3986        struct completion uic_async_done;
3987        unsigned long flags;
3988        u8 status;
3989        int ret;
3990        bool reenable_intr = false;
3991
3992        mutex_lock(&hba->uic_cmd_mutex);
3993        init_completion(&uic_async_done);
3994        ufshcd_add_delay_before_dme_cmd(hba);
3995
3996        spin_lock_irqsave(hba->host->host_lock, flags);
3997        if (ufshcd_is_link_broken(hba)) {
3998                ret = -ENOLINK;
3999                goto out_unlock;
4000        }
4001        hba->uic_async_done = &uic_async_done;
4002        if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4003                ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4004                /*
4005                 * Make sure UIC command completion interrupt is disabled before
4006                 * issuing UIC command.
4007                 */
4008                wmb();
4009                reenable_intr = true;
4010        }
4011        ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4012        spin_unlock_irqrestore(hba->host->host_lock, flags);
4013        if (ret) {
4014                dev_err(hba->dev,
4015                        "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4016                        cmd->command, cmd->argument3, ret);
4017                goto out;
4018        }
4019
4020        if (!wait_for_completion_timeout(hba->uic_async_done,
4021                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4022                dev_err(hba->dev,
4023                        "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4024                        cmd->command, cmd->argument3);
4025
4026                if (!cmd->cmd_active) {
4027                        dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4028                                __func__);
4029                        goto check_upmcrs;
4030                }
4031
4032                ret = -ETIMEDOUT;
4033                goto out;
4034        }
4035
4036check_upmcrs:
4037        status = ufshcd_get_upmcrs(hba);
4038        if (status != PWR_LOCAL) {
4039                dev_err(hba->dev,
4040                        "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4041                        cmd->command, status);
4042                ret = (status != PWR_OK) ? status : -1;
4043        }
4044out:
4045        if (ret) {
4046                ufshcd_print_host_state(hba);
4047                ufshcd_print_pwr_info(hba);
4048                ufshcd_print_evt_hist(hba);
4049        }
4050
4051        spin_lock_irqsave(hba->host->host_lock, flags);
4052        hba->active_uic_cmd = NULL;
4053        hba->uic_async_done = NULL;
4054        if (reenable_intr)
4055                ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4056        if (ret) {
4057                ufshcd_set_link_broken(hba);
4058                ufshcd_schedule_eh_work(hba);
4059        }
4060out_unlock:
4061        spin_unlock_irqrestore(hba->host->host_lock, flags);
4062        mutex_unlock(&hba->uic_cmd_mutex);
4063
4064        return ret;
4065}
4066
4067/**
4068 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4069 *                              using DME_SET primitives.
4070 * @hba: per adapter instance
4071 * @mode: power mode value
4072 *
4073 * Returns 0 on success, non-zero value on failure
4074 */
4075static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4076{
4077        struct uic_command uic_cmd = {0};
4078        int ret;
4079
4080        if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4081                ret = ufshcd_dme_set(hba,
4082                                UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4083                if (ret) {
4084                        dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4085                                                __func__, ret);
4086                        goto out;
4087                }
4088        }
4089
4090        uic_cmd.command = UIC_CMD_DME_SET;
4091        uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4092        uic_cmd.argument3 = mode;
4093        ufshcd_hold(hba, false);
4094        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4095        ufshcd_release(hba);
4096
4097out:
4098        return ret;
4099}
4100
4101int ufshcd_link_recovery(struct ufs_hba *hba)
4102{
4103        int ret;
4104        unsigned long flags;
4105
4106        spin_lock_irqsave(hba->host->host_lock, flags);
4107        hba->ufshcd_state = UFSHCD_STATE_RESET;
4108        ufshcd_set_eh_in_progress(hba);
4109        spin_unlock_irqrestore(hba->host->host_lock, flags);
4110
4111        /* Reset the attached device */
4112        ufshcd_device_reset(hba);
4113
4114        ret = ufshcd_host_reset_and_restore(hba);
4115
4116        spin_lock_irqsave(hba->host->host_lock, flags);
4117        if (ret)
4118                hba->ufshcd_state = UFSHCD_STATE_ERROR;
4119        ufshcd_clear_eh_in_progress(hba);
4120        spin_unlock_irqrestore(hba->host->host_lock, flags);
4121
4122        if (ret)
4123                dev_err(hba->dev, "%s: link recovery failed, err %d",
4124                        __func__, ret);
4125        else
4126                ufshcd_clear_ua_wluns(hba);
4127
4128        return ret;
4129}
4130EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4131
4132static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4133{
4134        int ret;
4135        struct uic_command uic_cmd = {0};
4136        ktime_t start = ktime_get();
4137
4138        ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4139
4140        uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4141        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4142        trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4143                             ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4144
4145        if (ret)
4146                dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4147                        __func__, ret);
4148        else
4149                ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4150                                                                POST_CHANGE);
4151
4152        return ret;
4153}
4154
4155int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4156{
4157        struct uic_command uic_cmd = {0};
4158        int ret;
4159        ktime_t start = ktime_get();
4160
4161        ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4162
4163        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4164        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4165        trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4166                             ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4167
4168        if (ret) {
4169                dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4170                        __func__, ret);
4171        } else {
4172                ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4173                                                                POST_CHANGE);
4174                hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4175                hba->ufs_stats.hibern8_exit_cnt++;
4176        }
4177
4178        return ret;
4179}
4180EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4181
4182void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4183{
4184        unsigned long flags;
4185        bool update = false;
4186
4187        if (!ufshcd_is_auto_hibern8_supported(hba))
4188                return;
4189
4190        spin_lock_irqsave(hba->host->host_lock, flags);
4191        if (hba->ahit != ahit) {
4192                hba->ahit = ahit;
4193                update = true;
4194        }
4195        spin_unlock_irqrestore(hba->host->host_lock, flags);
4196
4197        if (update &&
4198            !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
4199                ufshcd_rpm_get_sync(hba);
4200                ufshcd_hold(hba, false);
4201                ufshcd_auto_hibern8_enable(hba);
4202                ufshcd_release(hba);
4203                ufshcd_rpm_put_sync(hba);
4204        }
4205}
4206EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
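
/*
 * Illustrative sketch, assuming a valid hba and the Auto-Hibernate field
 * layout macros from ufshci.h (the timer value is expressed in the units
 * selected by the scale field):
 *
 *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
 *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
 *
 *	ufshcd_auto_hibern8_update(hba, ahit);
 */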
4207
4208void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4209{
4210        unsigned long flags;
4211
4212        if (!ufshcd_is_auto_hibern8_supported(hba))
4213                return;
4214
4215        spin_lock_irqsave(hba->host->host_lock, flags);
4216        ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4217        spin_unlock_irqrestore(hba->host->host_lock, flags);
4218}
4219
4220/**
4221 * ufshcd_init_pwr_info - setting the POR (power on reset)
4222 * values in hba power info
4223 * @hba: per-adapter instance
4224 */
4225static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4226{
4227        hba->pwr_info.gear_rx = UFS_PWM_G1;
4228        hba->pwr_info.gear_tx = UFS_PWM_G1;
4229        hba->pwr_info.lane_rx = 1;
4230        hba->pwr_info.lane_tx = 1;
4231        hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4232        hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4233        hba->pwr_info.hs_rate = 0;
4234}
4235
4236/**
4237 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4238 * @hba: per-adapter instance
4239 */
4240static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4241{
4242        struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4243
4244        if (hba->max_pwr_info.is_valid)
4245                return 0;
4246
4247        pwr_info->pwr_tx = FAST_MODE;
4248        pwr_info->pwr_rx = FAST_MODE;
4249        pwr_info->hs_rate = PA_HS_MODE_B;
4250
4251        /* Get the connected lane count */
4252        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4253                        &pwr_info->lane_rx);
4254        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4255                        &pwr_info->lane_tx);
4256
4257        if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4258                dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4259                                __func__,
4260                                pwr_info->lane_rx,
4261                                pwr_info->lane_tx);
4262                return -EINVAL;
4263        }
4264
4265        /*
4266         * First, get the maximum gears of HS speed.
4267         * If a zero value, it means there is no HSGEAR capability.
4268         * Then, get the maximum gears of PWM speed.
4269         */
4270        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4271        if (!pwr_info->gear_rx) {
4272                ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4273                                &pwr_info->gear_rx);
4274                if (!pwr_info->gear_rx) {
4275                        dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4276                                __func__, pwr_info->gear_rx);
4277                        return -EINVAL;
4278                }
4279                pwr_info->pwr_rx = SLOW_MODE;
4280        }
4281
4282        ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4283                        &pwr_info->gear_tx);
4284        if (!pwr_info->gear_tx) {
4285                ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4286                                &pwr_info->gear_tx);
4287                if (!pwr_info->gear_tx) {
4288                        dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4289                                __func__, pwr_info->gear_tx);
4290                        return -EINVAL;
4291                }
4292                pwr_info->pwr_tx = SLOW_MODE;
4293        }
4294
4295        hba->max_pwr_info.is_valid = true;
4296        return 0;
4297}
4298
4299static int ufshcd_change_power_mode(struct ufs_hba *hba,
4300                             struct ufs_pa_layer_attr *pwr_mode)
4301{
4302        int ret;
4303
4304        /* if already configured to the requested pwr_mode */
4305        if (!hba->force_pmc &&
4306            pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4307            pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4308            pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4309            pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4310            pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4311            pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4312            pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4313                dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4314                return 0;
4315        }
4316
4317        /*
4318         * Configure attributes for power mode change with below.
4319         * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4320         * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4321         * - PA_HSSERIES
4322         */
4323        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4324        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4325                        pwr_mode->lane_rx);
4326        if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4327                        pwr_mode->pwr_rx == FAST_MODE)
4328                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4329        else
4330                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4331
4332        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4333        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4334                        pwr_mode->lane_tx);
4335        if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4336                        pwr_mode->pwr_tx == FAST_MODE)
4337                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4338        else
4339                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4340
4341        if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4342            pwr_mode->pwr_tx == FASTAUTO_MODE ||
4343            pwr_mode->pwr_rx == FAST_MODE ||
4344            pwr_mode->pwr_tx == FAST_MODE)
4345                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4346                                                pwr_mode->hs_rate);
4347
4348        if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4349                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4350                                DL_FC0ProtectionTimeOutVal_Default);
4351                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4352                                DL_TC0ReplayTimeOutVal_Default);
4353                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4354                                DL_