linux/drivers/scsi/scsi.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *         Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to:
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding requests, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>

/*
 * Definitions and constants.
 */

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif
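
/*
 * For illustration: scsi_logging_level packs one small level field per
 * logging facility; the SCSI_LOG_*_SHIFT and SCSI_LOG_*_BITS definitions in
 * scsi_logging.h describe the exact layout, and the SCSI_LOG_LEVEL() macro
 * used below extracts one field.  A hedged sketch of raising the mid-layer
 * queue logging level to 2 from C code:
 *
 *      scsi_logging_level |= 2 << SCSI_LOG_MLQUEUE_SHIFT;
 *
 * At run time the same mask can usually be written to the module parameter
 * declared at the bottom of this file, e.g. via
 * /sys/module/scsi_mod/parameters/scsi_logging_level (path assumed; it
 * depends on how the SCSI core was built).
 */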

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
        unsigned int level;

        /*
         * If ML QUEUE log level is greater than or equal to:
         *
         * 1: nothing (match completion)
         *
         * 2: log opcode + command of all commands + cmd address
         *
         * 3: same as 2
         *
         * 4: same as 3
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        scmd_printk(KERN_INFO, cmd,
                                    "Send: scmd 0x%p\n", cmd);
                        scsi_print_command(cmd);
                }
        }
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
        unsigned int level;

        /*
         * If ML COMPLETE log level is greater than or equal to:
         *
         * 1: log disposition, result, opcode + command, and conditionally
         * sense data for failures or non SUCCESS dispositions.
         *
         * 2: same as 1 but for all command completions.
         *
         * 3: same as 2
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        scsi_print_result(cmd, "Done", disposition);
                        scsi_print_command(cmd);
                        if (scsi_status_is_check_condition(cmd->result))
                                scsi_print_sense(cmd);
                        if (level > 3)
                                scmd_printk(KERN_INFO, cmd,
                                            "scsi host busy %d failed %d\n",
                                            scsi_host_busy(cmd->device->host),
                                            cmd->device->host->host_failed);
                }
        }
}
#endif

/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = sdev->host;
        struct scsi_driver *drv;
        unsigned int good_bytes;

        scsi_device_unbusy(sdev, cmd);

        /*
         * Clear the flags that say that the device/target/host is no longer
         * capable of accepting new commands.
         */
        if (atomic_read(&shost->host_blocked))
                atomic_set(&shost->host_blocked, 0);
        if (atomic_read(&starget->target_blocked))
                atomic_set(&starget->target_blocked, 0);
        if (atomic_read(&sdev->device_blocked))
                atomic_set(&sdev->device_blocked, 0);

        SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
                                "Notifying upper driver of completion "
                                "(result %x)\n", cmd->result));

        good_bytes = scsi_bufflen(cmd);
        if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
                int old_good_bytes = good_bytes;
                drv = scsi_cmd_to_driver(cmd);
                if (drv->done)
                        good_bytes = drv->done(cmd);
                /*
                 * USB may not give sense identifying bad sector and
                 * simply return a residue instead, so subtract off the
                 * residue if drv->done() error processing indicates no
                 * change to the completion length.
                 */
                if (good_bytes == old_good_bytes)
                        good_bytes -= scsi_get_resid(cmd);
        }
        scsi_io_completion(cmd, good_bytes);
}


/*
 * 4096 is big enough for saturating fast SCSI LUNs.
 */
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
        return min_t(int, sdev->host->can_queue, 4096);
}

/**
 * scsi_change_queue_depth - change a device's queue depth
 * @sdev: SCSI Device in question
 * @depth: number of commands allowed to be queued to the driver
 *
 * Sets the device queue depth and returns the new value.
 */
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
        depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));

        if (depth > 0) {
                sdev->queue_depth = depth;
                wmb();
        }

        if (sdev->request_queue)
                blk_set_queue_depth(sdev->request_queue, depth);

        sbitmap_resize(&sdev->budget_map, sdev->queue_depth);

        return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
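
/*
 * Illustrative example: a low-level driver typically calls
 * scsi_change_queue_depth() from its ->change_queue_depth() host template
 * hook or from its device configuration callback.  A hedged sketch with
 * hypothetical names (EXAMPLE_HW_MAX_QUEUE is not a real symbol):
 *
 *      static int example_change_queue_depth(struct scsi_device *sdev,
 *                                            int qdepth)
 *      {
 *              if (qdepth > EXAMPLE_HW_MAX_QUEUE)
 *                      qdepth = EXAMPLE_HW_MAX_QUEUE;
 *              return scsi_change_queue_depth(sdev, qdepth);
 *      }
 */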

/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description: This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Returns:     0 - No change needed, >0 - Adjust queue depth to this new depth,
 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 *                      as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{

        /*
         * Don't count QUEUE_FULLs that arrive within the same jiffies
         * interval; they could all be from the same event.
         */
        if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
                return 0;

        sdev->last_queue_full_time = jiffies;
        if (sdev->last_queue_full_depth != depth) {
                sdev->last_queue_full_count = 1;
                sdev->last_queue_full_depth = depth;
        } else {
                sdev->last_queue_full_count++;
        }

        if (sdev->last_queue_full_count <= 10)
                return 0;

        return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL(scsi_track_queue_full);
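
/*
 * Illustrative example: a low-level driver would normally call
 * scsi_track_queue_full() from its completion path when a command returns
 * SAM_STAT_TASK_SET_FULL.  A hedged sketch, where example_outstanding_cmds()
 * is a hypothetical per-device counter kept by the driver:
 *
 *      if (status == SAM_STAT_TASK_SET_FULL) {
 *              int new_depth = scsi_track_queue_full(sdev,
 *                                      example_outstanding_cmds(sdev) - 1);
 *
 *              if (new_depth > 0)
 *                      sdev_printk(KERN_INFO, sdev,
 *                                  "queue depth adjusted to %d\n", new_depth);
 *      }
 */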

/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns size of the vpd page on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
                                                        u8 page, unsigned len)
{
        int result;
        unsigned char cmd[16];

        if (len < 4)
                return -EINVAL;

        cmd[0] = INQUIRY;
        cmd[1] = 1;             /* EVPD */
        cmd[2] = page;
        cmd[3] = len >> 8;
        cmd[4] = len & 0xff;
        cmd[5] = 0;             /* Control byte */

        /*
         * I'm not convinced we need to try quite this hard to get VPD, but
         * all the existing users tried this hard.
         */
        result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
                                  30 * HZ, 3, NULL);
        if (result)
                return -EIO;

        /*
         * Sanity check that we got the page back that we asked for and that
         * the page size is not 0.
         */
        if (buffer[1] != page)
                return -EIO;

        result = get_unaligned_be16(&buffer[2]);
        if (!result)
                return -EIO;

        return result + 4;
}

static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
        unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
        int result;

        if (sdev->no_vpd_size)
                return SCSI_DEFAULT_VPD_LEN;

        /*
         * Fetch the VPD page header to find out how big the page
         * is. This is done to prevent problems on legacy devices
         * which can not handle allocation lengths as large as
         * potentially requested by the caller.
         */
        result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
        if (result < 0)
                return 0;

        if (result < SCSI_VPD_HEADER_SIZE) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: short VPD page 0x%02x length: %d bytes\n",
                              __func__, page, result);
                return 0;
        }

        return result;
}

/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (e.g. SPC, SBC).
 * If the device supports this VPD page, this routine fills @buf
 * with the data from that page and returns 0. If the VPD page is not
 * supported or its content cannot be retrieved, -EINVAL is returned.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
                      int buf_len)
{
        int result, vpd_len;

        if (!scsi_device_supports_vpd(sdev))
                return -EINVAL;

        vpd_len = scsi_get_vpd_size(sdev, page);
        if (vpd_len <= 0)
                return -EINVAL;

        vpd_len = min(vpd_len, buf_len);

        /*
         * Fetch the actual page. Since the appropriate size was reported
         * by the device it is now safe to ask for something bigger.
         */
        memset(buf, 0, buf_len);
        result = scsi_vpd_inquiry(sdev, buf, page, vpd_len);
        if (result < 0)
                return -EINVAL;
        else if (result > vpd_len)
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: VPD page 0x%02x result %d > %d bytes\n",
                              __func__, page, result, vpd_len);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
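
/*
 * Illustrative example: fetching the Unit Serial Number page (0x80) into a
 * caller-supplied buffer.  A hedged sketch; the byte offsets follow the SPC
 * definition of that page:
 *
 *      unsigned char vpd[255];
 *
 *      if (!scsi_get_vpd_page(sdev, 0x80, vpd, sizeof(vpd))) {
 *              // vpd[3] is the page length, vpd[4..] holds the serial number
 *              sdev_printk(KERN_INFO, sdev, "serial number length %u\n",
 *                          vpd[3]);
 *      }
 */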

/**
 * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 *
 * Returns %NULL upon failure.
 */
static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
{
        struct scsi_vpd *vpd_buf;
        int vpd_len, result;

        vpd_len = scsi_get_vpd_size(sdev, page);
        if (vpd_len <= 0)
                return NULL;

retry_pg:
        /*
         * Fetch the actual page. Since the appropriate size was reported
         * by the device it is now safe to ask for something bigger.
         */
        vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
        if (!vpd_buf)
                return NULL;

        result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
        if (result < 0) {
                kfree(vpd_buf);
                return NULL;
        }
        if (result > vpd_len) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: VPD page 0x%02x result %d > %d bytes\n",
                              __func__, page, result, vpd_len);
                vpd_len = result;
                kfree(vpd_buf);
                goto retry_pg;
        }

        vpd_buf->len = result;

        return vpd_buf;
}

static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
                                 struct scsi_vpd __rcu **sdev_vpd_buf)
{
        struct scsi_vpd *vpd_buf;

        vpd_buf = scsi_get_vpd_buf(sdev, page);
        if (!vpd_buf)
                return;

        mutex_lock(&sdev->inquiry_mutex);
        vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf,
                                      lockdep_is_held(&sdev->inquiry_mutex));
        mutex_unlock(&sdev->inquiry_mutex);

        if (vpd_buf)
                kfree_rcu(vpd_buf, rcu);
}

/**
 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
 * @sdev: The device to ask
 *
 * Attach the 'Device Identification' VPD page (0x83) and the
 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
 * structure. This information can be used to identify the device
 * uniquely.
 */
void scsi_attach_vpd(struct scsi_device *sdev)
{
        int i;
        struct scsi_vpd *vpd_buf;

        if (!scsi_device_supports_vpd(sdev))
                return;

        /* Ask for all the pages supported by this device */
        vpd_buf = scsi_get_vpd_buf(sdev, 0);
        if (!vpd_buf)
                return;

        for (i = 4; i < vpd_buf->len; i++) {
                if (vpd_buf->data[i] == 0x0)
                        scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
                if (vpd_buf->data[i] == 0x80)
                        scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
                if (vpd_buf->data[i] == 0x83)
                        scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
                if (vpd_buf->data[i] == 0x89)
                        scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
                if (vpd_buf->data[i] == 0xb0)
                        scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
                if (vpd_buf->data[i] == 0xb1)
                        scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
                if (vpd_buf->data[i] == 0xb2)
                        scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
        }
        kfree(vpd_buf);
}

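/*
 * Illustrative example: consumers read the attached pages under RCU, much
 * like the sysfs show routines do.  A hedged sketch:
 *
 *      struct scsi_vpd *vpd;
 *
 *      rcu_read_lock();
 *      vpd = rcu_dereference(sdev->vpd_pg83);
 *      if (vpd) {
 *              // vpd->data holds vpd->len bytes of the Device
 *              // Identification page; copy out what is needed here
 *      }
 *      rcu_read_unlock();
 */
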
/**
 * scsi_report_opcode - Find out if a given command is supported
 * @sdev:       scsi device to query
 * @buffer:     scratch buffer (must be at least 20 bytes long)
 * @len:        length of buffer
 * @opcode:     opcode for the command to look up
 * @sa:         service action for the command to look up
 *
 * Uses the REPORT SUPPORTED OPERATION CODES to check support for the
 * command identified with @opcode and @sa. If the command does not
 * have a service action, @sa must be 0. Returns -EINVAL if RSOC fails,
 * 0 if the command is not supported and 1 if the device claims to
 * support the command.
 */
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
                       unsigned int len, unsigned char opcode,
                       unsigned short sa)
{
        unsigned char cmd[16];
        struct scsi_sense_hdr sshdr;
        int result, request_len;
        const struct scsi_exec_args exec_args = {
                .sshdr = &sshdr,
        };

        if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
                return -EINVAL;

        /* RSOC header + size of command we are asking about */
        request_len = 4 + COMMAND_SIZE(opcode);
        if (request_len > len) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: len %u bytes, opcode 0x%02x needs %u\n",
                              __func__, len, opcode, request_len);
                return -EINVAL;
        }

        memset(cmd, 0, 16);
        cmd[0] = MAINTENANCE_IN;
        cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
        if (!sa) {
                cmd[2] = 1;     /* One command format */
                cmd[3] = opcode;
        } else {
                cmd[2] = 3;     /* One command format with service action */
                cmd[3] = opcode;
                put_unaligned_be16(sa, &cmd[4]);
        }
        put_unaligned_be32(request_len, &cmd[6]);
        memset(buffer, 0, len);

        result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer,
                                  request_len, 30 * HZ, 3, &exec_args);
        if (result < 0)
                return result;
        if (result && scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == ILLEGAL_REQUEST &&
            (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
                return -EINVAL;

        if ((buffer[1] & 3) == 3) /* Command supported */
                return 1;

        return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);
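
/*
 * Illustrative example: probing for a specific opcode, roughly the way the
 * disk driver checks commands such as WRITE SAME(16).  A hedged sketch:
 *
 *      unsigned char buf[64];
 *
 *      if (scsi_report_opcode(sdev, buf, sizeof(buf), WRITE_SAME_16, 0) == 1)
 *              sdev_printk(KERN_INFO, sdev, "WRITE SAME(16) supported\n");
 */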

#define SCSI_CDL_CHECK_BUF_LEN  64

static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa,
                               unsigned char *buf)
{
        int ret;
        u8 cdlp;

        /* Check operation code */
        ret = scsi_report_opcode(sdev, buf, SCSI_CDL_CHECK_BUF_LEN, opcode, sa);
        if (ret <= 0)
                return false;

        if ((buf[1] & 0x03) != 0x03)
                return false;

        /*
         * See SPC-6, One_command parameter data format for
         * REPORT SUPPORTED OPERATION CODES. We have the following cases
         * depending on rwcdlp (buf[0] & 0x01) value:
         *  - rwcdlp == 0: then cdlp indicates support for the A mode page when
         *                 it is equal to 1 and for the B mode page when it is
         *                 equal to 2.
         *  - rwcdlp == 1: then cdlp indicates support for the T2A mode page
         *                 when it is equal to 1 and for the T2B mode page when
         *                 it is equal to 2.
         * Overall, to detect support for command duration limits, we only need
         * to check that cdlp is 1 or 2.
         */
        cdlp = (buf[1] & 0x18) >> 3;

        return cdlp == 0x01 || cdlp == 0x02;
}

/**
 * scsi_cdl_check - Check if a SCSI device supports Command Duration Limits
 * @sdev: The device to check
 */
void scsi_cdl_check(struct scsi_device *sdev)
{
        bool cdl_supported;
        unsigned char *buf;

        /*
         * Support for CDL was defined in SPC-5. Ignore devices reporting a
         * lower SPC version. This also avoids problems with old drives choking
         * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a
         * service action specified, as done in scsi_cdl_check_cmd().
         */
        if (sdev->scsi_level < SCSI_SPC_5) {
                sdev->cdl_supported = 0;
                return;
        }

        buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL);
        if (!buf) {
                sdev->cdl_supported = 0;
                return;
        }

        /* Check support for READ_16, WRITE_16, READ_32 and WRITE_32 commands */
        cdl_supported =
                scsi_cdl_check_cmd(sdev, READ_16, 0, buf) ||
                scsi_cdl_check_cmd(sdev, WRITE_16, 0, buf) ||
                scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, READ_32, buf) ||
                scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, WRITE_32, buf);
        if (cdl_supported) {
                /*
                 * We have CDL support: force the use of READ16/WRITE16.
                 * READ32 and WRITE32 will be used for devices that support
                 * the T10_PI_TYPE2_PROTECTION protection type.
                 */
                sdev->use_16_for_rw = 1;
                sdev->use_10_for_rw = 0;

                sdev->cdl_supported = 1;
        } else {
                sdev->cdl_supported = 0;
        }

        kfree(buf);
}

/**
 * scsi_cdl_enable - Enable or disable Command Duration Limits for a SCSI device
 * @sdev: The target device
 * @enable: the target state
 */
int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
{
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        struct scsi_vpd *vpd;
        bool is_ata = false;
        char buf[64];
        int ret;

        if (!sdev->cdl_supported)
                return -EOPNOTSUPP;

        rcu_read_lock();
        vpd = rcu_dereference(sdev->vpd_pg89);
        if (vpd)
                is_ata = true;
        rcu_read_unlock();

        /*
         * For ATA devices, CDL needs to be enabled with a SET FEATURES command.
         */
        if (is_ata) {
                char *buf_data;
                int len;

                ret = scsi_mode_sense(sdev, 0x08, 0x0a, 0xf2, buf, sizeof(buf),
                                      5 * HZ, 3, &data, NULL);
                if (ret)
                        return -EINVAL;

                /* Enable CDL using the ATA feature page */
                len = min_t(size_t, sizeof(buf),
                            data.length - data.header_length -
                            data.block_descriptor_length);
                buf_data = buf + data.header_length +
                        data.block_descriptor_length;
                if (enable)
                        buf_data[4] = 0x02;
                else
                        buf_data[4] = 0;

                ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
                                       &data, &sshdr);
                if (ret) {
                        if (ret > 0 && scsi_sense_valid(&sshdr))
                                scsi_print_sense_hdr(sdev,
                                        dev_name(&sdev->sdev_gendev), &sshdr);
                        return ret;
                }
        }

        sdev->cdl_enable = enable;

        return 0;
}

/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:       device to get a reference to
 *
 * Description: Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 *
 * This will fail if a device is deleted or cancelled, or when the LLD module
 * is in the process of being unloaded.
 */
int scsi_device_get(struct scsi_device *sdev)
{
        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
                goto fail;
        if (!try_module_get(sdev->host->hostt->module))
                goto fail;
        if (!get_device(&sdev->sdev_gendev))
                goto fail_put_module;
        return 0;

fail_put_module:
        module_put(sdev->host->hostt->module);
fail:
        return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:       device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
        struct module *mod = sdev->host->hostt->module;

        put_device(&sdev->sdev_gendev);
        module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);

/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;
                next = NULL;
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:    target whose devices we want to iterate over.
 * @data:       Opaque passed to each function call.
 * @fn:         Function to call on each device
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
                     void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(starget_for_each_device);
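
/*
 * Illustrative example: a hedged sketch of walking a target with a
 * hypothetical callback that just logs each device:
 *
 *      static void example_show_one(struct scsi_device *sdev, void *data)
 *      {
 *              sdev_printk(KERN_INFO, sdev, "LUN %llu\n", sdev->lun);
 *      }
 *
 *      starget_for_each_device(starget, NULL, example_show_one);
 */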

/**
 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
 * @starget:    target whose devices we want to iterate over.
 * @data:       parameter for callback @fn()
 * @fn:         callback function that is invoked for each device
 *
 * This traverses over each device of @starget.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use starget_for_each_device instead.
 **/
void __starget_for_each_device(struct scsi_target *starget, void *data,
                               void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        __shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(__starget_for_each_device);

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:    SCSI target pointer
 * @lun:        SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device. A scsi_device in state
 * SDEV_DEL is skipped.
 *
 * Note:  The only reason why drivers should use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
                                                   u64 lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                if (sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:    SCSI target pointer
 * @lun:        SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
                                                 u64 lun)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup_by_target(starget, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:      SCSI host pointer
 * @channel:    SCSI channel (zero if only one channel)
 * @id:         SCSI target number (physical unit number)
 * @lun:        SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host. The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any access
 * to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
                uint channel, uint id, u64 lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                if (sdev->channel == channel && sdev->id == id &&
                                sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:      SCSI host pointer
 * @channel:    SCSI channel (zero if only one channel)
 * @id:         SCSI target number (physical unit number)
 * @lun:        SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
                uint channel, uint id, u64 lun)
{
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup(shost, channel, id, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
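
/*
 * Illustrative example: looking up channel 0, target 0, LUN 0 on a host and
 * dropping the reference again.  A hedged sketch:
 *
 *      struct scsi_device *sdev = scsi_device_lookup(shost, 0, 0, 0);
 *
 *      if (sdev) {
 *              sdev_printk(KERN_INFO, sdev, "device found\n");
 *              scsi_device_put(sdev);
 *      }
 */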

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
        int error;

        error = scsi_init_queue();
        if (error)
                return error;
        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        scsi_netlink_init();

        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}

static void __exit exit_scsi(void)
{
        scsi_netlink_exit();
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        scsi_exit_procfs();
        scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);